Diffstat (limited to 'libgcc')
-rw-r--r-- libgcc/ChangeLog | 1026
-rw-r--r-- libgcc/Makefile.in | 206
-rw-r--r-- libgcc/config.host | 585
-rw-r--r-- libgcc/config/alpha/gthr-posix.c | 266
-rw-r--r-- libgcc/config/alpha/libgcc-alpha-ldbl.ver | 50
-rw-r--r-- libgcc/config/alpha/qrnnd.S | 163
-rw-r--r-- libgcc/config/alpha/t-alpha | 2
-rw-r--r-- libgcc/config/alpha/t-linux | 1
-rw-r--r-- libgcc/config/alpha/t-osf-pthread | 2
-rw-r--r-- libgcc/config/alpha/t-slibgcc-osf | 2
-rw-r--r-- libgcc/config/alpha/t-vms | 9
-rw-r--r-- libgcc/config/alpha/vms-dwarf2.S | 77
-rw-r--r-- libgcc/config/alpha/vms-dwarf2eh.S | 30
-rw-r--r-- libgcc/config/alpha/vms-gcc_shell_handler.c | 124
-rw-r--r-- libgcc/config/arm/bpabi-v6m.S | 318
-rw-r--r-- libgcc/config/arm/bpabi.S | 163
-rw-r--r-- libgcc/config/arm/bpabi.c | 56
-rw-r--r-- libgcc/config/arm/crti.S | 86
-rw-r--r-- libgcc/config/arm/crtn.S | 83
-rw-r--r-- libgcc/config/arm/fp16.c | 145
-rw-r--r-- libgcc/config/arm/ieee754-df.S | 1447
-rw-r--r-- libgcc/config/arm/ieee754-sf.S | 1060
-rw-r--r-- libgcc/config/arm/lib1funcs.S | 1829
-rw-r--r-- libgcc/config/arm/libgcc-bpabi.ver | 108
-rw-r--r-- libgcc/config/arm/libunwind.S | 2
-rw-r--r-- libgcc/config/arm/linux-atomic-64bit.c | 166
-rw-r--r-- libgcc/config/arm/linux-atomic.c | 279
-rw-r--r-- libgcc/config/arm/t-arm | 3
-rw-r--r-- libgcc/config/arm/t-bpabi | 12
-rw-r--r-- libgcc/config/arm/t-elf | 18
-rw-r--r-- libgcc/config/arm/t-linux | 7
-rw-r--r-- libgcc/config/arm/t-linux-eabi | 5
-rw-r--r-- libgcc/config/arm/t-netbsd | 7
-rw-r--r-- libgcc/config/arm/t-strongarm-elf | 6
-rw-r--r-- libgcc/config/arm/t-symbian | 17
-rw-r--r-- libgcc/config/arm/t-vxworks | 1
-rw-r--r-- libgcc/config/arm/t-wince-pe | 1
-rw-r--r-- libgcc/config/arm/unaligned-funcs.c | 57
-rw-r--r-- libgcc/config/avr/lib1funcs.S | 1674
-rw-r--r-- libgcc/config/avr/t-avr | 57
-rw-r--r-- libgcc/config/avr/t-rtems | 2
-rw-r--r-- libgcc/config/bfin/crti.S | 59
-rw-r--r-- libgcc/config/bfin/crtlibid.S | 29
-rw-r--r-- libgcc/config/bfin/crtn.S | 50
-rw-r--r-- libgcc/config/bfin/lib1funcs.S | 211
-rw-r--r-- libgcc/config/bfin/libgcc-glibc.ver | 1914
-rw-r--r-- libgcc/config/bfin/t-bfin | 3
-rw-r--r-- libgcc/config/bfin/t-crtlibid | 3
-rw-r--r-- libgcc/config/bfin/t-crtstuff | 1
-rw-r--r-- libgcc/config/bfin/t-linux | 1
-rw-r--r-- libgcc/config/c6x/crti.S | 39
-rw-r--r-- libgcc/config/c6x/crtn.S | 41
-rw-r--r-- libgcc/config/c6x/eqd.c | 47
-rw-r--r-- libgcc/config/c6x/eqf.c | 47
-rw-r--r-- libgcc/config/c6x/ged.c | 47
-rw-r--r-- libgcc/config/c6x/gef.c | 47
-rw-r--r-- libgcc/config/c6x/gtd.c | 47
-rw-r--r-- libgcc/config/c6x/gtf.c | 47
-rw-r--r-- libgcc/config/c6x/led.c | 47
-rw-r--r-- libgcc/config/c6x/lef.c | 47
-rw-r--r-- libgcc/config/c6x/lib1funcs.S | 438
-rw-r--r-- libgcc/config/c6x/libgcc-eabi.ver | 103
-rw-r--r-- libgcc/config/c6x/ltd.c | 47
-rw-r--r-- libgcc/config/c6x/ltf.c | 47
-rw-r--r-- libgcc/config/c6x/t-c6x-elf | 4
-rw-r--r-- libgcc/config/c6x/t-elf | 40
-rw-r--r-- libgcc/config/c6x/t-uclinux | 3
-rw-r--r-- libgcc/config/cris/arit.c | 304
-rw-r--r-- libgcc/config/cris/libgcc-glibc.ver | 7
-rw-r--r-- libgcc/config/cris/mulsi3.S | 255
-rw-r--r-- libgcc/config/cris/t-cris | 10
-rw-r--r-- libgcc/config/cris/t-elfmulti | 3
-rw-r--r-- libgcc/config/cris/t-linux | 2
-rw-r--r-- libgcc/config/darwin-64.c | 72
-rw-r--r-- libgcc/config/darwin-crt3.c | 6
-rw-r--r-- libgcc/config/epiphany/crti.S | 34
-rw-r--r-- libgcc/config/epiphany/crtint.S | 27
-rw-r--r-- libgcc/config/epiphany/crtm1reg-r43.S | 26
-rw-r--r-- libgcc/config/epiphany/crtm1reg-r63.S | 26
-rw-r--r-- libgcc/config/epiphany/crtn.S | 32
-rw-r--r-- libgcc/config/epiphany/crtrunc.S | 26
-rw-r--r-- libgcc/config/epiphany/divsi3-float.S | 77
-rw-r--r-- libgcc/config/epiphany/divsi3.S | 92
-rw-r--r-- libgcc/config/epiphany/divsi3.c | 120
-rw-r--r-- libgcc/config/epiphany/epiphany-asm.h | 53
-rw-r--r-- libgcc/config/epiphany/ieee-754/eqsf2.S | 50
-rw-r--r-- libgcc/config/epiphany/ieee-754/fast_div.S | 124
-rw-r--r-- libgcc/config/epiphany/ieee-754/gtesf2.S | 66
-rw-r--r-- libgcc/config/epiphany/ieee-754/ordsf2.S | 50
-rw-r--r-- libgcc/config/epiphany/ieee-754/uneqsf2.S | 45
-rw-r--r-- libgcc/config/epiphany/modsi3-float.S | 65
-rw-r--r-- libgcc/config/epiphany/modsi3.S | 77
-rw-r--r-- libgcc/config/epiphany/modsi3.c | 106
-rw-r--r-- libgcc/config/epiphany/mulsi3.c | 39
-rw-r--r-- libgcc/config/epiphany/t-custom-eqsf | 1
-rw-r--r-- libgcc/config/epiphany/t-epiphany | 35
-rw-r--r-- libgcc/config/epiphany/udivsi3-float.S | 83
-rw-r--r-- libgcc/config/epiphany/udivsi3-float.c | 125
-rw-r--r-- libgcc/config/epiphany/udivsi3.S | 85
-rw-r--r-- libgcc/config/epiphany/udivsi3.c | 114
-rw-r--r-- libgcc/config/epiphany/umodsi3-float.S | 63
-rw-r--r-- libgcc/config/epiphany/umodsi3.S | 70
-rw-r--r-- libgcc/config/epiphany/umodsi3.c | 101
-rw-r--r-- libgcc/config/fr30/crti.S | 61
-rw-r--r-- libgcc/config/fr30/crtn.S | 44
-rw-r--r-- libgcc/config/fr30/lib1funcs.S | 115
-rw-r--r-- libgcc/config/fr30/t-fr30 | 2
-rw-r--r-- libgcc/config/frv/cmovd.c | 51
-rw-r--r-- libgcc/config/frv/cmovh.c | 47
-rw-r--r-- libgcc/config/frv/cmovw.c | 51
-rw-r--r-- libgcc/config/frv/frvbegin.c | 157
-rw-r--r-- libgcc/config/frv/frvend.c | 70
-rw-r--r-- libgcc/config/frv/lib1funcs.S | 269
-rw-r--r-- libgcc/config/frv/libgcc-glibc.ver | 73
-rw-r--r-- libgcc/config/frv/modi.c | 4
-rw-r--r-- libgcc/config/frv/t-frv | 22
-rw-r--r-- libgcc/config/frv/t-linux | 3
-rw-r--r-- libgcc/config/frv/uitod.c | 4
-rw-r--r-- libgcc/config/frv/uitof.c | 4
-rw-r--r-- libgcc/config/frv/ulltod.c | 4
-rw-r--r-- libgcc/config/frv/ulltof.c | 4
-rw-r--r-- libgcc/config/frv/umodi.c | 4
-rw-r--r-- libgcc/config/gthr-lynx.h | 61
-rw-r--r-- libgcc/config/gthr-rtems.h | 157
-rw-r--r-- libgcc/config/gthr-vxworks.h | 170
-rw-r--r-- libgcc/config/h8300/clzhi2.c | 35
-rw-r--r-- libgcc/config/h8300/crti.S | 63
-rw-r--r-- libgcc/config/h8300/crtn.S | 53
-rw-r--r-- libgcc/config/h8300/ctzhi2.c | 35
-rw-r--r-- libgcc/config/h8300/fixunssfsi.c | 41
-rw-r--r-- libgcc/config/h8300/lib1funcs.S | 838
-rw-r--r-- libgcc/config/h8300/parityhi2.c | 36
-rw-r--r-- libgcc/config/h8300/popcounthi2.c | 36
-rw-r--r-- libgcc/config/h8300/t-h8300 | 13
-rw-r--r-- libgcc/config/i386/crti.S (renamed from libgcc/config/i386/sol2-ci.S) | 2
-rw-r--r-- libgcc/config/i386/crtn.S (renamed from libgcc/config/i386/sol2-cn.S) | 2
-rw-r--r-- libgcc/config/i386/cygming-crtbegin.c | 136
-rw-r--r-- libgcc/config/i386/cygming-crtend.c | 89
-rw-r--r-- libgcc/config/i386/cygwin.S | 188
-rw-r--r-- libgcc/config/i386/gthr-win32.c | 261
-rw-r--r-- libgcc/config/i386/gthr-win32.h | 772
-rw-r--r-- libgcc/config/i386/libgcc-darwin.10.4.ver | 98
-rw-r--r-- libgcc/config/i386/libgcc-darwin.10.5.ver | 102
-rw-r--r-- libgcc/config/i386/libgcc-glibc.ver | 186
-rw-r--r-- libgcc/config/i386/t-chkstk | 2
-rw-r--r-- libgcc/config/i386/t-cygming | 15
-rw-r--r-- libgcc/config/i386/t-cygwin | 19
-rw-r--r-- libgcc/config/i386/t-darwin | 4
-rw-r--r-- libgcc/config/i386/t-darwin64 | 3
-rw-r--r-- libgcc/config/i386/t-dlldir | 2
-rw-r--r-- libgcc/config/i386/t-dlldir-x | 3
-rw-r--r-- libgcc/config/i386/t-dw2-eh | 3
-rw-r--r-- libgcc/config/i386/t-gthr-win32 | 2
-rw-r--r-- libgcc/config/i386/t-interix | 3
-rw-r--r-- libgcc/config/i386/t-linux | 4
-rw-r--r-- libgcc/config/i386/t-mingw-pthread | 2
-rw-r--r-- libgcc/config/i386/t-mingw32 | 2
-rw-r--r-- libgcc/config/i386/t-nto | 3
-rw-r--r-- libgcc/config/i386/t-sjlj-eh | 3
-rw-r--r-- libgcc/config/i386/t-slibgcc-cygming | 58
-rw-r--r-- libgcc/config/i386/t-sol2 | 2
-rw-r--r-- libgcc/config/ia64/__divxf3.S (renamed from libgcc/config/ia64/__divxf3.asm) | 2
-rw-r--r-- libgcc/config/ia64/_fixtfdi.S (renamed from libgcc/config/ia64/_fixtfdi.asm) | 2
-rw-r--r-- libgcc/config/ia64/_fixunstfdi.S (renamed from libgcc/config/ia64/_fixunstfdi.asm) | 2
-rw-r--r-- libgcc/config/ia64/_floatditf.S (renamed from libgcc/config/ia64/_floatditf.asm) | 2
-rw-r--r-- libgcc/config/ia64/crtbegin.S | 254
-rw-r--r-- libgcc/config/ia64/crtend.S | 121
-rw-r--r-- libgcc/config/ia64/crti.S | 53
-rw-r--r-- libgcc/config/ia64/crtn.S | 43
-rw-r--r-- libgcc/config/ia64/fde-vms.c | 2
-rw-r--r-- libgcc/config/ia64/lib1funcs.S | 795
-rw-r--r-- libgcc/config/ia64/libgcc-glibc.ver | 97
-rw-r--r-- libgcc/config/ia64/libgcc-ia64.ver | 30
-rw-r--r-- libgcc/config/ia64/quadlib.c | 78
-rw-r--r-- libgcc/config/ia64/t-hpux | 8
-rw-r--r-- libgcc/config/ia64/t-ia64 | 41
-rw-r--r-- libgcc/config/ia64/t-linux (renamed from libgcc/config/ia64/t-glibc) | 2
-rw-r--r-- libgcc/config/ia64/t-linux-libunwind (renamed from libgcc/config/ia64/t-glibc-libunwind) | 0
-rw-r--r-- libgcc/config/ia64/t-slibgcc-hpux | 6
-rw-r--r-- libgcc/config/ia64/t-softfp-compat | 2
-rw-r--r-- libgcc/config/ia64/t-vms | 6
-rw-r--r-- libgcc/config/ia64/unwind-ia64.c | 1
-rw-r--r-- libgcc/config/ia64/vms-crtinit.S | 24
-rw-r--r-- libgcc/config/iq2000/lib2funcs.c | 40
-rw-r--r-- libgcc/config/iq2000/t-iq2000 | 5
-rw-r--r-- libgcc/config/libbid/bid_gcc_intrinsics.h | 3
-rw-r--r-- libgcc/config/libgcc-glibc.ver | 55
-rw-r--r-- libgcc/config/libgcc-libsystem.ver | 1
-rw-r--r-- libgcc/config/lm32/t-elf | 12
-rw-r--r-- libgcc/config/lm32/t-uclinux | 4
-rw-r--r-- libgcc/config/m32c/lib1funcs.S | 231
-rw-r--r-- libgcc/config/m32c/lib2funcs.c | 134
-rw-r--r-- libgcc/config/m32c/t-m32c | 13
-rw-r--r-- libgcc/config/m32c/trapv.c | 43
-rw-r--r-- libgcc/config/m32r/initfini.c | 168
-rw-r--r-- libgcc/config/m32r/libgcc-glibc.ver | 48
-rw-r--r-- libgcc/config/m32r/t-linux | 5
-rw-r--r-- libgcc/config/m32r/t-m32r | 23
-rw-r--r-- libgcc/config/m68k/crti.S | 44
-rw-r--r-- libgcc/config/m68k/crtn.S | 40
-rw-r--r-- libgcc/config/m68k/fpgnulib.c | 595
-rw-r--r-- libgcc/config/m68k/lb1sf68.S | 4116
-rw-r--r-- libgcc/config/m68k/t-floatlib | 11
-rw-r--r-- libgcc/config/m68k/t-slibgcc-elf-ver | 3
-rw-r--r-- libgcc/config/mcore/crti.S | 63
-rw-r--r-- libgcc/config/mcore/crtn.S | 45
-rw-r--r-- libgcc/config/mcore/lib1funcs.S | 303
-rw-r--r-- libgcc/config/mcore/t-mcore | 5
-rw-r--r-- libgcc/config/mep/lib1funcs.S | 125
-rw-r--r-- libgcc/config/mep/lib2funcs.c | 139
-rw-r--r-- libgcc/config/mep/t-mep | 16
-rw-r--r-- libgcc/config/mep/tramp.c | 103
-rw-r--r-- libgcc/config/microblaze/crti.S | 39
-rw-r--r-- libgcc/config/microblaze/crtn.S | 35
-rw-r--r-- libgcc/config/microblaze/divsi3.S (renamed from libgcc/config/microblaze/divsi3.asm) | 4
-rw-r--r-- libgcc/config/microblaze/moddi3.S (renamed from libgcc/config/microblaze/moddi3.asm) | 4
-rw-r--r-- libgcc/config/microblaze/modsi3.S (renamed from libgcc/config/microblaze/modsi3.asm) | 4
-rw-r--r-- libgcc/config/microblaze/muldi3_hard.S (renamed from libgcc/config/microblaze/muldi3_hard.asm) | 4
-rw-r--r-- libgcc/config/microblaze/mulsi3.S (renamed from libgcc/config/microblaze/mulsi3.asm) | 4
-rw-r--r-- libgcc/config/microblaze/stack_overflow_exit.S (renamed from libgcc/config/microblaze/stack_overflow_exit.asm) | 4
-rw-r--r-- libgcc/config/microblaze/t-microblaze | 18
-rw-r--r-- libgcc/config/microblaze/udivsi3.S (renamed from libgcc/config/microblaze/udivsi3.asm) | 4
-rw-r--r-- libgcc/config/microblaze/umodsi3.S (renamed from libgcc/config/microblaze/umodsi3.asm) | 4
-rw-r--r-- libgcc/config/mips/crti.S | 49
-rw-r--r-- libgcc/config/mips/crtn.S | 52
-rw-r--r-- libgcc/config/mips/gthr-mipssde.h | 230
-rw-r--r-- libgcc/config/mips/libgcc-mips16.ver | 86
-rw-r--r-- libgcc/config/mips/mips16.S | 712
-rw-r--r-- libgcc/config/mips/t-crtstuff | 2
-rw-r--r-- libgcc/config/mips/t-elf | 3
-rw-r--r-- libgcc/config/mips/t-mips | 2
-rw-r--r-- libgcc/config/mips/t-mips16 | 43
-rw-r--r-- libgcc/config/mips/t-slibgcc-irix | 2
-rw-r--r-- libgcc/config/mips/t-vr | 2
-rw-r--r-- libgcc/config/mips/vr4120-div.S | 74
-rw-r--r-- libgcc/config/mmix/crti.S | 116
-rw-r--r-- libgcc/config/mmix/crtn.S | 87
-rw-r--r-- libgcc/config/mmix/t-mmix | 24
-rw-r--r-- libgcc/config/moxie/crti.S (renamed from libgcc/config/moxie/crti.asm) | 6
-rw-r--r-- libgcc/config/moxie/crtn.S (renamed from libgcc/config/moxie/crtn.asm) | 6
-rw-r--r-- libgcc/config/moxie/t-moxie | 9
-rw-r--r-- libgcc/config/pa/fptr.c | 131
-rw-r--r-- libgcc/config/pa/gthr-dce.h | 563
-rw-r--r-- libgcc/config/pa/lib2funcs.S | 74
-rw-r--r-- libgcc/config/pa/linux-atomic.c | 305
-rw-r--r-- libgcc/config/pa/milli64.S | 2134
-rw-r--r-- libgcc/config/pa/quadlib.c | 245
-rw-r--r-- libgcc/config/pa/stublib.c | 97
-rw-r--r-- libgcc/config/pa/t-hpux | 3
-rw-r--r-- libgcc/config/pa/t-hpux10 | 1
-rw-r--r-- libgcc/config/pa/t-linux | 10
-rw-r--r-- libgcc/config/pa/t-linux64 | 8
-rw-r--r-- libgcc/config/pa/t-pa64 | 3
-rw-r--r-- libgcc/config/pa/t-slibgcc-dwarf-ver | 3
-rw-r--r-- libgcc/config/pa/t-slibgcc-hpux | 25
-rw-r--r-- libgcc/config/pa/t-slibgcc-sjlj-ver | 3
-rw-r--r-- libgcc/config/pa/t-stublib | 21
-rw-r--r-- libgcc/config/pa/t-stublib64 | 13
-rw-r--r-- libgcc/config/pdp11/t-pdp11 | 8
-rw-r--r-- libgcc/config/picochip/adddi3.S | 194
-rw-r--r-- libgcc/config/picochip/ashlsi3.S | 193
-rw-r--r-- libgcc/config/picochip/ashlsi3.c | 82
-rw-r--r-- libgcc/config/picochip/ashrsi3.S | 202
-rw-r--r-- libgcc/config/picochip/ashrsi3.c | 113
-rw-r--r-- libgcc/config/picochip/clzsi2.S | 189
-rw-r--r-- libgcc/config/picochip/cmpsi2.S | 212
-rw-r--r-- libgcc/config/picochip/divmod15.S | 261
-rw-r--r-- libgcc/config/picochip/divmodhi4.S | 246
-rw-r--r-- libgcc/config/picochip/divmodsi4.S | 233
-rw-r--r-- libgcc/config/picochip/lib1funcs.S | 4
-rw-r--r-- libgcc/config/picochip/longjmp.S | 182
-rw-r--r-- libgcc/config/picochip/lshrsi3.S | 190
-rw-r--r-- libgcc/config/picochip/lshrsi3.c | 76
-rw-r--r-- libgcc/config/picochip/parityhi2.S | 179
-rw-r--r-- libgcc/config/picochip/popcounthi2.S | 201
-rw-r--r-- libgcc/config/picochip/setjmp.S | 182
-rw-r--r-- libgcc/config/picochip/subdi3.S | 191
-rw-r--r-- libgcc/config/picochip/t-picochip | 37
-rw-r--r-- libgcc/config/picochip/ucmpsi2.S | 209
-rw-r--r-- libgcc/config/picochip/udivmodhi4.S | 238
-rw-r--r-- libgcc/config/picochip/udivmodsi4.S | 318
-rw-r--r-- libgcc/config/rs6000/crtresfpr.S | 81
-rw-r--r-- libgcc/config/rs6000/crtresgpr.S | 81
-rw-r--r-- libgcc/config/rs6000/crtresxfpr.S | 126
-rw-r--r-- libgcc/config/rs6000/crtresxgpr.S | 124
-rw-r--r-- libgcc/config/rs6000/crtsavfpr.S | 81
-rw-r--r-- libgcc/config/rs6000/crtsavgpr.S | 81
-rw-r--r-- libgcc/config/rs6000/darwin-asm.h | 51
-rw-r--r-- libgcc/config/rs6000/darwin-fallback.c | 3
-rw-r--r-- libgcc/config/rs6000/darwin-fpsave.S | 92
-rw-r--r-- libgcc/config/rs6000/darwin-gpsave.S | 118
-rw-r--r-- libgcc/config/rs6000/darwin-tramp.S | 125
-rw-r--r-- libgcc/config/rs6000/darwin-vecsave.S | 155
-rw-r--r-- libgcc/config/rs6000/darwin-world.S | 259
-rw-r--r-- libgcc/config/rs6000/e500crtres32gpr.S | 73
-rw-r--r-- libgcc/config/rs6000/e500crtres64gpr.S | 73
-rw-r--r-- libgcc/config/rs6000/e500crtres64gprctr.S | 90
-rw-r--r-- libgcc/config/rs6000/e500crtrest32gpr.S | 75
-rw-r--r-- libgcc/config/rs6000/e500crtrest64gpr.S | 74
-rw-r--r-- libgcc/config/rs6000/e500crtresx32gpr.S | 75
-rw-r--r-- libgcc/config/rs6000/e500crtresx64gpr.S | 75
-rw-r--r-- libgcc/config/rs6000/e500crtsav32gpr.S | 73
-rw-r--r-- libgcc/config/rs6000/e500crtsav64gpr.S | 72
-rw-r--r-- libgcc/config/rs6000/e500crtsav64gprctr.S | 91
-rw-r--r-- libgcc/config/rs6000/e500crtsavg32gpr.S | 73
-rw-r--r-- libgcc/config/rs6000/e500crtsavg64gpr.S | 73
-rw-r--r-- libgcc/config/rs6000/e500crtsavg64gprctr.S | 90
-rw-r--r-- libgcc/config/rs6000/eabi-ci.S | 113
-rw-r--r-- libgcc/config/rs6000/eabi-cn.S | 104
-rw-r--r-- libgcc/config/rs6000/eabi.S | 289
-rw-r--r-- libgcc/config/rs6000/gthr-aix.h | 35
-rw-r--r-- libgcc/config/rs6000/libgcc-darwin.10.4.ver | 93
-rw-r--r-- libgcc/config/rs6000/libgcc-darwin.10.5.ver | 106
-rw-r--r-- libgcc/config/rs6000/libgcc-glibc.ver (renamed from libgcc/config/rs6000/libgcc-ppc-glibc.ver) | 0
-rw-r--r-- libgcc/config/rs6000/libgcc-ibm-ldouble.ver (renamed from libgcc/config/rs6000/libgcc-ppc64.ver) | 0
-rw-r--r-- libgcc/config/rs6000/sol-ci.S | 94
-rw-r--r-- libgcc/config/rs6000/sol-cn.S | 72
-rw-r--r-- libgcc/config/rs6000/t-crtstuff | 3
-rw-r--r-- libgcc/config/rs6000/t-darwin | 19
-rw-r--r-- libgcc/config/rs6000/t-darwin64 | 6
-rw-r--r-- libgcc/config/rs6000/t-ibm-ldouble | 2
-rw-r--r-- libgcc/config/rs6000/t-ldbl128 | 3
-rw-r--r-- libgcc/config/rs6000/t-linux | 3
-rw-r--r-- libgcc/config/rs6000/t-linux64 | 2
-rw-r--r-- libgcc/config/rs6000/t-lynx | 1
-rw-r--r-- libgcc/config/rs6000/t-netbsd | 9
-rw-r--r-- libgcc/config/rs6000/t-ppccomm | 132
-rw-r--r-- libgcc/config/rs6000/t-slibgcc-aix | 44
-rw-r--r-- libgcc/config/rs6000/tramp.S | 107
-rw-r--r-- libgcc/config/s390/gthr-tpf.h | 229
-rw-r--r-- libgcc/config/s390/t-crtstuff | 3
-rw-r--r-- libgcc/config/s390/t-tpf | 2
-rw-r--r-- libgcc/config/score/crti.S | 131
-rw-r--r-- libgcc/config/score/crtn.S | 50
-rw-r--r-- libgcc/config/sh/crt1.S | 1369
-rw-r--r-- libgcc/config/sh/crti.S | 125
-rw-r--r-- libgcc/config/sh/crtn.S | 77
-rw-r--r-- libgcc/config/sh/lib1funcs-4-300.S | 936
-rw-r--r-- libgcc/config/sh/lib1funcs-Os-4-200.S | 322
-rw-r--r-- libgcc/config/sh/lib1funcs.S | 3933
-rw-r--r-- libgcc/config/sh/lib1funcs.h | 76
-rw-r--r-- libgcc/config/sh/libgcc-excl.ver | 8
-rw-r--r-- libgcc/config/sh/libgcc-glibc.ver | 48
-rw-r--r-- libgcc/config/sh/linux-atomic.S | 223
-rw-r--r-- libgcc/config/sh/t-linux | 37
-rw-r--r-- libgcc/config/sh/t-netbsd | 5
-rw-r--r-- libgcc/config/sh/t-sh | 63
-rw-r--r-- libgcc/config/sh/t-sh64 | 6
-rw-r--r-- libgcc/config/sh/t-superh | 11
-rw-r--r-- libgcc/config/sparc/crti.S (renamed from libgcc/config/sparc/sol2-ci.S) | 2
-rw-r--r-- libgcc/config/sparc/crtn.S (renamed from libgcc/config/sparc/sol2-cn.S) | 2
-rw-r--r-- libgcc/config/sparc/lb1spc.S | 784
-rw-r--r-- libgcc/config/sparc/libgcc-glibc.ver | 93
-rw-r--r-- libgcc/config/sparc/t-linux | 4
-rw-r--r-- libgcc/config/sparc/t-linux64 | 1
-rw-r--r-- libgcc/config/sparc/t-softmul | 2
-rw-r--r-- libgcc/config/sparc/t-sol2 | 2
-rw-r--r-- libgcc/config/spu/cache.S | 43
-rw-r--r-- libgcc/config/spu/cachemgr.c | 438
-rw-r--r-- libgcc/config/spu/divmodti4.c | 188
-rw-r--r-- libgcc/config/spu/divv2df3.c | 195
-rw-r--r-- libgcc/config/spu/float_disf.c | 31
-rw-r--r-- libgcc/config/spu/float_unsdidf.c | 54
-rw-r--r-- libgcc/config/spu/float_unsdisf.c | 31
-rw-r--r-- libgcc/config/spu/float_unssidf.c | 45
-rw-r--r-- libgcc/config/spu/mfc_multi_tag_release.c | 72
-rw-r--r-- libgcc/config/spu/mfc_multi_tag_reserve.c | 84
-rw-r--r-- libgcc/config/spu/mfc_tag_release.c | 59
-rw-r--r-- libgcc/config/spu/mfc_tag_reserve.c | 51
-rw-r--r-- libgcc/config/spu/mfc_tag_table.c | 39
-rw-r--r-- libgcc/config/spu/multi3.c | 119
-rw-r--r-- libgcc/config/spu/t-elf | 56
-rw-r--r-- libgcc/config/stormy16/ashlsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/ashrsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/clzhi2.c | 2
-rw-r--r-- libgcc/config/stormy16/cmpsi2.c | 2
-rw-r--r-- libgcc/config/stormy16/ctzhi2.c | 2
-rw-r--r-- libgcc/config/stormy16/divsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/ffshi2.c | 2
-rw-r--r-- libgcc/config/stormy16/lib2funcs.c | 358
-rw-r--r-- libgcc/config/stormy16/lshrsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/modsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/parityhi2.c | 2
-rw-r--r-- libgcc/config/stormy16/popcounthi2.c | 2
-rw-r--r-- libgcc/config/stormy16/t-stormy16 | 39
-rw-r--r-- libgcc/config/stormy16/ucmpsi2.c | 2
-rw-r--r-- libgcc/config/stormy16/udivmodsi4.c | 2
-rw-r--r-- libgcc/config/stormy16/udivsi3.c | 2
-rw-r--r-- libgcc/config/stormy16/umodsi3.c | 2
-rw-r--r-- libgcc/config/t-crtin | 4
-rw-r--r-- libgcc/config/t-crtstuff-pic | 2
-rw-r--r-- libgcc/config/t-darwin | 6
-rw-r--r-- libgcc/config/t-freebsd-thread | 2
-rw-r--r-- libgcc/config/t-libgcc-pic | 2
-rw-r--r-- libgcc/config/t-libunwind | 9
-rw-r--r-- libgcc/config/t-linux | 3
-rw-r--r-- libgcc/config/t-openbsd-thread | 3
-rw-r--r-- libgcc/config/t-rtems | 2
-rw-r--r-- libgcc/config/t-slibgcc | 5
-rw-r--r-- libgcc/config/t-slibgcc-darwin | 5
-rw-r--r-- libgcc/config/t-slibgcc-elf-ver | 2
-rw-r--r-- libgcc/config/t-slibgcc-gld-nover | 3
-rw-r--r-- libgcc/config/t-slibgcc-hpux | 7
-rw-r--r-- libgcc/config/t-slibgcc-libgcc | 32
-rw-r--r-- libgcc/config/t-slibgcc-nolc-override | 1
-rw-r--r-- libgcc/config/t-slibgcc-vms | 28
-rw-r--r-- libgcc/config/t-sol2 | 2
-rw-r--r-- libgcc/config/t-vxworks | 14
-rw-r--r-- libgcc/config/v850/lib1funcs.S | 2330
-rw-r--r-- libgcc/config/v850/t-v850 | 60
-rw-r--r-- libgcc/config/vax/lib1funcs.S | 92
-rw-r--r-- libgcc/config/vax/t-linux | 2
-rw-r--r-- libgcc/config/vms/t-vms | 6
-rw-r--r-- libgcc/config/vms/vms-ucrt0.c | 127
-rw-r--r-- libgcc/config/vxlib-tls.c | 362
-rw-r--r-- libgcc/config/vxlib.c | 95
-rw-r--r-- libgcc/config/xtensa/crti.S | 51
-rw-r--r-- libgcc/config/xtensa/crtn.S | 46
-rw-r--r-- libgcc/config/xtensa/ieee754-df.S | 2388
-rw-r--r-- libgcc/config/xtensa/ieee754-sf.S | 1757
-rw-r--r-- libgcc/config/xtensa/lib1funcs.S | 845
-rw-r--r-- libgcc/config/xtensa/lib2funcs.S | 186
-rw-r--r-- libgcc/config/xtensa/libgcc-glibc.ver | 3
-rw-r--r-- libgcc/config/xtensa/t-elf | 5
-rw-r--r-- libgcc/config/xtensa/t-linux | 1
-rw-r--r-- libgcc/config/xtensa/t-xtensa | 14
-rw-r--r-- libgcc/config/xtensa/unwind-dw2-xtensa.c | 1
-rw-r--r-- libgcc/configure | 347
-rw-r--r-- libgcc/configure.ac | 97
-rw-r--r-- libgcc/crtstuff.c | 651
-rw-r--r-- libgcc/dfp-bit.h | 3
-rw-r--r-- libgcc/divmod.c | 73
-rw-r--r-- libgcc/emutls.c | 3
-rw-r--r-- libgcc/fixed-bit.c | 3
-rw-r--r-- libgcc/floatunsidf.c | 15
-rw-r--r-- libgcc/floatunsisf.c | 18
-rw-r--r-- libgcc/floatunsitf.c | 15
-rw-r--r-- libgcc/floatunsixf.c | 15
-rw-r--r-- libgcc/fp-bit.c | 1
-rw-r--r-- libgcc/gbl-ctors.h | 87
-rw-r--r-- libgcc/generic-morestack-thread.c | 1
-rw-r--r-- libgcc/generic-morestack.c | 1
-rw-r--r-- libgcc/gthr-posix.h | 879
-rw-r--r-- libgcc/gthr-single.h | 292
-rw-r--r-- libgcc/gthr.h | 148
-rw-r--r-- libgcc/libgcc2.c | 2253
-rw-r--r-- libgcc/libgcc2.h | 530
-rw-r--r-- libgcc/libgcov.c | 440
-rw-r--r-- libgcc/longlong.h | 1660
-rw-r--r-- libgcc/memcmp.c | 16
-rw-r--r-- libgcc/memcpy.c | 12
-rw-r--r-- libgcc/memmove.c | 20
-rw-r--r-- libgcc/memset.c | 11
-rwxr-xr-x libgcc/mkheader.sh | 41
-rw-r--r-- libgcc/mkmap-flat.awk | 109
-rw-r--r-- libgcc/mkmap-symver.awk | 136
-rw-r--r-- libgcc/siditi-object.mk | 8
-rw-r--r-- libgcc/udivmod.c | 37
-rw-r--r-- libgcc/udivmodsi4.c | 47
-rw-r--r-- libgcc/unwind-dw2-fde-dip.c | 3
-rw-r--r-- libgcc/unwind-dw2-fde.c | 3
-rw-r--r-- libgcc/unwind-dw2.c | 1
-rw-r--r-- libgcc/unwind-sjlj.c | 3
463 files changed, 67481 insertions, 795 deletions
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index a261e7524cd..1f2070b03d7 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,1029 @@
+2011-11-07 Nathan Sidwell <nathan@acm.org>
+
+ * libgcov.c (struct gcov_fn_buffer): New struct.
+ (buffer_fn_data): New helper.
+ (gcov_exit): Rework for new gcov data structures.
+
+2011-11-07 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/49313
+ * config/avr/lib1funcs.S (__divmodhi4, __divmodsi4): Tweak speed.
+
+2011-11-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config.host (tmake_file): Correct comment.
+ (bfin*-elf*): Remove bfin/t-elf from tmake_file, add
+ t-libgcc-pic.
+ (bfin*-uclinux*): Likewise.
+ (bfin*-linux-uclibc*): Likewise.
+ (xstormy16-*-elf): Add stormy16/t-stormy16 to tmake_file.
+
+ * config/arm/t-elf (HOST_LIBGCC2_CFLAGS): Append instead of
+ assigning.
+ * config/arm/t-strongarm-elf (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/avr/t-avr (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/c6x/t-elf (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/h8300/t-h8300 (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/lm32/t-elf (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/m32r/t-m32r (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/mcore/t-mcore (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/mips/t-elf (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/mmix/t-mmix (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/pdp11/t-pdp11 (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/picochip/t-picochip (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/stormy16/t-stormy16 (HOST_LIBGCC2_CFLAGS): Likewise.
+ * config/t-openbsd-thread (HOST_LIBGCC2_CFLAGS): Likewise.
+
+ * config/bfin/t-elf: Remove.
+ * config/t-vxworks (HOST_LIBGCC2_CFLAGS): Remove.
+
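The entry above switches these fragments from overwriting HOST_LIBGCC2_CFLAGS to appending to it; a minimal GNU make sketch of the difference (the flag shown is illustrative, not taken from this commit):

    # "=" clobbers whatever an earlier t-* fragment contributed:
    HOST_LIBGCC2_CFLAGS = -DDONT_USE_BUILTIN_SETJMP
    # "+=" accumulates, so several fragments can each add their flags:
    HOST_LIBGCC2_CFLAGS += -DDONT_USE_BUILTIN_SETJMP
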
+2011-11-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config.host (*-*-rtems*): Add t-rtems to tmake_file.
+ (i[34567]86-*-rtems*): Remove t-rtems from tmake_file.
+ (lm32-*-elf*, lm32-*-rtems*): Split into ...
+ (lm32-*-elf*): ... this.
+ (lm32-*-rtems*): ... and this.
+ Add to tmake_file.
+ (m32r-*-rtems*): Add to tmake_file.
+ (moxie-*-rtems*): Likewise.
+ (sparc-*-rtems*): Likewise.
+ Remove t-rtems from tmake_file.
+ (sparc64-*-rtems*): Likewise.
+ * config/t-rtems (HOST_LIBGCC2_CFLAGS): Use LIBGCC2_INCLUDES
+ instead.
+
+2011-11-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR bootstrap/50982
+ * config/gthr-posix.h: Move ...
+ * gthr-posix.h: ... here.
+ * config/gthr-lynx.h: Reflect this.
+ * config/gthr-vxworks.h: Likewise.
+ * config/rs6000/gthr-aix.h: Likewise.
+ * configure.ac (target_thread_file): Likewise.
+ * configure: Regenerate.
+
+2011-11-06 Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+ * config.host (arm*-*-rtemseabi*): New target.
+
+2011-11-06 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR other/50991
+ * Makefile.in: Make EXTRA_PARTS depend on libgcc_tm.h instead of
+ extra-parts.
+
+2011-11-05 Joern Rennecke <joern.rennecke@embecosm.com>
+
+ * config.host (epiphany-*-elf*): New configuration.
+ * config/epiphany: New directory.
+
+2011-11-05 Ralf Corsépius <ralf.corsepius@rtems.org>
+
+ * config.host (avr-*-rtems*): Add config/avr/t-rtems.
+ * config/avr/t-rtems: New.
+ Filter out _exit from LIB1ASMFUNCS.
+
+2011-11-04 David S. Miller <davem@davemloft.net>
+
+ * configure.ac: Test for 64-bit addresses on !x86 using __LP64__.
+ * configure: Rebuild.
+
+2011-11-04 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+
+ * config/s390/t-crtstuff: Add -fPIC to CRTSTUFF_T_CFLAGS_S
+ variable.
+
+2011-11-04 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/50931
+ * config/avr/t-avr (LIB1ASMFUNCS): Add _divmodpsi4, _udivmodpsi4.
+ * config/avr/lib1funcs.S (__udivmodpsi4, __divmodpsi4): New functions.
+
+2011-11-04 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ PR target/50989
+ * config.host (sparc-*-rtems*): Add sparc/t-softmul.
+
+2011-11-04 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config/c6x/t-elf (LIB2ADDEH): Set.
+ * config/c6x/t-c6x-elf: Remove.
+
+2011-11-04 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config/i386/sol2-ci.S: Rename to ...
+ * config/i386/crti.S: ... this.
+ * config/i386/sol2-cn.S: Rename to ...
+ * config/i386/crtn.S: ... this.
+ * config/sparc/sol2-ci.S: Rename to ...
+ * config/sparc/crti.S: ... this.
+ * config/sparc/sol2-cn.S: Rename to ...
+ * config/sparc/crtn.S: ... this.
+ * config/t-sol2 (CUSTOM_CRTIN): Remove.
+ (crti.o, crtn.o): Remove.
+
+2011-11-04 Tristan Gingold <gingold@adacore.com>
+
+ * config/ia64/fde-vms.c: Do not include md-unwind-support.h.
+
+2011-11-04 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ * config/sh/t-sh: Use $(gcc_compile) instead of $(compile).
+
+2011-11-03 Hans-Peter Nilsson <hp@axis.com>
+
+ * config.host (crisv32-*-none, cris-*-none): Remove.
+ (crisv32-*-elf): Append to tmake_file, don't just set it.
+ (cris-*-elf): Add missing t-fdpbit to tmake_file.
+
+2011-11-03 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config/rs6000/t-ppccomm (ecrti$(objext)): Use $<.
+ (ecrtn$(objext)): Likewise.
+ (ncrti$(objext)): Likewise.
+ (ncrtn$(objext)): Likewise.
+
+2011-11-03 Andreas Schwab <schwab@redhat.com>
+
+ * config/ia64/t-ia64 (crtbeginS.o): Fix whitespace damage.
+
+2011-11-02 David S. Miller <davem@davemloft.net>
+
+ * configure.ac: Set host_address on sparc too.
+ * configure: Regenerate.
+ * config.host: Add sparc/t-linux64 and sparc/t-softmul conditionally
+ based upon host_address.
+ * config/sparc/t-linux64: Set CRTSTUFF_T_CFLAGS unconditionally.
+
+2011-11-02 Jason Merrill <jason@redhat.com>
+
+ * config/rs6000/t-ppccomm: Add missing \.
+
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * gthr-single.h, gthr.h: New files.
+ * config/gthr-lynx.h, config/gthr-posix.h, config/gthr-rtems.h,
+ config/gthr-vxworks.h, config/i386/gthr-win32.h,
+ config/mips/gthr-mipssde.h, config/pa/gthr-dce.h,
+ config/rs6000/gthr-aix.h, config/s390/gthr-tpf.h: New files.
+ * config/i386/gthr-win32.c: Include "gthr-win32.h".
+ * configure.ac (thread_header): New variable.
+ Set it depending on target_thread_file.
+ (gthr-default.h): Link from $thread_header.
+ * configure: Regenerate.
+ * Makefile.in (LIBGCC2_CFLAGS): Remove $(GTHREAD_FLAGS).
+
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac (tm_file_): New variable.
+ Determine from tm_file.
+ (tm_file, tm_defines): Substitute.
+ * configure: Regenerate.
+ * mkheader.sh: New file.
+ * Makefile.in (clean): Remove libgcc_tm.h.
+ ($(objects)): Depend on libgcc_tm.h.
+ (libgcc_tm_defines, libgcc_tm_file): New variables.
+ (libgcc_tm.h, libgcc_tm.stamp): New targets.
+ ($(libgcc-objects), $(libgcc-s-objects), $(libgcc-eh-objects))
+ ($(libgcov-objects), $(libunwind-objects), $(libunwind-s-objects))
+ ($(extra-parts)): Depend on libgcc_tm.h.
+ * config.host (tm_defines, tm_file): New variable.
+ (arm*-*-linux*): Set tm_file for arm*-*-linux-*eabi.
+ (arm*-*-uclinux*): Set tm_file for arm*-*-uclinux*eabi.
+ (arm*-*-eabi*, arm*-*-symbianelf*): Set tm_file.
+ (avr-*-rtems*): Likewise.
+ (avr-*-*): Likewise.
+ (frv-*-elf): Likewise.
+ (frv-*-*linux*): Likewise.
+ (h8300-*-rtems*): Likewise.
+ (h8300-*-elf*): Likewise.
+ (i[34567]86-*-darwin*): Likewise.
+ (x86_64-*-darwin*): Likewise.
+ (rx-*-elf): Likewise.
+ (tic6x-*-uclinux): Likewise.
+ (tic6x-*-elf): Likewise.
+ (i[34567]86-*-linux*, x86_64-*-linux*): Likewise.
+ * config/alpha/gthr-posix.c: Include libgcc_tm.h.
+ * config/i386/cygming-crtbegin.c: Likewise.
+ * config/i386/cygming-crtend.c: Likewise.
+ * config/ia64/fde-vms.c: Likewise.
+ * config/ia64/unwind-ia64.c: Likewise.
+ * config/libbid/bid_gcc_intrinsics.h: Likewise.
+ * config/rs6000/darwin-fallback.c: Likewise.
+ * config/stormy16/lib2funcs.c: Likewise.
+ * config/xtensa/unwind-dw2-xtensa.c: Likewise.
+ * crtstuff.c: Likewise.
+ * dfp-bit.h: Likewise.
+ * emutls.c: Likewise.
+ * fixed-bit.c: Likewise.
+ * fp-bit.c: Likewise.
+ * generic-morestack-thread.c: Likewise.
+ * generic-morestack.c: Likewise.
+ * libgcc2.c: Likewise.
+ * libgcov.c: Likewise.
+ * unwind-dw2-fde-dip.c: Likewise.
+ * unwind-dw2-fde.c: Likewise.
+ * unwind-dw2.c: Likewise.
+ * unwind-sjlj.c: Likewise.
+
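The libgcc_tm.h/libgcc_tm.stamp targets above follow the usual stamp-file idiom, so objects that depend on the generated header rebuild only when its content actually changes; a minimal sketch under assumed spellings (the move-if-change helper and exact variable names are illustrative, not copied from the commit):

    # Regenerate via a stamp so libgcc_tm.h's timestamp only moves
    # when mkheader.sh produces different output.
    libgcc_tm.h: libgcc_tm.stamp; @true
    libgcc_tm.stamp: $(srcdir)/mkheader.sh
    	DEFINES='$(tm_defines)' HEADERS='$(tm_file)' \
    	  $(SHELL) $(srcdir)/mkheader.sh > tmp-libgcc_tm.h
    	$(SHELL) $(srcdir)/../move-if-change tmp-libgcc_tm.h libgcc_tm.h
    	echo timestamp > $@
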
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac: Include ../config/picflag.m4.
+ (GCC_PICFLAG): Call it.
+ Substitute.
+ * configure: Regenerate.
+ * Makefile.in (gcc_srcdir): Remove.
+ (LIBGCC2_DEBUG_CFLAGS, LIBGCC2_CFLAGS, LIBGCC2_INCLUDES)
+ (HOST_LIBGCC2_CFLAGS, PICFLAG, LIB2FUNCS_ST, LIB2FUNCS_EXCLUDE)
+ (LIB2_DIVMOD_FUNCS, LIB2ADD, LIB2ADD_ST): Set.
+ ($(lib2funcs-o), $(lib2funcs-s-o), $(lib2-divmod-o))
+ ($(lib2-divmod-s-o)): Use $(srcdir) to refer to libgcc2.c.
+ Use $<.
+ Remove comment.
+ * libgcc2.c, libgcc2.h, gbl-ctors.h, longlong.h: New files.
+ * siditi-object.mk ($o$(objext), $(o)_s$(objext)): Use $(srcdir)
+ to refer to libgcc2.c.
+ Use $<.
+ * config/darwin-64.c: New file.
+ * config/darwin-crt3.c: Remove comment.
+ * config/divmod.c, config/floatunsidf.c, config/floatunsisf.c,
+ config/floatunsitf.c, config/floatunsixf.c, config/udivmod.c,
+ config/udivmodsi4.c: New files.
+ * config/memcmp.c, config/memcpy.c, config/memmove.c,
+ config/memset.c: New files.
+ * config/t-crtstuff-pic (CRTSTUFF_T_CFLAGS_S): Use $(PICFLAG).
+ * config/t-darwin (HOST_LIBGCC2_CFLAGS): Set.
+ * config/t-freebsd-thread, config/t-libgcc-pic: New files.
+ * config/t-libunwind (HOST_LIBGCC2_CFLAGS): Set.
+ * config/t-openbsd-thread: New file.
+ * config/t-sol2 (HOST_LIBGCC2_CFLAGS): Remove.
+ * config/t-vxworks, config/vxlib-tls.c, config/vxlib.c: New files.
+ * config/alpha/gthr-posix.c, config/alpha/qrnnd.S: New files.
+ * config/alpha/t-alpha (LIB2ADD): Use $(srcdir) to refer to
+ qrnnd.S.
+ Adapt filename.
+ * config/alpha/t-osf-pthread (LIB2ADD): Use $(srcdir)/config/alpha
+ to refer to gthr-posix.c.
+ * config/alpha/t-vms (LIB2ADD): Set.
+ * config/alpha/vms-gcc_shell_handler.c: New file.
+ * config/arm/bpabi.c, config/arm/fp16.c,
+ config/arm/linux-atomic.c, config/arm/linux-atomic-64bit.c,
+ config/arm/unaligned-funcs.c: New files.
+ * config/arm/t-bpabi (LIB2ADD, LIB2ADD_ST): Set.
+ * config/arm/t-elf (HOST_LIBGCC2_CFLAGS): Set.
+ * config/arm/t-linux: Likewise.
+ * config/arm/t-linux-eabi (LIB2ADD_ST): Add.
+ * config/arm/t-netbsd: New file.
+ * config/arm/t-strongarm-elf (HOST_LIBGCC2_CFLAGS): Set.
+ * config/arm/t-symbian (LIB2ADD_ST): Set.
+ * config/avr/t-avr (LIB2FUNCS_EXCLUDE, HOST_LIBGCC2_CFLAGS): Set.
+ * config/bfin/t-crtstuff (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/bfin/t-elf: New file.
+ * config/c6x/eqd.c, config/c6x/eqf.c, config/c6x/ged.c,
+ config/c6x/gef.c, config/c6x/gtd.c, config/c6x/gtf.c,
+ config/c6x/led.c, config/c6x/lef.c, config/c6x/ltd.c,
+ config/c6x/ltf.c: New files.
+ * config/c6x/t-elf (LIB2FUNCS_EXCLUDE, LIB2ADD)
+ (HOST_LIBGCC2_CFLAGS): Set.
+ * config/c6x/t-uclinux (HOST_LIBGCC2_CFLAGS): Set.
+ (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/cris/arit.c, config/cris/mulsi3.S, config/cris/t-cris:
+ New files.
+ * config/cris/t-elfmulti (LIB2ADD_ST): Set.
+ * config/cris/t-linux (HOST_LIBGCC2_CFLAGS): Remove.
+ * config/frv/cmovd.c, config/frv/cmovh.c, config/frv/cmovw.c,
+ config/frv/modi.c, config/frv/uitod.c, config/frv/uitof.c,
+ config/frv/ulltod.c, config/frv/ulltof.c, config/frv/umodi.c: New
+ files.
+ * config/frv/t-frv (LIB2ADD): Set.
+ * config/frv/t-linux (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/h8300/clzhi2.c, config/h8300/ctzhi2.c,
+ config/h8300/fixunssfsi.c, config/h8300/parityhi2.c,
+ config/h8300/popcounthi2.c: New files.
+ * config/h8300/t-h8300 (LIB2ADD, HOST_LIBGCC2_CFLAGS): Set.
+ * config/i386/gthr-win32.c: New file.
+ * config/i386/t-cygming (LIBGCC2_INCLUDES): Set.
+ * config/i386/t-cygwin: Likewise.
+ * config/i386/t-darwin, config/i386/t-darwin64,
+ config/i386/t-gthr-win32, config/i386/t-interix: New files.
+ * config/i386/t-nto (HOST_LIBGCC2_CFLAGS): Set.
+ (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/i386/t-sol2 (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/ia64/quadlib.c: New file.
+ * config/ia64/t-hpux (LIB2ADD): Set.
+ * config/ia64/t-ia64: Add comment.
+ * config/iq2000/lib2funcs.c, config/iq2000/t-iq2000: New files.
+ * config/lm32/t-uclinux (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ (HOST_LIBGCC2_CFLAGS): Append, remove -fPIC.
+ * config/m32c/lib2funcs.c, config/m32c/trapv.c: New files.
+ * config/m32c/t-m32c (LIB2ADD): Set.
+ * config/m32r/t-linux (HOST_LIBGCC2_CFLAGS): Set.
+ * config/m32r/t-m32r: Likewise.
+ * config/m68k/fpgnulib.c: New file.
+ * config/m68k/t-floatlib (LIB2ADD): Set.
+ (xfgnulib.c): New target.
+ * config/mcore/t-mcore (HOST_LIBGCC2_CFLAGS): Set.
+ * config/mep/lib2funcs.c, config/mep/tramp.c: New files.
+ * config/mep/t-mep (LIB2ADD): Set.
+ * config/microblaze/divsi3.asm: Rename to divsi3.S.
+ * config/microblaze/moddi3.asm: Rename to moddi3.S.
+ * config/microblaze/modsi3.asm: Rename to modsi3.S.
+ * config/microblaze/muldi3_hard.asm: Rename to muldi3_hard.S.
+ * config/microblaze/mulsi3.asm: Rename to mulsi3.S.
+ * config/microblaze/stack_overflow_exit.asm: Rename to
+ stack_overflow_exit.S.
+ * config/microblaze/udivsi3.asm: Rename to udivsi3.S.
+ * config/microblaze/umodsi3.asm: Rename to umodsi3.S.
+ * config/microblaze/t-microblaze (LIB2ADD): Reflect this.
+ * config/mips/t-elf, config/mips/t-vr, config/mips/vr4120-div.S:
+ New files.
+ * config/mips/t-mips (LIB2_SIDITI_CONV_FUNCS): Set.
+ * config/mmix/t-mmix (HOST_LIBGCC2_CFLAGS): Set.
+ * config/pa/fptr.c, config/pa/lib2funcs.S,
+ config/pa/linux-atomic.c, config/pa/quadlib.c: New files.
+ * config/pa/t-linux (HOST_LIBGCC2_CFLAGS): Set.
+ (LIB2ADD, LIB2ADD_ST): Set.
+ * config/pa/t-hpux, config/pa/t-hpux10, config/pa/t-pa64: New files.
+ * config/pa/t-linux (HOST_LIBGCC2_CFLAGS, LIB2ADD, LIB2ADD_ST):
+ Set.
+ * config/pa/t-linux64 (LIB2ADD_ST, HOST_LIBGCC2_CFLAGS): Set.
+ * config/pdp11/t-pdp11: New file.
+ * config/picochip/libgccExtras/adddi3.S,
+ config/picochip/libgccExtras/ashlsi3.S,
+ config/picochip/libgccExtras/ashrsi3.S,
+ config/picochip/libgccExtras/clzsi2.S,
+ config/picochip/libgccExtras/cmpsi2.S,
+ config/picochip/libgccExtras/divmod15.S,
+ config/picochip/libgccExtras/divmodhi4.S,
+ config/picochip/libgccExtras/divmodsi4.S,
+ config/picochip/libgccExtras/lshrsi3.S,
+ config/picochip/libgccExtras/parityhi2.S,
+ config/picochip/libgccExtras/popcounthi2.S,
+ config/picochip/libgccExtras/subdi3.S,
+ config/picochip/libgccExtras/ucmpsi2.S,
+ config/picochip/libgccExtras/udivmodhi4.S,
+ config/picochip/libgccExtras/udivmodsi4.S: New files.
+ * config/picochip/t-picochip (LIB2ADD, HOST_LIBGCC2_CFLAGS)
+ (LIBGCC2_DEBUG_CFLAGS, RANLIB_FOR_TARGET): Set.
+ * config/rs6000/crtresfpr.S, config/rs6000/crtresgpr.S,
+ config/rs6000/crtresxfpr.S, config/rs6000/crtresxgpr.S,
+ config/rs6000/crtsavfpr.S, config/rs6000/crtsavgpr.S,
+ config/rs6000/darwin-asm.h, config/rs6000/darwin-fpsave.S,
+ config/rs6000/darwin-gpsave.S, config/rs6000/darwin-tramp.S,
+ config/rs6000/darwin-vecsave.S, config/rs6000/darwin-world.S: New
+ files.
+ * config/rs6000/t-darwin (LIB2ADD, LIB2ADD_ST)
+ (HOST_LIBGCC2_CFLAGS): Set.
+ * config/rs6000/t-darwin64: New file.
+ * config/rs6000/t-linux64 (HOST_LIBGCC2_CFLAGS): Set.
+ * config/rs6000/t-lynx, config/rs6000/t-netbsd: New files.
+ * config/rs6000/t-ppccomm (LIB2ADD): Add
+ $(srcdir)/config/rs6000/tramp.S.
+ (LIB2ADD_ST): Use $(srcdir)/config/rs6000 to refer to sources.
+ Add $(srcdir)/config/rs6000/eabi.S.
+ (crtsavfpr.S, crtresfpr.S, crtsavgpr.S, crtresgpr.S, crtresxfpr.S)
+ (crtresxgpr.S, e500crtres32gpr.S, e500crtres64gpr.S)
+ (e500crtres64gprctr.S, e500crtrest32gpr.S, e500crtrest64gpr.S)
+ (e500crtresx32gpr.S, e500crtresx64gpr.S, e500crtsav32gpr.S)
+ (e500crtsav64gpr.S, e500crtsav64gprctr.S, e500crtsavg32gpr.S)
+ (e500crtsavg64gpr.S, e500crtsavg64gprctr.S): Remove.
+ * config/rs6000/tramp.S: New file.
+ * config/s390/t-tpf: Remove.
+ * config/sh/linux-atomic.S: New file.
+ * config/sh/t-linux (LIB2ADD): Set.
+ (HOST_LIBGCC2_CFLAGS): Append, remove -fpic.
+ * config/sh/t-netbsd (LIB2ADD, HOST_LIBGCC2_CFLAGS): Set.
+ * config/sh/t-sh (unwind-dw2-Os-4-200.o): Use $(srcdir) to refer
+ to unwind-dw2.c.
+ (HOST_LIBGCC2_CFLAGS): Set.
+ * config/sparc/t-sol2 (CRTSTUFF_T_CFLAGS): Use $(PICFLAG).
+ * config/spu/divmodti4.c, config/spu/divv2df3.c,
+ config/spu/float_disf.c, config/spu/float_unsdidf.c,
+ config/spu/float_unsdisf.c, config/spu/float_unssidf.c,
+ config/spu/mfc_multi_tag_release.c,
+ config/spu/mfc_multi_tag_reserve.c, config/spu/mfc_tag_release.c,
+ config/spu/mfc_tag_reserve.c, config/spu/mfc_tag_table.c,
+ config/spu/multi3.c: New files.
+ * config/spu/t-elf (LIB2ADD, LIB2ADD_ST, LIB2_SIDITI_CONV_FUNCS)
+ (HOST_LIBGCC2_CFLAGS): Set.
+ * config/stormy16/ashlsi3.c, config/stormy16/ashrsi3.c,
+ config/stormy16/clzhi2.c, config/stormy16/cmpsi2.c,
+ config/stormy16/ctzhi2.c, config/stormy16/divsi3.c,
+ config/stormy16/ffshi2.c, config/stormy16/lib2.c,
+ config/stormy16/lshrsi3.c, config/stormy16/modsi3.c,
+ config/stormy16/parityhi2.c, config/stormy16/popcounthi2.c,
+ config/stormy16/t-stormy16, config/stormy16/ucmpsi2.c,
+ config/stormy16/udivmodsi4.c, config/stormy16/udivsi3.c,
+ config/stormy16/umodsi3.c: New files.
+ * config/xtensa/lib2funcs.S: New file.
+ * config/xtensa/t-elf (HOST_LIBGCC2_CFLAGS): Set.
+ * config/xtensa/t-xtensa (LIB2ADD): Set.
+ * config.host (*-*-darwin*): Add t-libgcc-pic to tmake_file.
+ (*-*-freebsd*): Add t-freebsd, t-libgcc-pic to tmake_file.
+ Add t-freebsd-thread to tmake_file for posix threads.
+ (*-*-linux*, frv-*-*linux*, *-*-kfreebsd*-gnu, *-*-knetbsd*-gnu)
+ (*-*-gnu*, *-*-kopensolaris*-gnu): Add t-libgcc-pic to tmake_file.
+ (*-*-lynxos*): Likewise.
+ (*-*-netbsd*): Likewise.
+ (*-*-openbsd*): Likewise.
+ Add t-openbsd-thread to tmake_file for posix threads.
+ (*-*-solaris2*): Add t-libgcc-pic to tmake_file.
+ (*-*-vxworks*): Set tmake_file.
+ (alpha*-*-linux*): Add alpha/t-alpha, alpha/t-ieee to tmake_file.
+ (alpha*-*-freebsd*): Likewise.
+ (alpha*-*-netbsd*): Likewise.
+ (alpha*-*-openbsd*): Likewise.
+ (alpha*-dec-osf5.1*): Remove qrnnd.o, gthr-posix.o from extra_parts.
+ (alpha64-dec-*vms*): Add alpha/t-alpha, alpha/t-ieee to tmake_file.
+ (alpha*-dec-*vms*): Likewise.
+ (arm*-*-netbsdelf*): Add arm/t-netbsd to tmake_file.
+ (bfin*-elf*): Add bfin/t-elf to tmake_file.
+ (bfin*-uclinux*): Likewise.
+ (bfin*-linux-uclibc*): Likewise.
+ (crisv32-*-elf): Add cris/t-cris to tmake_file.
+ (crisv32-*-none): Likewise.
+ (cris-*-elf): Likewise.
+ (cris-*-none): Likewise.
+ (cris-*-linux*, crisv32-*-linux*): Likewise.
+ (hppa[12]*-*-hpux10*): Add pa/t-hpux pa/t-hpux10, t-libgcc-pic to
+ tmake_file.
+ (hppa*64*-*-hpux11*): Add pa/t-hpux, pa/t-pa64, t-libgcc-pic to
+ tmake_file.
+ (hppa[12]*-*-hpux11*): Add pa/t-hpux, t-libgcc-pic to tmake_file.
+ (i[34567]86-*-elf*): Add t-libgcc-pic to tmake_file.
+ (x86_64-*-elf*): Likewise.
+ (i[34567]86-*-nto-qnx*): Likewise.
+ (i[34567]86-*-mingw*): Add i386/t-gthr-win32 to tmake_file for
+ win32 threads.
+ (x86_64-*-mingw*): Likewise.
+ (i[34567]86-*-interix3*): Add i386/t-interix to tmake_file.
+ (lm32-*-uclinux*): Add t-libgcc-pic to tmake_file.
+ (mipsisa32-*-elf*, mipsisa32el-*-elf*, mipsisa32r2-*-elf*)
+ (mipsisa32r2el-*-elf*, mipsisa64-*-elf*, mipsisa64el-*-elf*)
+ (mipsisa64r2-*-elf*, mipsisa64r2el-*-elf*): Add mips/t-elf to
+ tmake_file.
+ (mipsisa64sr71k-*-elf*): Likewise.
+ (mipsisa64sb1-*-elf*, mipsisa64sb1el-*-elf*): Likewise.
+ (mips-*-elf*, mipsel-*-elf*): Likewise.
+ (mips64-*-elf*, mips64el-*-elf*): Likewise.
+ (mips64orion-*-elf*, mips64orionel-*-elf*): Likewise.
+ (mips*-*-rtems*): Likewise.
+ (mips64vr-*-elf*, mips64vrel-*-elf*): Add mips/t-elf, mips/t-vr
+ to tmake_file.
+ (pdp11-*-*): Add pdp11/t-pdp11 to tmake_file.
+ (powerpc64-*-darwin*): Add rs6000/t-darwin64 to tmake_file.
+ (s390x-ibm-tpf*): Add t-libgcc-pic to tmake_file.
+ (spu-*-elf*): Likewise.
+ (tic6x-*-uclinux): Add t-libgcc-pic to tmake_file.
+
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * Makefile.in ($(lib1asmfuncs-o), $(lib1asmfuncs-s-o)): Use
+ $(srcdir) to refer to $(LIB1ASMSRC).
+ Use $<.
+ * config/arm/bpabi-v6m.S, config/arm/bpabi.S,
+ config/arm/ieee754-df.S, config/arm/ieee754-sf.S,
+ config/arm/lib1funcs.S: New files.
+ * config/arm/libunwind.S [!__symbian__]: Use lib1funcs.S.
+ * config/arm/t-arm: New file.
+ * config/arm/t-bpabi (LIB1ASMFUNCS): Set.
+ * config/arm/t-elf, config/arm/t-linux, config/arm/t-linux-eabi,
+ config/arm/t-strongarm-elf: New files.
+ * config/arm/t-symbian (LIB1ASMFUNCS): Set.
+ * config/arm/t-vxworks, config/arm/t-wince-pe: New files.
+ * config/avr/lib1funcs.S: New file.
+ * config/avr/t-avr (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/bfin/lib1funcs.S, config/bfin/t-bfin: New files.
+ * config/c6x/lib1funcs.S: New file.
+ * config/c6x/t-elf (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/fr30/lib1funcs.S, config/fr30/t-fr30: New files.
+ * config/frv/lib1funcs.S: New file.
+ * config/frv/t-frv (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/h8300/lib1funcs.S, config/h8300/t-h8300: New files.
+ * config/i386/cygwin.S, config/i386/t-chkstk: New files.
+ * config/ia64/__divxf3.asm: Rename to ...
+ * config/ia64/__divxf3.S: ... this.
+ Adapt lib1funcs.asm filename.
+ * config/ia64/_fixtfdi.asm: Rename to ...
+ * config/ia64/_fixtfdi.S: ... this.
+ Adapt lib1funcs.asm filename.
+ * config/ia64/_fixunstfdi.asm: Rename to ...
+ * config/ia64/_fixunstfdi.S: ... this.
+ Adapt lib1funcs.asm filename.
+ * config/ia64/_floatditf.asm: Rename to ...
+ * config/ia64/_floatditf.S: ... this.
+ Adapt lib1funcs.asm filename.
+ * config/ia64/lib1funcs.S: New file.
+ * config/ia64/t-hpux (LIB1ASMFUNCS): Set.
+ * config/ia64/t-ia64 (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/ia64/t-softfp-compat (libgcc1-tf-compats): Adapt suffix.
+ * config/m32c/lib1funcs.S, config/m32c/t-m32c: New files.
+ * config/m68k/lb1sf68.S, config/m68k/t-floatlib: New files.
+ * config/mcore/lib1funcs.S, config/mcore/t-mcore: New files.
+ * config/mep/lib1funcs.S: New file.
+ * config/mep/t-mep (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/mips/mips16.S: New file.
+ * config/mips/t-mips16 (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/pa/milli64.S: New file.
+ * config/pa/t-linux, config/pa/t-linux64: New files.
+ * config/picochip/lib1funcs.S: New file.
+ * config/picochip/t-picochip (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config/sh/lib1funcs.S, config/sh/lib1funcs.h: New files.
+ * config/sh/t-linux (LIB1ASMFUNCS_CACHE): Set.
+ * config/sh/t-netbsd: New file.
+ * config/sh/t-sh (LIB1ASMSRC, LIB1ASMFUNCS, LIB1ASMFUNCS_CACHE): Set.
+ Use $(srcdir) to refer to lib1funcs.S, adapt filename.
+ * config/sh/t-sh64: New file.
+ * config/sparc/lb1spc.S: New file.
+ * config/sparc/t-softmul (LIB1ASMSRC): Adapt sparc/lb1spc.asm
+ filename.
+ * config/v850/lib1funcs.S, config/v850/t-v850: New files.
+ * config/vax/lib1funcs.S, config/vax/t-linux: New files.
+ * config/xtensa/ieee754-df.S, config/xtensa/ieee754-sf.S,
+ config/xtensa/lib1funcs.S: New files.
+ * config/xtensa/t-xtensa (LIB1ASMSRC, LIB1ASMFUNCS): Set.
+ * config.host (arm-wrs-vxworks): Add arm/t-arm, arm/t-vxworks to
+ tmake_file.
+ (arm*-*-freebsd*): Add arm/t-arm, arm/t-strongarm-elf to tmake_file.
+ (arm*-*-netbsdelf*): Add arm/t-arm to tmake_file.
+ (arm*-*-linux*): Likewise.
+ Add arm/t-elf, arm/t-bpabi, arm/t-linux-eabi to tmake_file for
+ arm*-*-linux-*eabi, add arm/t-linux otherwise.
+ (arm*-*-uclinux*): Add arm/t-arm, arm/t-elf to tmake_file.
+ (arm*-*-ecos-elf): Likewise.
+ (arm*-*-eabi*, arm*-*-symbianelf*): Likewise.
+ (arm*-*-rtems*): Likewise.
+ (arm*-*-elf): Likewise.
+ (arm*-wince-pe*): Add arm/t-arm, arm/t-wince-pe to tmake_file.
+ (avr-*-rtems*): Add to tmake_file, add avr/t-avr.
+ (bfin*-elf*): Add bfin/t-bfin to tmake_file.
+ (bfin*-uclinux*): Likewise.
+ (bfin*-linux-uclibc*): Likewise.
+ (bfin*-rtems*): Likewise.
+ (bfin*-*): Likewise.
+ (fido-*-elf): Merge into m68k-*-elf*.
+ (fr30-*-elf): Add fr30/t-fr30 to tmake_file.
+ (frv-*-*linux*): Add frv/t-frv to tmake_file.
+ (h8300-*-rtems*): Add h8300/t-h8300 to tmake_file.
+ (h8300-*-elf*): Likewise.
+ (hppa*64*-*-linux*): Add pa/t-linux, pa/t-linux64 to tmake_file.
+ (hppa*-*-linux*): Add pa/t-linux to tmake_file.
+ (i[34567]86-*-cygwin*): Add i386/t-chkstk to tmake_file.
+ (i[34567]86-*-mingw*): Likewise.
+ (x86_64-*-mingw*): Likewise.
+ (i[34567]86-*-interix3*): Likewise.
+ (ia64*-*-hpux*): Add ia64/t-ia64, ia64/t-hpux to tmake_file.
+ (ia64-hp-*vms*): Add ia64/t-ia64 to tmake_file.
+ (m68k-*-elf*): Also handle fido-*-elf.
+ Add m68k/t-floatlib to tmake_file.
+ (m68k-*-uclinux*): Add m68k/t-floatlib to tmake_file.
+ (m68k-*-linux*): Likewise.
+ (m68k-*-rtems*): Likewise.
+ (mcore-*-elf): Add mcore/t-mcore to tmake_file.
+ (sh-*-elf*, sh[12346l]*-*-elf*): Add sh/t-sh64 to tmake_file for
+ sh64*-*-*.
+ (sh-*-linux*, sh[2346lbe]*-*-linux*): Add sh/t-sh to tmake_file.
+ Add sh/t-sh64 to tmake_file for sh64*-*-linux*.
+ (sh-*-netbsdelf*, shl*-*-netbsdelf*, sh5-*-netbsd*)
+ (sh5l*-*-netbsd*, sh64-*-netbsd*, sh64l*-*-netbsd*): Add sh/t-sh,
+ sh/t-netbsd to tmake_file.
+ Add sh/t-sh64 to tmake_file for sh5*-*-netbsd*, sh64*-netbsd*.
+ (sh-*-rtems*): Add sh/t-sh to tmake_file.
+ (sh-wrs-vxworks): Likewise.
+ (sparc-*-linux*): Add sparc/t-softmul to tmake_file except for
+ *-leon[3-9]*.
+ (v850*-*-*): Add v850/t-v850 to tmake_file.
+ (vax-*-linux*): Add vax/t-linux to tmake_file.
+ (m32c-*-elf*, m32c-*-rtems*): Add m32c/t-m32c to tmake_file.
+
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * crtstuff.c: New file.
+ * Makefile.in (CRTSTUFF_CFLAGS): Define.
+ (CRTSTUFF_T_CFLAGS): Define.
+ (extra-parts, INSTALL_PARTS): Remove conditional assignments.
+ (crtbegin$(objext), crtend$(objext), crtbeginS$(objext))
+ (crtendS$(objext), crtbeginT.o): Use $(srcdir) to refer to
+ crtstuff.c.
+ Use $<.
+ (crtbeginT.o): Use $(objext).
+ [!CUSTOM_CRTIN] (crti$(objext), crtn$(objext)): New rules.
+ (libgcc-extra-parts): Don't compare EXTRA_PARTS, GCC_EXTRA_PARTS.
+ (gcc-extra-parts): Remove.
+ * config.host (*-*-freebsd*): Add t-crtstuff-pic to tmake_file.
+ Set extra_parts.
+ (*-*-linux*, frv-*-*linux*, *-*-kfreebsd*-gnu, *-*-knetbsd*-gnu,
+ *-*-gnu*): Also handle *-*-kopensolaris*-gnu.
+ Add t-crtstuff-pic to tmake_file.
+ (*-*-lynxos*): New case.
+ Set tmake_file, extra_parts.
+ (*-*-netbsd*): Add t-crtstuff-pic to tmake_file.
+ Set extra_parts for *-*-netbsd*1.[7-9]*, *-*-netbsd[2-9]*,
+ *-*-netbsdelf[2-9]*.
+ (*-*-openbsd*): Add t-crtstuff-pic to tmake_file.
+ (*-*-rtems*): Set extra_parts.
+ (*-*-solaris2*): Remove t-crtin from tmake_file for Solaris < 10.
+ (*-*-uclinux*): New case.
+ Set extra_parts.
+ (*-*-vms*): New case.
+ Set tmake_file, extra_parts.
+ (*-*-elf): Set extra_parts.
+ (alpha*-*-freebsd*): Add crtbeginT.o to extra_parts.
+ (alpha64-dec-*vms*): Append to tmake_file, remove vms/t-vms,
+ vms/t-vms64.
+ Set extra_parts.
+ (alpha*-dec-*vms*): Append to tmake_file, remove vms/t-vms.
+ Set extra_parts.
+ (arm*-*-freebsd*): Add t-crtin to tmake_file.
+ Add crtbegin.o, crtend.o, crti.o, crtn.o to extra_parts.
+ (arm-wrs-vxworks): Append to tmake_file.
+ Set extra_parts.
+ (arm*-*-uclinux*): Set extra_parts.
+ (arm*-*-ecos-elf): Likewise.
+ (arm*-*-eabi*, arm*-*-symbianelf*): Set extra_parts for
+ arm*-*-eabi*.
+ (arm*-*-rtems*): Set extra_parts.
+ (arm*-*-elf): Likewise.
+ (avr-*-rtems*): Clear extra_parts.
+ (bfin*-elf*): Add bfin/t-crtlibid, bfin/t-crtstuff to tmake_file.
+ Set extra_parts.
+ (bfin*-uclinux*): Likewise.
+ (bfin*-linux-uclibc*): Add bfin/t-crtstuff to tmake_file.
+ (bfin*-rtems*): Append to tmake_file.
+ Set extra_parts.
+ (bfin*-*): Likewise.
+ (crisv32-*-elf, crisv32-*-none, cris-*-elf, cris-*-none): Split into ...
+ (crisv32-*-elf): ... this.
+ (crisv32-*-none): ... this.
+ (cris-*-elf, cris-*-none): New cases.
+ Add cris/t-elfmulti to tmake_file.
+ (fr30-*-elf): Append to tmake_file.
+ Set extra_parts.
+ (frv-*-elf): Append to tmake_file, add frv/t-frv.
+ Set extra_parts.
+ (h8300-*-rtems*): Append to tmake_file.
+ Set extra_parts.
+ (h8300-*-elf*): Likewise.
+ (hppa*64*-*-hpux11*): Add pa/t-stublib, pa/t-stublib64 to tmake_file.
+ Set extra_parts.
+ (hppa[12]*-*-hpux11*): Add pa/t-stublib to tmake_file.
+ Set extra_parts.
+ (i[34567]86-*-elf*): Add i386/t-crtstuff, t-crtstuff-pic to tmake_file.
+ (x86_64-*-elf*): Likewise.
+ (i[34567]86-*-freebsd*): Add i386/t-crtstuff to tmake_file.
+ (x86_64-*-freebsd*): Likewise.
+ (x86_64-*-netbsd*): Likewise.
+ (i[34567]86-*-linux*): Likewise.
+ (i[34567]86-*-kfreebsd*-gnu, i[34567]86-*-knetbsd*-gnu)
+ (i[34567]86-*-gnu*, i[34567]86-*-kopensolaris*-gnu): Likewise.
+ (x86_64-*-linux*): Likewise.
+ (x86_64-*-kfreebsd*-gnu, x86_64-*-knetbsd*-gnu): Likewise.
+ (i[34567]86-*-lynxos*): Add t-crtstuff-pic, i386/t-crtstuff to
+ tmake_file.
+ Set extra_parts.
+ (i[34567]86-*-nto-qnx*): Set tmake_file, extra_parts.
+ (i[34567]86-*-rtems*): Append to tmake_file, remove t-crtin.
+ Append to extra_parts, remove crtbegin.o, crtend.o.
+ (ia64*-*-elf*): Append to extra_parts, remove crtbegin.o, crtend.o.
+ (ia64*-*-freebsd*): Append to extra_parts, remove crtbegin.o,
+ crtend.o, crtbeginS.o, crtendS.o.
+ (ia64*-*-linux*): Comment extra_parts.
+ (ia64-hp-*vms*): Append to tmake_file, remove vms/t-vms, vms/t-vms64.
+ Set extra_parts.
+ (iq2000*-*-elf*): Clear extra_parts.
+ (lm32-*-elf*, lm32-*-rtems*): Add t-crtin to tmake_file.
+ (lm32-*-uclinux*): Add to extra_parts, remove crtbegin.o, crtend.o.
+ (m32r-*-elf*, m32r-*-rtems*): Split off ...
+ (m32r-*-rtems*): ... this.
+ Add m32r/t-m32r to tmake_file.
+ (m68k-*-elf*): Add t-crtin to tmake_file.
+ (m68k-*-rtems*): Add crti.o, crtn.o to extra_parts.
+ (mcore-*-elf): Likewise.
+ (microblaze*-*-*): Set extra_parts.
+ (mips*-sde-elf*): New case.
+ Set tmake_file, extra_parts.
+ (mipsisa32-*-elf*, mipsisa32el-*-elf*, mipsisa32r2-*-elf*)
+ (mipsisa32r2el-*-elf*, mipsisa64-*-elf*, mipsisa64el-*-elf*)
+ (mipsisa64r2-*-elf*, mipsisa64r2el-*-elf*): Add mips/t-crtstuff to
+ tmake_file.
+ Set extra_parts.
+ (mipsisa64sr71k-*-elf*): Likewise.
+ (mipsisa64sb1-*-elf*, mipsisa64sb1el-*-elf*): Likewise.
+ (mips-*-elf*, mipsel-*-elf*): Likewise.
+ (mips64-*-elf*, mips64el-*-elf*): Likewise.
+ (mips64vr-*-elf*, mips64vrel-*-elf*): Likewise.
+ (mips64orion-*-elf*, mips64orionel-*-elf*): Likewise.
+ (mips*-*-rtems*): Likewise.
+ (mipstx39-*-elf*, mipstx39el-*-elf*): Likewise.
+ (moxie-*-*): Split into ...
+ (moxie-*-elf, moxie-*-uclinux*): ... this.
+ Add to extra_parts, remove crtbegin.o, crtend.o.
+ (moxie-*-rtems*): New case.
+ Set tmake_file.
+ Clear extra_parts.
+ (powerpc-*-freebsd*): Add rs6000/t-crtstuff to tmake_file.
+ Set extra_parts.
+ (powerpc-*-netbsd*): Add rs6000/t-netbsd to tmake_file.
+ (powerpc-*-eabispe*): Add rs6000/t-crtstuff, t-crtstuff-pic to
+ tmake_file.
+ Set extra_parts.
+ (powerpc-*-eabisimaltivec*): Add to tmake_file, add rs6000/t-ppccomm,
+ rs6000/t-crtstuff, t-crtstuff-pic to tmake_file.
+ Set extra_parts.
+ (powerpc-*-eabisim*): Likewise.
+ (powerpc-*-elf*): Likewise.
+ (powerpc-*-eabialtivec*): Likewise.
+ (powerpc-xilinx-eabi*): Likewise.
+ (powerpc-*-eabi*): Likewise.
+ (powerpc-*-rtems*): Likewise.
+ (powerpc-*-linux*, powerpc64-*-linux*): Add rs6000/t-crtstuff to
+ tmake_file.
+ Set extra_parts.
+ (powerpc-*-lynxos*): Add to tmake_file.
+ (powerpcle-*-elf*): Add to tmake_file, add rs6000/t-ppccomm,
+ rs6000/t-crtstuff, t-crtstuff-pic.
+ Set extra_parts.
+ (powerpcle-*-eabisim*): Likewise.
+ (powerpcle-*-eabi*): Likewise.
+ (rx-*-elf): Remove extra_parts.
+ (s390x-ibm-tpf*): Set extra_parts.
+ (score-*-elf): Set extra_parts.
+ (sh-*-elf*, sh[12346l]*-*-elf*, sh-*-linux*)
+ (sh[2346lbe]*-*-linux*, sh-*-netbsdelf*, shl*-*-netbsdelf*)
+ (sh5-*-netbsd*, sh5l*-*-netbsd*, sh64-*-netbsd*)
+ (sh64l*-*-netbsd*): Split into ...
+ (sh-*-elf*, sh[12346l]*-*-elf*): ... this.
+ Add t-crtstuff-pic to tmake_file.
+ Set extra_parts.
+ (sh-*-rtems*): Add to tmake_file, add t-crtstuff-pic.
+ Set extra_parts.
+ (sh-wrs-vxworks): Add to tmake_file, add t-crtstuff-pic.
+ (sparc-*-elf*): Remove t-crtin from tmake_file.
+ Add to extra_parts, remove crtbegin.o, crtend.o.
+ (sparc-*-linux*): Add sparc/t-linux64 to tmake_file.
+ (sparc64-*-linux*): Likewise.
+ (sparc-*-rtems*): Remove sparc/t-elf from tmake_file.
+ Add to extra_parts, remove crtbegin.o, crtend.o.
+ (sparc64-*-elf*): Remove t-crtin from tmake_file.
+ Add to extra_parts, remove crtbegin.o, crtend.o.
+ (sparc64-*-rtems*): Remove t-crtin from tmake_file.
+ Add to extra_parts, remove crtbegin.o, crtend.o.
+ (sparc64-*-freebsd*, ultrasparc-*-freebsd*): Add to extra_parts.
+ (sparc64-*-linux*): Add sparc/t-linux64 to tmake_file.
+ (spu-*-elf*): Add to tmake_file, add spu/t-elf.
+ Set extra_parts.
+ (tic6x-*-uclinux): Add c6x/t-uclinux, t-crtstuff-pic to tmake_file.
+ Set extra_parts.
+ (tic6x-*-*): Change to ...
+ (tic6x-*-elf): ... this.
+ Set extra_parts.
+ (xtensa*-*-elf*): Add to tmake_file, add xtensa/t-elf.
+ Set extra_parts.
+ (am33_2.0-*-linux*): Add comment.
+ (mep*-*-*): Add mep/t-mep to tmake_file.
+ Set extra_parts.
+ * config/alpha/t-vms: New file.
+ * config/alpha/vms-dwarf2.S, config/alpha/vms-dwarf2eh.S: New files.
+ * config/arm/crti.S, config/arm/crtn.S: New files.
+ * config/bfin/crti.S, config/bfin/crtn.S: New files.
+ * config/bfin/crtlibid.S: New file.
+ * config/bfin/t-crtlibid, config/bfin/t-crtstuff: New files.
+ * config/c6x/crti.S, config/c6x/crtn.S: New files.
+ * config/c6x/t-elf (CUSTOM_CRTIN): Set.
+ (crti.o, crtn.o): New rules.
+ (CRTSTUFF_T_CFLAGS, CRTSTUFF_T_CFLAGS_S): Set.
+ * config/c6x/t-uclinux: New file.
+ * config/cris/t-elfmulti: New file.
+ * config/cris/t-linux (CRTSTUFF_T_CFLAGS_S): Set.
+ * config/fr30/crti.S, config/fr30/crtn.S: New files.
+ * config/frv/frvbegin.c, config/frv/frvend.c: New files.
+ * config/frv/t-frv: New file.
+ * config/frv/t-linux (CRTSTUFF_T_CFLAGS): Set.
+ * config/h8300/crti.S, config/h8300/crtn.S: New files.
+ * config/i386/cygming-crtbegin.c, config/i386/cygming-crtend.c:
+ New files.
+ * config/i386/t-cygming (crtbegin.o, crtend.o): Use $(srcdir) to
+ refer to cygming-crtbegin.c, cygming-crtend.c.
+ Use $<.
+ * config/i386/t-nto: New file.
+ * config/ia64/crtbegin.S, config/ia64/crtend.S: New files.
+ * config/ia64/crti.S, config/ia64/crtn.S: New files.
+ * config/ia64/t-ia64 (crtbegin.o, crtend.o, crtbeginS.o,
+ crtendS.o): Use $(srcdir) to refer to crtbegin.S, crtend.S.
+ Use .S extension.
+ Use $<.
+ * config/ia64/t-vms (CRTSTUFF_T_CFLAGS, CRTSTUFF_T_CFLAGS_S): Set.
+ (crtinitS.o): New rule.
+ * config/ia64/vms-crtinit.S: New file.
+ * config/lm32/t-elf ($(T)crti.o, $(T)crtn.o): Remove.
+ * config/m32r/initfini.c: New file.
+ * config/m32r/t-linux, config/m32r/t-m32r: New files.
+ * config/m68k/crti.S, config/m68k/crtn.S: New files.
+ * config/mcore/crti.S, config/mcore/crtn.S: New files.
+ * config/mep/t-mep: New file.
+ * config/microblaze/crti.S, config/microblaze/crtn.S: New files.
+ * config/microblaze/t-microblaze (MULTILIB_OPTIONS): Remove.
+ * config/mips/crti.S, config/mips/crtn.S: New files.
+ * config/mips/t-crtstuff: New file.
+ * config/mmix/crti.S, config/mmix/crtn.S: New files.
+ * config/mmix/t-mmix (CRTSTUFF_T_CFLAGS): Set.
+ (CUSTOM_CRTIN): Set.
+ ($(T)crti.o, $(T)crtn.o): Remove $(T),
+ dependencies.
+ Use $(srcdir) to refer to crti.S, crtn.S.
+ Use .S extension, $<.
+ * config/moxie/crti.asm: Rename to ...
+ * config/moxie/crti.S: ... this.
+ * config/moxie/crtn.asm: Rename to ...
+ * config/moxie/crtn.S: ... this.
+ * config/moxie/t-moxie: Remove.
+ * config/pa/stublib.c: New file.
+ * config/pa/t-stublib, config/pa/t-stublib64: New files.
+ * config/rs6000/eabi-ci.S, config/rs6000/eabi-cn.S: New files.
+ * config/rs6000/sol-ci.S, config/rs6000/sol-cn.S: New files.
+ * config/rs6000/t-crtstuff: New file.
+ * config/rs6000/t-ppccomm (EXTRA_PARTS): Remove.
+ (ecrti.S, ecrtn.S, ncrti.S, ncrtn.S): Remove.
+ (ecrti$(objext)): Depend on $(srcdir)/config/rs6000/eabi-ci.S.
+ Make output file explicit.
+ (ecrtn$(objext)): Depend on $(srcdir)/config/rs6000/eabi-cn.S.
+ Make output file explicit.
+ (ncrti$(objext)): Depend on $(srcdir)/config/rs6000/sol-ci.S.
+ Make output file explicit.
+ (ncrtn$(objext)): Depend on $(srcdir)/config/rs6000/sol-cn.S.
+ Make output file explicit.
+ * config/score/crti.S, config/score/crtn.S: New files.
+ * config/sh/crt1.S, config/sh/crti.S, config/sh/crtn.S: New files.
+ * config/sh/lib1funcs-4-300.S, config/sh/lib1funcs-Os-4-200.S: New
+ files.
+ * config/sh/t-sh, config/sh/t-superh: New files.
+ * config/sparc/t-linux64: New file.
+ * config/spu/cache.S, config/spu/cachemgr.c: New files.
+ * config/spu/t-elf (CRTSTUFF_T_CFLAGS): Set.
+ (cachemgr.o, cachemgr_nonatomic.o, libgcc_%.a, cache8k.o)
+ (cache16k.o, cache32k.o, cache64k.o, cache128k.o): New rules.
+ * config/t-crtin: Remove.
+ * config/t-crtstuff-pic: New file.
+ * config/t-sol2 (CUSTOM_CRTIN): Set.
+ (crti.o, crtn.o): New rules.
+ * config/vms/t-vms: New file.
+ * config/vms/vms-ucrt0.c: New file.
+ * config/xtensa/t-elf: New file.
+
+2011-11-02 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * Makefile.in (SHLIB_NM_FLAGS): Set.
+ * mkmap-flat.awk, mkmap-symver.awk: New files.
+ * configure.ac (libgcc_cv_lib_sjlj_exceptions): Check for SjLj
+ exceptions.
+ * configure: Regenerate.
+ * config/libgcc-glibc.ver: New file.
+ * config/libgcc-libsystem.ver: New file.
+ * config/t-libunwind (SHLIB_LC): Set.
+ * config/t-linux: New file.
+ * config/t-slibgcc (INSTALL_SHLIB): New.
+ (SHLIB_INSTALL): Use it.
+ * config/t-slibgcc-darwin (SHLIB_MKMAP): Use $(srcdir) to refer
+ to mkmap-symver.awk.
+ (SHLIB_MAPFILES): Don't append, adapt pathname.
+ (SHLIB_VERPFX): Set.
+ * config/t-slibgcc-elf-ver (SHLIB_MKMAP): Use $(srcdir) to refer
+ to mkmap-symver.awk.
+ * config/t-slibgcc-gld-nover, config/t-slibgcc-hpux,
+ config/t-slibgcc-libgcc, config/t-slibgcc-vms: New files.
+ * config/alpha/libgcc-alpha-ldbl.ver, config/alpha/t-linux: New files.
+ * config/alpha/t-slibgcc-osf (SHLIB_MKMAP): Use $(srcdir) to refer
+ to mkmap-flat.awk.
+ * config/arm/t-bpabi (SHLIB_MAPFILES): Set.
+ * config/bfin/libgcc-glibc.ver, config/bfin/t-linux: New files.
+ * config/c6x/libgcc-eabi.ver, config/c6x/t-elf: New files.
+ * config/cris/libgcc-glibc.ver, config/cris/t-linux: New files.
+ * config/frv/libgcc-frv.ver, config/frv/t-linux: New files.
+ * config/i386/libgcc-darwin.10.4.ver,
+ config/i386/libgcc-darwin.10.5.ver, config/i386/libgcc-glibc.ver:
+ New files.
+ * config/i386/t-darwin: Remove.
+ * config/i386/t-darwin64: Likewise.
+ * config/i386/t-dw2-eh, config/i386/t-sjlj-eh: New files.
+ * config/i386/t-slibgcc-cygming, config/i386/t-cygwin,
+ config/i386/t-dlldir, config/i386/t-dlldir-x: New files.
+ * config/i386/t-linux: New file.
+ * config/i386/t-mingw32: New file.
+ * config/ia64/libgcc-glibc.ver, config/ia64/libgcc-ia64.ver: New files.
+ * config/ia64/t-glibc: Rename to ...
+ * config/ia64/t-linux: ... this.
+ (SHLIB_MAPFILES): Set.
+ * config/ia64/t-glibc-libunwind: Rename to ...
+ * config/ia64/t-linux-libunwind: ... this.
+ * config/ia64/t-ia64 (SHLIB_MAPFILES): Set.
+ * config/ia64/t-slibgcc-hpux: New file.
+ * config/m32r/libgcc-glibc.ver, config/m32r/t-linux: New files.
+ * config/m68k/t-slibgcc-elf-ver: New file.
+ * config/mips/t-mips16 (SHLIB_MAPFILES): Set.
+ * config/mips/t-slibgcc-irix (SHLIB_MKMAP): Use $(srcdir) to refer
+ to mkmap-flat.awk.
+ * config/pa/t-slibgcc-hpux: New file.
+ * config/pa/t-slibgcc-dwarf-ver, config/pa/t-slibgcc-sjlj-ver: New
+ files.
+ * config/rs6000/libgcc-darwin.10.4.ver,
+ config/rs6000/libgcc-darwin.10.5.ver: New files.
+ * config/rs6000/libgcc-ppc-glibc.ver: Rename to
+ config/rs6000/libgcc-glibc.ver.
+ * config/rs6000/libgcc-ppc64.ver: Rename to
+ config/rs6000/libgcc-ibm-ldouble.ver.
+ * config/rs6000/t-darwin (SHLIB_VERPFX): Remove.
+ * config/rs6000/t-ibm-ldouble (SHLIB_MAPFILES): Adapt filename.
+ * config/rs6000/t-ldbl128: Rename to ...
+ * config/rs6000/t-linux: ... this.
+ (SHLIB_MAPFILES): Adapt filename.
+ * config/rs6000/t-slibgcc-aix: New file.
+ * config/sh/libgcc-excl.ver, config/sh/libgcc-glibc.ver: New files.
+ * config/sh/t-linux (SHLIB_MAPFILES): Use $(srcdir) to refer to
+ libgcc-excl.ver, libgcc-glibc.ver.
+ (SHLIB_LINK, SHLIB_INSTALL): Remove.
+ * config/sparc/libgcc-glibc.ver: New file.
+ * config/sparc/t-linux: New file.
+ * config/xtensa/libgcc-glibc.ver, config/xtensa/t-linux: New files.
+ * config.host (*-*-freebsd*): Add t-slibgcc, t-slibgcc-gld,
+ t-slibgcc-elf-ver to tmake_file.
+ Add t-slibgcc-nolc-override to tmake_file for posix threads on
+ *-*-freebsd[34].
+ (*-*-linux*, frv-*-*linux*, *-*-kfreebsd*-gnu, *-*-knetbsd*-gnu,
+ *-*-gnu*, *-*-kopensolaris*-gnu): Add t-slibgcc, t-slibgcc-gld,
+ t-slibgcc-elf-ver, t-linux to tmake_file.
+ (*-*-netbsd*): Add t-slibgcc, t-slibgcc-gld, t-slibgcc-elf-ver to
+ tmake_file.
+ (alpha*-*-linux*): Add alpha/t-linux to tmake_file.
+ (alpha64-dec-*vms*): Add t-slibgcc-vms to tmake_file.
+ (alpha*-dec-*vms*): Likewise.
+ (arm*-*-freebsd*): Append to tmake_file.
+ (arm*-*-netbsdelf*): Add t-slibgcc-gld-nover to tmake_file.
+ (arm*-*-linux*): Add t-slibgcc-libgcc to tmake_file for
+ arm*-*-linux-*eabi.
+ (arm*-*-eabi*, arm*-*-symbianelf*): Add t-slibgcc-nolc-override to
+ tmake_file for arm*-*-symbianelf*.
+ (bfin*-linux-uclibc*): Append to tmake_file, add bfin/t-linux.
+ (cris-*-linux*, crisv32-*-linux*): Append to tmake_file, add
+ cris/t-linux.
+ (frv-*-*linux*): Append to tmake_file, add frv/t-linux.
+ (hppa*-*-linux*): Add t-slibgcc-libgcc, pa/t-slibgcc-sjlj-ver,
+ pa/t-slibgcc-dwarf-ver to tmake_file.
+ (hppa[12]*-*-hpux10*): Add t-slibgcc, pa/t-slibgcc-sjlj-ver,
+ pa/t-slibgcc-dwarf-ver, t-slibgcc-hpux, pa/t-slibgcc-hpux to tmake_file.
+ (hppa*64*-*-hpux11*): Likewise.
+ (hppa[12]*-*-hpux11*): Likewise.
+ (x86_64-*-darwin*): Don't override tmake_file, but only keep
+ i386/t-crtpc, i386/t-crtfm.
+ (i[34567]86-*-cygwin*): Set tmake_eh_file, tmake_dlldir_file.
+ Prepend $tmake_eh_file, $tmake_dlldir_file, i386/t-slibgcc-cygming
+ to tmake_file.
+ Add i386/t-cygwin to tmake_file.
+ Prepend i386/t-mingw-pthread to tmake_file for posix threads.
+ (i[34567]86-*-mingw*): Set tmake_eh_file, tmake_dlldir_file.
+ Prepend $tmake_eh_file, $tmake_dlldir_file, i386/t-slibgcc-cygming
+ to tmake_file.
+ Add i386/t-mingw32 to tmake_file.
+ (x86_64-*-mingw*): Likewise.
+ (ia64*-*-freebsd*): Append to tmake_file.
+ (ia64*-*-linux*): Append to tmake_file.
+ Replace ia64/t-glibc by ia64/t-linux.
+ Replace ia64/t-glibc-libunwind by ia64/t-linux-libunwind unless
+ using system libunwind.
+ (ia64*-*-hpux*): Add t-slibgcc, ia64/t-slibgcc-hpux,
+ t-slibgcc-hpux to tmake_file.
+ (ia64-hp-*vms*): Add t-slibgcc-vms to tmake_file.
+ (m32r-*-linux*): Append to tmake_file, add m32r/t-linux.
+ (m32rle-*-linux*): Likewise.
+ (m68k-*-linux*): Add m68k/t-slibgcc-elf-ver to tmake_file unless
+ sjlj exceptions.
+ (microblaze*-linux*): New case.
+ Append to tmake_file, add t-slibgcc-nolc-override.
+ (powerpc-*-freebsd*): Add t-slibgcc-libgcc to tmake_file.
+ (powerpc-*-linux*, powerpc64-*-linux*): Likewise.
+ Replace rs6000/t-ldbl128 by rs6000/t-linux in tmake_file.
+ (rs6000-ibm-aix4.[3456789]*, powerpc-ibm-aix4.[3456789]*): Add
+ rs6000/t-slibgcc-aix to tmake_file.
+ (rs6000-ibm-aix5.1.*, powerpc-ibm-aix5.1.*): Likewise.
+ (rs6000-ibm-aix[56789].*, powerpc-ibm-aix[56789].*): Likewise.
+ (sh-*-elf*, sh[12346l]*-*-elf*, sh-*-linux*)
+ (sh[2346lbe]*-*-linux*, sh-*-netbsdelf*, shl*-*-netbsdelf*)
+ (sh5-*-netbsd*, sh5l*-*-netbsd*, sh64-*-netbsd*)
+ (sh64l*-*-netbsd*): Add t-slibgcc-libgcc to tmake_file for
+ sh*-*-linux*.
+ (sparc-*-linux*): Append to tmake_file for *-leon*.
+ Add sparc/t-linux to tmake_file for non-Leon targets.
+ (sparc64-*-linux*): Add sparc/t-linux to tmake_file.
+ (tic6x-*-uclinux): New case.
+ Add t-slibgcc, t-slibgcc-gld, t-slibgcc-elf-ver to tmake_file.
+ (tic6x-*-*): Add c6x/t-elf to tmake_file.
+ (xtensa*-*-linux*): Append to tmake_file, add xtensa/t-linux.
+ (am33_2.0-*-linux*): Append to tmake_file.
+ (i[34567]86-*-linux*, x86_64-*-linux*, i[34567]86-*-kfreebsd*-gnu)
+ (i[34567]86-*-knetbsd*-gnu, i[34567]86-*-gnu*): Also handle
+ x86_64-*-kfreebsd*-gnu.
+ Add i386/t-linux to tmake_file.
+
2011-10-28 Ian Lance Taylor <iant@google.com>
* config/i386/morestack.S: Correct CFI information to do proper
diff --git a/libgcc/Makefile.in b/libgcc/Makefile.in
index e9f2fa2f2b8..3a8509d118c 100644
--- a/libgcc/Makefile.in
+++ b/libgcc/Makefile.in
@@ -22,7 +22,6 @@
libgcc_topdir = @libgcc_topdir@
host_subdir = @host_subdir@
-gcc_srcdir = $(libgcc_topdir)/gcc
gcc_objdir = $(MULTIBUILDTOP)../../$(host_subdir)/gcc
srcdir = @srcdir@
@@ -50,6 +49,8 @@ target_noncanonical = @target_noncanonical@
# The rules for compiling them should be in the t-* file for the machine.
EXTRA_PARTS = @extra_parts@
+extra-parts = libgcc-extra-parts
+
# Multilib support variables.
MULTISRCTOP =
MULTIBUILDTOP =
@@ -62,6 +63,7 @@ INSTALL = @INSTALL@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_DATA = @INSTALL_DATA@
mkinstalldirs = $(SHELL) $(libgcc_topdir)/mkinstalldirs
+INSTALL_PARTS = $(EXTRA_PARTS)
objext = .o
@@ -119,7 +121,7 @@ installcheck:
.PHONY: all clean
clean:
- -rm -f config.h stamp-h stmp-ldirs libgcc.map
+ -rm -f config.h libgcc_tm.h stamp-h stmp-ldirs libgcc.map
-rm -f *$(objext)
-rm -f *.dep
-rm -f *.a
@@ -217,6 +219,41 @@ else
DECNUMINC =
endif
+# Options to use when compiling libgcc2.a.
+#
+LIBGCC2_DEBUG_CFLAGS = -g
+LIBGCC2_CFLAGS = -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
+ $(LIBGCC2_DEBUG_CFLAGS) -DIN_LIBGCC2 \
+ -fbuilding-libgcc -fno-stack-protector \
+ $(INHIBIT_LIBC_CFLAGS)
+
+# Additional options to use when compiling libgcc2.a.
+# Some targets override this to -isystem include
+LIBGCC2_INCLUDES =
+
+# Additional target-dependent options for compiling libgcc2.a.
+HOST_LIBGCC2_CFLAGS =
+
+PICFLAG = @PICFLAG@
+
+# Defined in libgcc2.c, included only in the static library.
+LIB2FUNCS_ST = _eprintf __gcc_bcmp
+
+# List of functions not to build from libgcc2.c.
+LIB2FUNCS_EXCLUDE =
+
+# These might cause a divide overflow trap and so are compiled with
+# unwinder info.
+LIB2_DIVMOD_FUNCS = _divdi3 _moddi3 _udivdi3 _umoddi3 _udiv_w_sdiv _udivmoddi4
+
+# List of extra C and assembler files to add to static and shared libgcc2.
+# Assembler files should have names ending in `.S'.
+LIB2ADD =
+
+# List of extra C and assembler files to add to static libgcc2.
+# Assembler files should have names ending in `.S'.
+LIB2ADD_ST =
+
# Specify the directories to be searched for header files.
# Both . and srcdir are used, in that order,
# so that *config.h will be found in the compilation
@@ -238,6 +275,16 @@ override CFLAGS := $(filter-out -fprofile-generate -fprofile-use,$(CFLAGS))
INTERNAL_CFLAGS = $(CFLAGS) $(LIBGCC2_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
$(INCLUDES) @set_have_cc_tls@ @set_use_emutls@
+# Options to use when compiling crtbegin/end.
+CRTSTUFF_CFLAGS = -O2 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
+ -finhibit-size-directive -fno-inline -fno-exceptions \
+ -fno-zero-initialized-in-bss -fno-toplevel-reorder -fno-tree-vectorize \
+ -fno-stack-protector \
+ $(INHIBIT_LIBC_CFLAGS)
+
+# Extra flags to use when compiling crt{begin,end}.o.
+CRTSTUFF_T_CFLAGS =
+
MULTIDIR := $(shell $(CC) $(CFLAGS) -print-multi-directory)
MULTIOSDIR := $(shell $(CC) $(CFLAGS) -print-multi-os-directory)
@@ -279,6 +326,9 @@ LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde.c \
LIB2ADDEHSTATIC = $(LIB2ADDEH)
LIB2ADDEHSHARED = $(LIB2ADDEH)
+# nm flags to list global symbols in libgcc object files.
+SHLIB_NM_FLAGS = -pg
+
# Don't build libunwind by default.
LIBUNWIND =
SHLIBUNWIND_LINK =
@@ -287,6 +337,16 @@ SHLIBUNWIND_INSTALL =
tmake_file = @tmake_file@
include $(srcdir)/empty.mk $(tmake_file)
+# Collect target defines and headers from config.host.
+libgcc_tm_defines = @tm_defines@
+libgcc_tm_file = @tm_file@
+libgcc_tm.h: libgcc_tm.stamp; @true
+libgcc_tm.stamp:
+ DEFINES='$(libgcc_tm_defines)' HEADERS='$(libgcc_tm_file)' \
+ $(srcdir)/mkheader.sh > tmp-libgcc_tm.h
+ $(SHELL) $(srcdir)/../move-if-change tmp-libgcc_tm.h libgcc_tm.h
+ echo timestamp > $@
+
# Only handle shared libraries if both:
# - the user requested them
# - we know how to build them
@@ -332,16 +392,6 @@ vis_hide =
gen-hide-list = echo > $@
endif
-ifneq ($(EXTRA_PARTS),)
- extra-parts = libgcc-extra-parts
- INSTALL_PARTS = $(EXTRA_PARTS)
-else
-ifneq ($(GCC_EXTRA_PARTS),)
- extra-parts = gcc-extra-parts
- INSTALL_PARTS = $(GCC_EXTRA_PARTS)
-endif
-endif
-
LIB2ADD += enable-execute-stack.c
LIB2ADDEH += $(srcdir)/emutls.c
@@ -388,40 +438,36 @@ LIB2_DIVMOD_FUNCS := $(filter-out $(LIB2FUNCS_EXCLUDE) $(LIB1ASMFUNCS), \
ifeq ($(enable_shared),yes)
lib1asmfuncs-o = $(patsubst %,%$(objext),$(LIB1ASMFUNCS))
-$(lib1asmfuncs-o): %$(objext): $(gcc_srcdir)/config/$(LIB1ASMSRC) %.vis
- $(gcc_compile) -DL$* -xassembler-with-cpp \
- -c $(gcc_srcdir)/config/$(LIB1ASMSRC) -include $*.vis
+$(lib1asmfuncs-o): %$(objext): $(srcdir)/config/$(LIB1ASMSRC) %.vis
+ $(gcc_compile) -DL$* -xassembler-with-cpp -c $< -include $*.vis
$(patsubst %,%.vis,$(LIB1ASMFUNCS)): %.vis: %_s$(objext)
$(gen-hide-list)
libgcc-objects += $(lib1asmfuncs-o)
lib1asmfuncs-s-o = $(patsubst %,%_s$(objext),$(LIB1ASMFUNCS))
-$(lib1asmfuncs-s-o): %_s$(objext): $(gcc_srcdir)/config/$(LIB1ASMSRC)
- $(gcc_s_compile) -DL$* -xassembler-with-cpp \
- -c $(gcc_srcdir)/config/$(LIB1ASMSRC)
+$(lib1asmfuncs-s-o): %_s$(objext): $(srcdir)/config/$(LIB1ASMSRC)
+ $(gcc_s_compile) -DL$* -xassembler-with-cpp -c $<
libgcc-s-objects += $(lib1asmfuncs-s-o)
else
lib1asmfuncs-o = $(patsubst %,%$(objext),$(LIB1ASMFUNCS))
-$(lib1asmfuncs-o): %$(objext): $(gcc_srcdir)/config/$(LIB1ASMSRC)
- $(gcc_compile) -DL$* -xassembler-with-cpp \
- -c $(gcc_srcdir)/config/$(LIB1ASMSRC)
+$(lib1asmfuncs-o): %$(objext): $(srcdir)/config/$(LIB1ASMSRC)
+ $(gcc_compile) -DL$* -xassembler-with-cpp -c $<
libgcc-objects += $(lib1asmfuncs-o)
endif
# Build lib2funcs. For the static library also include LIB2FUNCS_ST.
lib2funcs-o = $(patsubst %,%$(objext),$(lib2funcs) $(LIB2FUNCS_ST))
-$(lib2funcs-o): %$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \
- $(vis_hide)
+$(lib2funcs-o): %$(objext): $(srcdir)/libgcc2.c
+ $(gcc_compile) -DL$* -c $< $(vis_hide)
libgcc-objects += $(lib2funcs-o)
ifeq ($(enable_shared),yes)
lib2funcs-s-o = $(patsubst %,%_s$(objext),$(lib2funcs))
-$(lib2funcs-s-o): %_s$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_s_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c
+$(lib2funcs-s-o): %_s$(objext): $(srcdir)/libgcc2.c
+ $(gcc_s_compile) -DL$* -c $<
libgcc-s-objects += $(lib2funcs-s-o)
endif
@@ -449,15 +495,15 @@ endif
# Build LIB2_DIVMOD_FUNCS.
lib2-divmod-o = $(patsubst %,%$(objext),$(LIB2_DIVMOD_FUNCS))
-$(lib2-divmod-o): %$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \
+$(lib2-divmod-o): %$(objext): $(srcdir)/libgcc2.c
+ $(gcc_compile) -DL$* -c $< \
-fexceptions -fnon-call-exceptions $(vis_hide)
libgcc-objects += $(lib2-divmod-o)
ifeq ($(enable_shared),yes)
lib2-divmod-s-o = $(patsubst %,%_s$(objext),$(LIB2_DIVMOD_FUNCS))
-$(lib2-divmod-s-o): %_s$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_s_compile) -DL$* -c $(gcc_srcdir)/libgcc2.c \
+$(lib2-divmod-s-o): %_s$(objext): $(srcdir)/libgcc2.c
+ $(gcc_s_compile) -DL$* -c $< \
-fexceptions -fnon-call-exceptions
libgcc-s-objects += $(lib2-divmod-s-o)
endif
@@ -897,45 +943,39 @@ ALL_CRT_CFLAGS = $(CFLAGS) $(CRTSTUFF_CFLAGS) $(INCLUDES)
crt_compile = $(CC) $(ALL_CRT_CFLAGS) -o $@ $(compile_deps)
ifeq ($(CUSTOM_CRTSTUFF),)
-crtbegin$(objext): $(gcc_srcdir)/crtstuff.c
- $(crt_compile) $(CRTSTUFF_T_CFLAGS) \
- -c $(gcc_srcdir)/crtstuff.c -DCRT_BEGIN
+# Compile two additional files that are linked with every program
+# linked using GCC on systems using COFF or ELF, for the sake of C++
+# constructors.
+crtbegin$(objext): $(srcdir)/crtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -c $< -DCRT_BEGIN
-crtend$(objext): $(gcc_srcdir)/crtstuff.c
- $(crt_compile) $(CRTSTUFF_T_CFLAGS) \
- -c $(gcc_srcdir)/crtstuff.c -DCRT_END
+crtend$(objext): $(srcdir)/crtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -c $< -DCRT_END
# These are versions of crtbegin and crtend for shared libraries.
-crtbeginS$(objext): $(gcc_srcdir)/crtstuff.c
- $(crt_compile) $(CRTSTUFF_T_CFLAGS_S) \
- -c $(gcc_srcdir)/crtstuff.c -DCRT_BEGIN -DCRTSTUFFS_O
+crtbeginS$(objext): $(srcdir)/crtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS_S) -c $< -DCRT_BEGIN -DCRTSTUFFS_O
-crtendS$(objext): $(gcc_srcdir)/crtstuff.c
- $(crt_compile) $(CRTSTUFF_T_CFLAGS_S) \
- -c $(gcc_srcdir)/crtstuff.c -DCRT_END -DCRTSTUFFS_O
+crtendS$(objext): $(srcdir)/crtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS_S) -c $< -DCRT_END -DCRTSTUFFS_O
# This is a version of crtbegin for -static links.
-crtbeginT.o: $(gcc_srcdir)/crtstuff.c
- $(crt_compile) $(CRTSTUFF_T_CFLAGS) \
- -c $(gcc_srcdir)/crtstuff.c -DCRT_BEGIN -DCRTSTUFFT_O
+crtbeginT$(objext): $(srcdir)/crtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -c $< -DCRT_BEGIN -DCRTSTUFFT_O
+endif
+
+ifeq ($(CUSTOM_CRTIN),)
+# -x assembler-with-cpp is only needed on case-insensitive filesystems.
+crti$(objext): $(srcdir)/config/$(cpu_type)/crti.S
+ $(crt_compile) -c -x assembler-with-cpp $<
+
+crtn$(objext): $(srcdir)/config/$(cpu_type)/crtn.S
+ $(crt_compile) -c -x assembler-with-cpp $<
endif
# Build extra startfiles in the libgcc directory.
.PHONY: libgcc-extra-parts
libgcc-extra-parts: $(EXTRA_PARTS)
-ifneq ($(GCC_EXTRA_PARTS),)
-ifneq ($(sort $(EXTRA_PARTS)),$(GCC_EXTRA_PARTS))
- # If the gcc directory specifies which extra parts to
- # build for this target, and the libgcc configuration also
- # specifies, make sure they match. This can be removed
- # when the gcc directory no longer holds libgcc configuration;
- # it is useful when migrating a target.
- @echo "Configuration mismatch!"
- @echo "Extra parts from gcc directory: $(GCC_EXTRA_PARTS)"
- @echo "Extra parts from libgcc: $(EXTRA_PARTS)"
- exit 1
-endif
-endif
# Early copyback; see "all" above for the rationale. The
# early copy is necessary so that the gcc -B options find
@@ -951,45 +991,13 @@ endif
esac; \
done
-# Build extra startfiles in the gcc directory, for unconverted
-# targets.
-.PHONY: gcc-extra-parts
-gcc-extra-parts:
- # Recursively invoke make in the GCC directory to build any
- # startfiles (for now). We must do this just once, passing
- # it all the GCC_EXTRA_PARTS as simultaneous goal targets,
- # so that rules which cannot execute simultaneously are properly
- # serialized. We indirect through T_TARGET in case any multilib
- # directories contain an equals sign, to prevent make from
- # interpreting any of the goals as variable assignments.
-
- # We must use cd && make rather than make -C, or else the stage
- # number will be embedded in debug information.
-
- T=`$(PWD_COMMAND)`/ \
- && cd $(gcc_objdir) \
- && $(MAKE) GCC_FOR_TARGET="$(CC)" \
- MULTILIB_CFLAGS="$(CFLAGS)" \
- T=$$T \
- T_TARGET="$(patsubst %,$${T}%,$(GCC_EXTRA_PARTS))" \
- T_TARGET
-
- # Early copyback; see "all" above for the rationale. The
- # early copy is necessary so that the gcc -B options find
- # the right startup files when linking shared libgcc.
- $(mkinstalldirs) $(gcc_objdir)$(MULTISUBDIR)
- parts="$(GCC_EXTRA_PARTS)"; \
- for file in $$parts; do \
- rm -f $(gcc_objdir)$(MULTISUBDIR)/$$file; \
- $(INSTALL_DATA) $$file $(gcc_objdir)$(MULTISUBDIR)/; \
- case $$file in \
- *.a) \
- $(RANLIB) $(gcc_objdir)$(MULTISUBDIR)/$$file ;; \
- esac; \
- done
-
all: $(extra-parts)
+$(libgcc-objects) $(libgcc-s-objects) $(libgcc-eh-objects) \
+ $(libgcov-objects) \
+ $(libunwind-objects) $(libunwind-s-objects) \
+ $(EXTRA_PARTS): libgcc_tm.h
+
install-unwind_h:
dest=$(gcc_objdir)/include/tmp$$$$-unwind.h; \
cp unwind.h $$dest; \
@@ -1067,11 +1075,3 @@ install-strip: install
.NOEXPORT:
include $(srcdir)/empty.mk $(wildcard *.dep)
-
-# TODO QUEUE:
-# Garbage collect in gcc/:
-# $(LIBGCC) settings in t-* are now unused
-#
-# Remove use of $(gcc_srcdir). Source files referenced using $(gcc_srcdir)
-# should move into the libgcc directory.
-
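For context on the CUSTOM_CRTIN guard added above: a target makefile fragment can suppress the generic crti/crtn rules and supply its own. A minimal sketch of such a fragment, where the name t-example and the example/crti.S path are hypothetical and not part of this patch:

    # Hypothetical fragment t-example: setting CUSTOM_CRTIN non-empty
    # skips the generic rules guarded by ifeq ($(CUSTOM_CRTIN),) above.
    CUSTOM_CRTIN = yes

    # The fragment then supplies its own rules, reusing the shared
    # crt_compile command; the source path here is made up.
    crti$(objext): $(srcdir)/config/example/crti.S
	    $(crt_compile) -c -x assembler-with-cpp $<
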
diff --git a/libgcc/config.host b/libgcc/config.host
index 08c00a37f97..8af99f57987 100644
--- a/libgcc/config.host
+++ b/libgcc/config.host
@@ -58,8 +58,12 @@
# Defaults to "$cpu_type/sfp-machine.h" if it exists,
# no-sfp-machine.h otherwise.
# tmake_file A list of machine-description-specific
-# makefile-fragments, if different from
-# "$cpu_type/t-$cpu_type".
+# makefile fragments.
+# tm_defines List of target macros to define for all compilations.
+# tm_file A list of target macro files used only for code
+# built for the target, not the host. These files
+# are relative to $srcdir/config and must not have
+# the same names as files in $srcdir/../gcc/config.
# unwind_header The name of the header file declaring the unwind
# runtime interface routines.
@@ -67,6 +71,8 @@ asm_hidden_op=.hidden
enable_execute_stack=
extra_parts=
tmake_file=
+tm_file=
+tm_defines=
md_unwind_header=no-unwind.h
unwind_header=unwind-generic.h
@@ -163,7 +169,7 @@ esac
case ${host} in
*-*-darwin*)
asm_hidden_op=.private_extern
- tmake_file="$tmake_file t-darwin ${cpu_type}/t-darwin t-slibgcc-darwin"
+ tmake_file="$tmake_file t-darwin ${cpu_type}/t-darwin t-libgcc-pic t-slibgcc-darwin"
extra_parts=crt3.o
;;
*-*-freebsd[12] | *-*-freebsd[12].* | *-*-freebsd*aout*)
@@ -179,22 +185,56 @@ case ${host} in
# This is the generic ELF configuration of FreeBSD. Later
# machine-specific sections may refine and add to this
# configuration.
- tmake_file="$tmake_file t-eh-dw2-dip"
+ tmake_file="$tmake_file t-freebsd t-crtstuff-pic t-libgcc-pic t-eh-dw2-dip t-slibgcc t-slibgcc-gld t-slibgcc-elf-ver"
+ extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o"
+ case ${target_thread_file} in
+ posix)
+ tmake_file="${tmake_file} t-freebsd-thread"
+ # Before 5.0, FreeBSD can't bind shared libraries to -lc
+ # when "optionally" threaded via weak pthread_* checks.
+ case ${host} in
+ *-*-freebsd[34] | *-*-freebsd[34].*)
+ tmake_file="${tmake_file} t-slibgcc-nolc-override"
+ ;;
+ esac
+ ;;
+ esac
;;
*-*-linux* | frv-*-*linux* | *-*-kfreebsd*-gnu | *-*-knetbsd*-gnu | *-*-gnu* | *-*-kopensolaris*-gnu)
- tmake_file="$tmake_file t-eh-dw2-dip"
+ tmake_file="$tmake_file t-crtstuff-pic t-libgcc-pic t-eh-dw2-dip t-slibgcc t-slibgcc-gld t-slibgcc-elf-ver t-linux"
extra_parts="crtbegin.o crtbeginS.o crtbeginT.o crtend.o crtendS.o"
;;
+*-*-lynxos*)
+ tmake_file="$tmake_file t-lynx $cpu_type/t-crtstuff t-crtstuff-pic t-libgcc-pic"
+ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
+ ;;
*-*-netbsd*)
+ tmake_file="$tmake_file t-crtstuff-pic t-libgcc-pic t-slibgcc t-slibgcc-gld t-slibgcc-elf-ver"
+ # NetBSD 1.7 and later are set up to use GCC's crtstuff for
+ # ELF configurations. We will clear extra_parts in the
+ # a.out configurations.
+ case ${host} in
+ *-*-netbsd*1.[7-9]* | *-*-netbsd[2-9]* | *-*-netbsdelf[2-9]*)
+ extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o"
+ ;;
+ esac
;;
*-*-openbsd*)
+ tmake_file="$tmake_file t-crtstuff-pic t-libgcc-pic"
+ case ${target_thread_file} in
+ posix)
+ tmake_file="$tmake_file t-openbsd-thread"
+ ;;
+ esac
;;
*-*-rtems*)
+ tmake_file="$tmake_file t-rtems"
+ extra_parts="crtbegin.o crtend.o"
;;
*-*-solaris2*)
# Unless linker support and dl_iterate_phdr are present,
# unwind-dw2-fde-dip.c automatically falls back to unwind-dw2-fde.c.
- tmake_file="$tmake_file t-sol2 t-eh-dw2-dip t-slibgcc t-slibgcc-elf-ver"
+ tmake_file="$tmake_file t-sol2 t-eh-dw2-dip t-libgcc-pic t-slibgcc t-slibgcc-elf-ver"
if test $with_gnu_ld = yes; then
tmake_file="$tmake_file t-slibgcc-gld"
else
@@ -213,14 +253,22 @@ case ${host} in
extra_parts="$extra_parts crt1.o gcrt1.o"
;;
*)
- tmake_file="$tmake_file t-crtin"
extra_parts="$extra_parts crt1.o crti.o crtn.o gcrt1.o"
;;
esac
;;
+*-*-uclinux*)
+ extra_parts="crtbegin.o crtend.o"
+ ;;
+*-*-*vms*)
+ tmake_file="vms/t-vms"
+ extra_parts="vcrt0.o pcrt0.o crtbegin.o crtbeginS.o crtend.o crtendS.o"
+ ;;
*-*-vxworks*)
+ tmake_file=t-vxworks
;;
*-*-elf)
+ extra_parts="crtbegin.o crtend.o"
;;
esac
@@ -239,17 +287,19 @@ esac
case ${host} in
alpha*-*-linux*)
- tmake_file="${tmake_file} t-crtfm"
+ tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee t-crtfm alpha/t-linux"
extra_parts="$extra_parts crtfastmath.o"
md_unwind_header=alpha/linux-unwind.h
;;
alpha*-*-freebsd*)
- tmake_file="${tmake_file} t-crtfm"
- extra_parts="$extra_parts crtfastmath.o"
+ tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee t-crtfm"
+ extra_parts="$extra_parts crtbeginT.o crtfastmath.o"
;;
alpha*-*-netbsd*)
+ tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee"
;;
alpha*-*-openbsd*)
+ tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee"
;;
alpha*-dec-osf5.1*)
tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee t-crtfm t-slibgcc alpha/t-slibgcc-osf"
@@ -258,32 +308,40 @@ alpha*-dec-osf5.1*)
tmake_file="${tmake_file} alpha/t-osf-pthread"
;;
esac
- extra_parts="${extra_parts} qrnnd.o crtfastmath.o gthr-posix.o"
+ extra_parts="${extra_parts} crtfastmath.o"
md_unwind_header=alpha/osf5-unwind.h
;;
alpha64-dec-*vms*)
- tmake_file="vms/t-vms vms/t-vms64 alpha/t-vms"
+ tmake_file="$tmake_file alpha/t-alpha alpha/t-ieee alpha/t-vms t-slibgcc-vms"
+ extra_parts="$extra_parts vms-dwarf2.o vms-dwarf2eh.o"
md_unwind_header=alpha/vms-unwind.h
;;
alpha*-dec-*vms*)
- tmake_file="vms/t-vms alpha/t-vms"
+ tmake_file="$tmake_file alpha/t-alpha alpha/t-ieee alpha/t-vms t-slibgcc-vms"
+ extra_parts="$extra_parts vms-dwarf2.o vms-dwarf2eh.o"
md_unwind_header=alpha/vms-unwind.h
;;
arm-wrs-vxworks)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file arm/t-arm arm/t-vxworks t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
arm*-*-freebsd*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file arm/t-arm arm/t-strongarm-elf t-fdpbit"
;;
arm*-*-netbsdelf*)
+ tmake_file="$tmake_file arm/t-arm arm/t-netbsd t-slibgcc-gld-nover"
;;
arm*-*-linux*) # ARM GNU/Linux with ELF
- tmake_file="${tmake_file} t-fixedpoint-gnu-prefix"
+ tmake_file="${tmake_file} arm/t-arm t-fixedpoint-gnu-prefix"
case ${host} in
arm*-*-linux-*eabi)
- tmake_file="${tmake_file} arm/t-bpabi"
+ tmake_file="${tmake_file} arm/t-elf arm/t-bpabi arm/t-linux-eabi t-slibgcc-libgcc"
+ tm_file="$tm_file arm/bpabi-lib.h"
unwind_header=config/arm/unwind-arm.h
;;
+ *)
+ tmake_file="$tmake_file arm/t-linux"
+ ;;
esac
tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
;;
@@ -292,121 +350,192 @@ arm*-*-uclinux*) # ARM ucLinux
case ${host} in
arm*-*-uclinux*eabi)
tmake_file="${tmake_file} arm/t-bpabi"
+ tm_file="$tm_file arm/bpabi-lib.h"
unwind_header=config/arm/unwind-arm.h
;;
esac
- tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ tmake_file="$tmake_file arm/t-arm arm/t-elf t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
arm*-*-ecos-elf)
- tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ tmake_file="$tmake_file arm/t-arm arm/t-elf t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
-arm*-*-eabi* | arm*-*-symbianelf* )
- tmake_file="${tmake_file} t-fixedpoint-gnu-prefix"
+arm*-*-eabi* | arm*-*-symbianelf* | arm*-*-rtemseabi*)
+ tmake_file="${tmake_file} arm/t-arm arm/t-elf t-fixedpoint-gnu-prefix"
+ tm_file="$tm_file arm/bpabi-lib.h"
case ${host} in
- arm*-*-eabi*)
+ arm*-*-eabi* | arm*-*-rtemseabi*)
tmake_file="${tmake_file} arm/t-bpabi"
+ extra_parts="crtbegin.o crtend.o crti.o crtn.o"
;;
arm*-*-symbianelf*)
- tmake_file="${tmake_file} arm/t-symbian"
+ tmake_file="${tmake_file} arm/t-symbian t-slibgcc-nolc-override"
+ tm_file="$tm_file arm/symbian-lib.h"
+ # Symbian OS provides its own startup code.
;;
esac
tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
unwind_header=config/arm/unwind-arm.h
;;
arm*-*-rtems*)
- tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ tmake_file="$tmake_file arm/t-arm arm/t-elf t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
arm*-*-elf)
- tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ tmake_file="$tmake_file arm/t-arm arm/t-elf t-softfp-sfdf t-softfp-excl arm/t-softfp t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
arm*-wince-pe*)
+ tmake_file="$tmake_file arm/t-arm arm/t-wince-pe"
;;
avr-*-rtems*)
- tmake_file=t-fpbit
+ tmake_file="$tmake_file avr/t-avr avr/t-rtems t-fpbit"
+ tm_file="$tm_file avr/avr-lib.h"
+ # Don't use default.
+ extra_parts=
;;
avr-*-*)
# Make HImode functions for AVR
tmake_file="${cpu_type}/t-avr t-fpbit"
+ tm_file="$tm_file avr/avr-lib.h"
;;
bfin*-elf*)
- tmke_file=t-fdpbit
+ tmake_file="bfin/t-bfin bfin/t-crtlibid bfin/t-crtstuff t-libgcc-pic t-fdpbit"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crti.o crtn.o crtlibid.o"
;;
bfin*-uclinux*)
- tmake_file=t-fdpbit
+ tmake_file="bfin/t-bfin bfin/t-crtlibid bfin/t-crtstuff t-libgcc-pic t-fdpbit"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crtlibid.o"
md_unwind_header=bfin/linux-unwind.h
;;
bfin*-linux-uclibc*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file bfin/t-bfin bfin/t-crtstuff t-libgcc-pic t-fdpbit bfin/t-linux"
# No need to build crtbeginT.o on uClibc systems. Should probably
# be moved to the OS specific section above.
extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
md_unwind_header=bfin/linux-unwind.h
;;
bfin*-rtems*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file bfin/t-bfin t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
bfin*-*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file bfin/t-bfin t-fdpbit"
+ extra_parts="crtbegin.o crtend.o crti.o crtn.o"
;;
-crisv32-*-elf | crisv32-*-none | cris-*-elf | cris-*-none)
- tmake_file=t-fdpbit
- extra_parts="crtbegin.o crtend.o"
+crisv32-*-elf)
+ tmake_file="$tmake_file cris/t-cris t-fdpbit"
+ ;;
+cris-*-elf)
+ tmake_file="$tmake_file cris/t-cris t-fdpbit cris/t-elfmulti"
;;
cris-*-linux* | crisv32-*-linux*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file cris/t-cris t-fdpbit cris/t-linux"
;;
-fido-*-elf)
+epiphany-*-elf*)
+ tmake_file="epiphany/t-epiphany t-fdpbit epiphany/t-custom-eqsf"
+ extra_parts="$extra_parts crti.o crtint.o crtrunc.o crtm1reg-r43.o crtm1reg-r63.o crtn.o"
;;
fr30-*-elf)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file fr30/t-fr30 t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
frv-*-elf)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file frv/t-frv t-fdpbit"
+ tm_file="$tm_file frv/frv-abi.h"
+ # Don't use crtbegin.o, crtend.o.
+ extra_parts="frvbegin.o frvend.o"
;;
frv-*-*linux*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file frv/t-frv frv/t-linux t-fdpbit"
+ tm_file="$tm_file frv/frv-abi.h"
;;
h8300-*-rtems*)
- tmake_file=t-fpbit
+ tmake_file="$tmake_file h8300/t-h8300 t-fpbit"
+ tm_file="$tm_file h8300/h8300-lib.h"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
h8300-*-elf*)
- tmake_file=t-fpbit
+ tmake_file="$tmake_file h8300/t-h8300 t-fpbit"
+ tm_file="$tm_file h8300/h8300-lib.h"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
hppa*64*-*-linux*)
+ tmake_file="$tmake_file pa/t-linux pa/t-linux64"
;;
hppa*-*-linux*)
+ tmake_file="$tmake_file pa/t-linux t-slibgcc-libgcc"
+ # Set the libgcc version number
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
+ else
+ tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
+ fi
md_unwind_header=pa/linux-unwind.h
;;
hppa[12]*-*-hpux10*)
+ tmake_file="$tmake_file pa/t-hpux pa/t-hpux10 t-libgcc-pic t-slibgcc"
+ # Set the libgcc version number
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
+ else
+ tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
+ fi
+ tmake_file="$tmake_file pa/t-slibgcc-hpux t-slibgcc-hpux"
md_unwind_header=pa/hpux-unwind.h
;;
hppa*64*-*-hpux11*)
+ tmake_file="$tmake_file pa/t-hpux pa/t-pa64 pa/t-stublib pa/t-stublib64 t-libgcc-pic t-slibgcc"
+ # Set the libgcc version number
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
+ else
+ tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
+ fi
+ tmake_file="$tmake_file pa/t-slibgcc-hpux t-slibgcc-hpux"
+ extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o \
+ libgcc_stub.a"
md_unwind_header=pa/hpux-unwind.h
;;
hppa[12]*-*-hpux11*)
+ tmake_file="$tmake_file pa/t-hpux pa/t-stublib t-libgcc-pic t-slibgcc"
+ # Set the libgcc version number
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
+ else
+ tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
+ fi
+ tmake_file="$tmake_file pa/t-slibgcc-hpux t-slibgcc-hpux"
+ extra_parts="libgcc_stub.a"
md_unwind_header=pa/hpux-unwind.h
;;
i[34567]86-*-darwin*)
tmake_file="$tmake_file i386/t-crtpc i386/t-crtfm"
+ tm_file="$tm_file i386/darwin-lib.h"
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
;;
x86_64-*-darwin*)
- tmake_file="t-darwin ${cpu_type}/t-darwin64 i386/t-crtpc i386/t-crtfm t-slibgcc-darwin"
+ tmake_file="$tmake_file i386/t-crtpc i386/t-crtfm"
+ tm_file="$tm_file i386/darwin-lib.h"
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
;;
i[34567]86-*-elf*)
+ tmake_file="$tmake_file i386/t-crtstuff t-crtstuff-pic t-libgcc-pic"
;;
x86_64-*-elf*)
+ tmake_file="$tmake_file i386/t-crtstuff t-crtstuff-pic t-libgcc-pic"
;;
i[34567]86-*-freebsd*)
- tmake_file="${tmake_file} i386/t-freebsd"
+ tmake_file="${tmake_file} i386/t-freebsd i386/t-crtstuff"
;;
x86_64-*-freebsd*)
- tmake_file="${tmake_file} i386/t-freebsd"
+ tmake_file="${tmake_file} i386/t-freebsd i386/t-crtstuff"
;;
i[34567]86-*-netbsdelf*)
;;
x86_64-*-netbsd*)
+ tmake_file="${tmake_file} i386/t-crtstuff"
;;
i[34567]86-*-openbsd2.*|i[34567]86-*openbsd3.[0123])
;;
@@ -414,31 +543,33 @@ i[34567]86-*-openbsd*)
;;
i[34567]86-*-linux*)
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
- tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm t-dfprules"
+ tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm i386/t-crtstuff t-dfprules"
md_unwind_header=i386/linux-unwind.h
;;
i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i[34567]86-*-gnu* | i[34567]86-*-kopensolaris*-gnu)
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
- tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm t-dfprules"
+ tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm i386/t-crtstuff t-dfprules"
;;
x86_64-*-linux*)
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
- tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm t-dfprules"
+ tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm i386/t-crtstuff t-dfprules"
md_unwind_header=i386/linux-unwind.h
;;
x86_64-*-kfreebsd*-gnu | x86_64-*-knetbsd*-gnu)
extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o"
- tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm t-dfprules"
+ tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm i386/t-crtstuff t-dfprules"
;;
i[34567]86-pc-msdosdjgpp*)
;;
i[34567]86-*-lynxos*)
;;
i[34567]86-*-nto-qnx*)
+ tmake_file="$tmake_file i386/t-nto t-libgcc-pic"
+ extra_parts=crtbegin.o
;;
i[34567]86-*-rtems*)
- extra_parts="crtbegin.o crtend.o crti.o crtn.o"
- tmake_file="${tmake_file} t-crtin i386/t-softfp i386/t-crtstuff t-rtems"
+ tmake_file="$tmake_file i386/t-softfp i386/t-crtstuff"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*)
tmake_file="$tmake_file i386/t-crtpc i386/t-crtfm"
@@ -449,86 +580,164 @@ i[4567]86-wrs-vxworks|i[4567]86-wrs-vxworksae)
;;
i[34567]86-*-cygwin*)
extra_parts="crtbegin.o crtend.o crtfastmath.o"
- tmake_file="i386/t-cygming i386/t-crtfm t-dfprules"
+ # This has to match the logic for DWARF2_UNWIND_INFO in gcc/config/i386/cygming.h
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_eh_file="i386/t-sjlj-eh"
+ else
+ tmake_eh_file="i386/t-dw2-eh"
+ fi
+ # Shared libgcc DLL install dir depends on cross/native build.
+ if test x${build} = x${host} ; then
+ tmake_dlldir_file="i386/t-dlldir"
+ else
+ tmake_dlldir_file="i386/t-dlldir-x"
+ fi
+ tmake_file="${tmake_file} ${tmake_eh_file} ${tmake_dlldir_file} i386/t-slibgcc-cygming i386/t-cygming i386/t-cygwin i386/t-crtfm i386/t-chkstk t-dfprules"
+ case ${target_thread_file} in
+ posix)
+ tmake_file="i386/t-mingw-pthread $tmake_file"
+ ;;
+ esac
;;
i[34567]86-*-mingw*)
extra_parts="crtbegin.o crtend.o crtfastmath.o"
- tmake_file="i386/t-cygming i386/t-crtfm t-dfprules"
+ case ${target_thread_file} in
+ win32)
+ tmake_file="$tmake_file i386/t-gthr-win32"
+ ;;
+ esac
+ # This has to match the logic for DWARF2_UNWIND_INFO in gcc/config/i386/cygming.h
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_eh_file="i386/t-sjlj-eh"
+ else
+ tmake_eh_file="i386/t-dw2-eh"
+ fi
+ # Shared libgcc DLL install dir depends on cross/native build.
+ if test x${build} = x${host} ; then
+ tmake_dlldir_file="i386/t-dlldir"
+ else
+ tmake_dlldir_file="i386/t-dlldir-x"
+ fi
+ tmake_file="${tmake_file} ${tmake_eh_file} ${tmake_dlldir_file} i386/t-slibgcc-cygming i386/t-cygming i386/t-mingw32 i386/t-crtfm i386/t-chkstk t-dfprules"
md_unwind_header=i386/w32-unwind.h
;;
x86_64-*-mingw*)
- tmake_file="t-dfprules i386/t-crtfm"
+ case ${target_thread_file} in
+ win32)
+ tmake_file="$tmake_file i386/t-gthr-win32"
+ ;;
+ esac
+ # This has to match the logic for DWARF2_UNWIND_INFO in gcc/config/i386/cygming.h
+ if test x$enable_sjlj_exceptions = xyes; then
+ tmake_eh_file="i386/t-sjlj-eh"
+ else
+ tmake_eh_file="i386/t-dw2-eh"
+ fi
+ # Shared libgcc DLL install dir depends on cross/native build.
+ if test x${build} = x${host} ; then
+ tmake_dlldir_file="i386/t-dlldir"
+ else
+ tmake_dlldir_file="i386/t-dlldir-x"
+ fi
+ tmake_file="${tmake_file} ${tmake_eh_file} ${tmake_dlldir_file} i386/t-slibgcc-cygming i386/t-mingw32 t-dfprules i386/t-crtfm i386/t-chkstk"
extra_parts="$extra_parts crtfastmath.o"
;;
i[34567]86-*-interix3*)
+ tmake_file="$tmake_file i386/t-interix i386/t-chkstk"
;;
ia64*-*-elf*)
- extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crtfastmath.o"
tmake_file="ia64/t-ia64 ia64/t-eh-ia64 t-crtfm"
;;
ia64*-*-freebsd*)
- extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- tmake_file="ia64/t-ia64 ia64/t-eh-ia64 t-crtfm"
+ extra_parts="$extra_parts crtfastmath.o"
+ tmake_file="$tmake_file ia64/t-ia64 ia64/t-eh-ia64 t-crtfm"
;;
ia64*-*-linux*)
+ # Don't use crtbeginT.o from *-*-linux* default.
extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- tmake_file="ia64/t-ia64 t-crtfm t-softfp-tf ia64/t-softfp t-softfp ia64/t-softfp-compat ia64/t-eh-ia64 t-libunwind ia64/t-glibc"
+ tmake_file="$tmake_file ia64/t-ia64 t-crtfm t-softfp-tf ia64/t-softfp t-softfp ia64/t-softfp-compat ia64/t-eh-ia64 t-libunwind ia64/t-linux"
if test x$with_system_libunwind != xyes ; then
- tmake_file="${tmake_file} t-libunwind-elf ia64/t-glibc-libunwind"
+ tmake_file="${tmake_file} t-libunwind-elf ia64/t-linux-libunwind"
fi
md_unwind_header=ia64/linux-unwind.h
;;
ia64*-*-hpux*)
- tmake_file="ia64/t-hpux"
+ tmake_file="ia64/t-ia64 ia64/t-hpux t-slibgcc ia64/t-slibgcc-hpux t-slibgcc-hpux"
;;
ia64-hp-*vms*)
- tmake_file="vms/t-vms vms/t-vms64 ia64/t-eh-ia64 ia64/t-vms"
+ tmake_file="$tmake_file ia64/t-ia64 ia64/t-eh-ia64 ia64/t-vms t-slibgcc-vms"
+ extra_parts="$extra_parts crtinitS.o"
md_unwind_header=ia64/vms-unwind.h
;;
iq2000*-*-elf*)
tmake_file=t-fdpbit
+ # Don't use default.
+ extra_parts=
;;
-lm32-*-elf*|lm32-*-rtems*)
- extra_parts="crtbegin.o crtend.o crti.o crtn.o"
+lm32-*-elf*)
+ extra_parts="$extra_parts crti.o crtn.o"
tmake_file="lm32/t-lm32 lm32/t-elf t-softfp-sfdf t-softfp"
;;
+lm32-*-rtems*)
+ tmake_file="$tmake_file lm32/t-lm32 lm32/t-elf t-softfp-sfdf t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
+ ;;
lm32-*-uclinux*)
- extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o"
- tmake_file="lm32/t-lm32 lm32/t-uclinux t-softfp-sfdf t-softfp"
+ extra_parts="$extra_parts crtbegin.o crtendS.o crtbeginT.o"
+ tmake_file="lm32/t-lm32 lm32/t-uclinux t-libgcc-pic t-softfp-sfdf t-softfp"
;;
-m32r-*-elf*|m32r-*-rtems*)
+m32r-*-elf*)
tmake_file=t-fdpbit
;;
+m32r-*-rtems*)
+ tmake_file="$tmake_file m32r/t-m32r t-fdpbit"
+ ;;
m32rle-*-elf*)
tmake_file=t-fdpbit
;;
m32r-*-linux*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file m32r/t-linux t-fdpbit"
;;
m32rle-*-linux*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file m32r/t-linux t-fdpbit"
;;
-m68k-*-elf*)
+m68k-*-elf* | fido-*-elf)
+ tmake_file="$tmake_file m68k/t-floatlib"
;;
m68k*-*-netbsdelf*)
;;
m68k*-*-openbsd*)
;;
m68k-*-uclinux*) # Motorola m68k/ColdFire running uClinux with uClibc
+ tmake_file="$tmake_file m68k/t-floatlib"
md_unwind_header=m68k/linux-unwind.h
;;
m68k-*-linux*) # Motorola m68k's running GNU/Linux
# with ELF format using glibc 2
# aka the GNU/Linux C library 6.
+ tmake_file="$tmake_file m68k/t-floatlib"
+ # If not configured with --enable-sjlj-exceptions, bump the
+ # libgcc version number.
+ if test x$enable_sjlj_exceptions != xyes; then
+ tmake_file="$tmake_file m68k/t-slibgcc-elf-ver"
+ fi
md_unwind_header=m68k/linux-unwind.h
;;
m68k-*-rtems*)
+ tmake_file="$tmake_file m68k/t-floatlib"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mcore-*-elf)
- tmake_file=t-fdpbit
+ tmake_file="mcore/t-mcore t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
+ ;;
+microblaze*-linux*)
+ tmake_file="$tmake_file t-slibgcc-nolc-override microblaze/t-microblaze t-fdpbit"
;;
microblaze*-*-*)
tmake_file="microblaze/t-microblaze t-fdpbit"
+ extra_parts="crtbegin.o crtend.o crti.o crtn.o"
;;
mips-sgi-irix6.5*)
tmake_file="$tmake_file mips/t-irix6 t-crtfm mips/t-tpbit t-slibgcc mips/t-slibgcc-irix"
@@ -550,47 +759,57 @@ mips*-*-linux*) # Linux MIPS, either endian.
mips*-*-openbsd*)
;;
mips*-sde-elf*)
+ tmake_file="mips/t-crtstuff mips/t-mips16"
case "${with_newlib}" in
yes)
# newlib / libgloss.
;;
*)
# MIPS toolkit libraries.
- tmake_file=mips/t-sdemtk
+ tmake_file="$tmake_file mips/t-sdemtk"
;;
esac
- tmake_file="$tmake_file mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mipsisa32-*-elf* | mipsisa32el-*-elf* | \
mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
mipsisa64-*-elf* | mipsisa64el-*-elf* | \
mipsisa64r2-*-elf* | mipsisa64r2el-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mipsisa64sr71k-*-elf*)
- tmake_file=t-fdpbit
+ tmake_file="mips/t-elf mips/t-crtstuff t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mipsisa64sb1-*-elf* | mipsisa64sb1el-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips-*-elf* | mipsel-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips64-*-elf* | mips64el-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips64vr-*-elf* | mips64vrel-*-elf*)
+ tmake_file="$tmake_file mips/t-elf mips/t-vr mips/t-crtstuff"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips64orion-*-elf* | mips64orionel-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips*-*-rtems*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-elf mips/t-crtstuff mips/t-mips16"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
mips-wrs-vxworks)
;;
mipstx39-*-elf* | mipstx39el-*-elf*)
- tmake_file="$tmake_file mips/t-mips16"
+ tmake_file="$tmake_file mips/t-crtstuff mips/t-mips16"
;;
mmix-knuth-mmixware)
extra_parts="crti.o crtn.o crtbegin.o crtend.o"
@@ -599,12 +818,17 @@ mmix-knuth-mmixware)
mn10300-*-*)
tmake_file=t-fdpbit
;;
-moxie-*-*)
+moxie-*-elf | moxie-*-uclinux*)
tmake_file="moxie/t-moxie t-softfp-sfdf t-softfp-excl t-softfp"
- extra_parts="crtbegin.o crtend.o crti.o crtn.o"
+ extra_parts="$extra_parts crti.o crtn.o"
+ ;;
+moxie-*-rtems*)
+ tmake_file="$tmake_file moxie/t-moxie t-softfp-sfdf t-softfp-excl t-softfp"
+ # Don't use default.
+ extra_parts=
;;
pdp11-*-*)
- tmake_file=t-fdpbit
+ tmake_file="pdp11/t-pdp11 t-fdpbit"
;;
picochip-*-*)
tmake_file="picochip/t-picochip t-fpbit"
@@ -623,72 +847,86 @@ powerpc-*-darwin*)
extra_parts="$extra_parts crt2.o"
;;
powerpc64-*-darwin*)
- tmake_file="$tmake_file rs6000/t-ibm-ldouble"
+ tmake_file="$tmake_file rs6000/t-darwin64 rs6000/t-ibm-ldouble"
extra_parts="$extra_parts crt2.o"
;;
powerpc-*-freebsd*)
- tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-freebsd t-softfp-sfdf t-softfp-excl t-softfp"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff rs6000/t-freebsd t-softfp-sfdf t-softfp-excl t-softfp t-slibgcc-libgcc"
+ extra_parts="$extra_parts crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-netbsd*)
+ tmake_file="$tmake_file rs6000/t-crtstuff"
;;
powerpc-*-eabispe*)
- tmake_file="${tmake_file} rs6000/t-ppccomm"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-eabisimaltivec*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-eabisim*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-elf*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-eabialtivec*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-xilinx-eabi*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-eabi*)
- tmake_file="${tmake_file} rs6000/t-ppccomm t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-rtems*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpc-*-linux* | powerpc64-*-linux*)
- tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ldbl128 t-softfp-sfdf t-softfp-excl t-dfprules rs6000/t-ppc64-fp t-softfp"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff rs6000/t-linux t-softfp-sfdf t-softfp-excl t-dfprules rs6000/t-ppc64-fp t-softfp t-slibgcc-libgcc"
+ extra_parts="$extra_parts ecrti.o ecrtn.o ncrti.o ncrtn.o"
md_unwind_header=rs6000/linux-unwind.h
;;
powerpc-wrs-vxworks|powerpc-wrs-vxworksae)
tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
;;
powerpc-*-lynxos*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file t-fdpbit"
;;
powerpcle-*-elf*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpcle-*-eabisim*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
powerpcle-*-eabi*)
- tmake_file="rs6000/t-ppccomm-ldbl t-fdpbit"
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-ppccomm-ldbl rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
md_unwind_header=rs6000/aix-unwind.h
- tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble"
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble rs6000/t-slibgcc-aix"
;;
rs6000-ibm-aix5.1.* | powerpc-ibm-aix5.1.*)
md_unwind_header=rs6000/aix-unwind.h
- tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble"
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble rs6000/t-slibgcc-aix"
;;
rs6000-ibm-aix[56789].* | powerpc-ibm-aix[56789].*)
md_unwind_header=rs6000/aix-unwind.h
- tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble"
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-ibm-ldouble rs6000/t-slibgcc-aix"
;;
rx-*-elf)
- extra_parts="crtbegin.o crtend.o"
tmake_file="rx/t-rx t-fdpbit"
+ tm_file="$tm_file rx/rx-abi.h rx/rx-lib.h"
;;
s390-*-linux*)
tmake_file="${tmake_file} s390/t-crtstuff s390/t-linux s390/32/t-floattodi"
@@ -699,37 +937,60 @@ s390x-*-linux*)
md_unwind_header=s390/linux-unwind.h
;;
s390x-ibm-tpf*)
- tmake_file="${tmake_file} s390/t-crtstuff s390/t-tpf t-eh-dw2-dip"
+ tmake_file="${tmake_file} s390/t-crtstuff t-libgcc-pic t-eh-dw2-dip"
+ extra_parts="crtbeginS.o crtendS.o"
md_unwind_header=s390/tpf-unwind.h
;;
score-*-elf)
tmake_file="${tmake_file} t-softfp-sfdf t-softfp-excl t-softfp"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
-sh-*-elf* | sh[12346l]*-*-elf* | \
- sh-*-linux* | sh[2346lbe]*-*-linux* | \
- sh-*-netbsdelf* | shl*-*-netbsdelf* | sh5-*-netbsd* | sh5l*-*-netbsd* | \
- sh64-*-netbsd* | sh64l*-*-netbsd*)
- case ${host} in
- sh*-*-linux*)
- tmake_file="${tmake_file} sh/t-linux t-fdpbit"
- md_unwind_header=sh/linux-unwind.h
+sh-*-elf* | sh[12346l]*-*-elf*)
+ tmake_file="$tmake_file sh/t-sh t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crt1.o crti.o crtn.o crtbeginS.o crtendS.o \
+ libic_invalidate_array_4-100.a \
+ libic_invalidate_array_4-200.a \
+ libic_invalidate_array_4a.a \
+ libgcc-Os-4-200.a libgcc-4-300.a"
+ case ${host} in sh64*-*-*)
+ tmake_file="$tmake_file sh/t-sh64"
;;
- sh*-*-netbsd*)
- # NetBSD's C library includes a fast software FP library that
- # has support for setting/setting the rounding mode, exception
- # mask, etc. Therefore, we don't want to include software FP
- # in libgcc.
+ esac
+ case ${host} in
+ sh*-superh-elf)
+ tmake_file="$tmake_file sh/t-superh"
+ extra_parts="$extra_parts crt1-mmu.o gcrt1-mmu.o gcrt1.o"
+ ;;
+ esac
+ ;;
+sh-*-linux* | sh[2346lbe]*-*-linux*)
+ tmake_file="${tmake_file} sh/t-sh t-slibgcc-libgcc sh/t-linux t-fdpbit"
+ case ${host} in sh64*-*-linux*)
+ tmake_file="$tmake_file sh/t-sh64"
;;
- sh*-*-*)
- tmake_file=t-fdpbit
+ esac
+ md_unwind_header=sh/linux-unwind.h
+ ;;
+sh-*-netbsdelf* | shl*-*-netbsdelf* | sh5-*-netbsd* | sh5l*-*-netbsd* | \
+ sh64-*-netbsd* | sh64l*-*-netbsd*)
+ tmake_file="$tmake_file sh/t-sh sh/t-netbsd"
+ case ${host} in
+ sh5*-*-netbsd* | sh64*-netbsd*)
+ tmake_file="$tmake_file sh/t-sh64"
;;
esac
+ # NetBSD's C library includes a fast software FP library that
+ # has support for getting/setting the rounding mode, exception
+ # mask, etc. Therefore, we don't want to include software FP
+ # in libgcc.
;;
sh-*-rtems*)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file sh/t-sh t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crt1.o crti.o crtn.o crtbeginS.o crtendS.o \
+ $sh_ic_extra_parts $sh_opt_extra_parts"
;;
sh-wrs-vxworks)
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file sh/t-sh t-crtstuff-pic t-fdpbit"
;;
sparc-*-netbsdelf*)
;;
@@ -743,22 +1004,37 @@ sparc-*-elf*)
tmake_file="sparc/t-softmul"
;;
esac
- tmake_file="${tmake_file} t-fdpbit t-crtin t-crtfm"
- extra_parts="crtbegin.o crtend.o crti.o crtn.o crtfastmath.o"
+ tmake_file="${tmake_file} t-fdpbit t-crtfm"
+ extra_parts="$extra_parts crti.o crtn.o crtfastmath.o"
;;
sparc-*-linux*) # SPARC's running GNU/Linux, libc6
+ tmake_file="${tmake_file} t-crtfm"
+ if test "${host_address}" = 64; then
+ tmake_file="$tmake_file sparc/t-linux64"
+ fi
case ${host} in
*-leon*)
- tmake_file=t-fdpbit
+ tmake_file="${tmake_file} t-fdpbit"
+ ;;
+ *)
+ tmake_file="${tmake_file} sparc/t-linux"
;;
esac
+ case ${host} in
+ *-leon[3-9]*)
+ ;;
+ *)
+ if test "${host_address}" = 32; then
+ tmake_file="$tmake_file sparc/t-softmul"
+ fi
+ ;;
+ esac
extra_parts="$extra_parts crtfastmath.o"
- tmake_file="${tmake_file} t-crtfm"
md_unwind_header=sparc/linux-unwind.h
;;
sparc-*-rtems*)
- tmake_file="sparc/t-elf t-crtin t-crtfm t-rtems t-fdpbit"
- extra_parts="crtbegin.o crtend.o crti.o crtn.o crtfastmath.o"
+ tmake_file="$tmake_file sparc/t-elf sparc/t-softmul t-crtfm t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o crtfastmath.o"
;;
sparc*-*-solaris2*)
tmake_file="$tmake_file t-crtfm"
@@ -766,60 +1042,83 @@ sparc*-*-solaris2*)
md_unwind_header=sparc/sol2-unwind.h
;;
sparc64-*-elf*)
- tmake_file="${tmake_file} t-crtin t-crtfm"
- extra_parts="crtbegin.o crtend.o crti.o crtn.o crtfastmath.o"
+ tmake_file="${tmake_file} t-crtfm"
+ extra_parts="$extra_parts crti.o crtn.o crtfastmath.o"
;;
sparc64-*-rtems*)
- tmake_file="sparc/t-elf t-crtin t-crtfm t-rtems"
- extra_parts="crtbegin.o crtend.o crti.o crtn.o crtfastmath.o"
+ tmake_file="$tmake_file sparc/t-elf t-crtfm"
+ extra_parts="$extra_parts crti.o crtn.o crtfastmath.o"
;;
sparc-wrs-vxworks)
;;
sparc64-*-freebsd*|ultrasparc-*-freebsd*)
tmake_file="$tmake_file t-crtfm"
- extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o crtfastmath.o"
+ extra_parts="$extra_parts crtfastmath.o"
;;
sparc64-*-linux*) # 64-bit SPARC's running GNU/Linux
extra_parts="$extra_parts crtfastmath.o"
- tmake_file="${tmake_file} t-crtfm"
+ tmake_file="${tmake_file} t-crtfm sparc/t-linux"
+ if test "${host_address}" = 64; then
+ tmake_file="${tmake_file} sparc/t-linux64"
+ fi
+ if test "${host_address}" = 32; then
+ tmake_file="${tmake_file} sparc/t-softmul"
+ fi
md_unwind_header=sparc/linux-unwind.h
;;
sparc64-*-netbsd*)
;;
spu-*-elf*)
- tmake_file="t-fdpbit spu/t-elf"
+ tmake_file="$tmake_file spu/t-elf t-libgcc-pic t-fdpbit"
+ extra_parts="$extra_parts \
+ libgcc_cachemgr.a libgcc_cachemgr_nonatomic.a \
+ libgcc_cache8k.a libgcc_cache16k.a libgcc_cache32k.a \
+ libgcc_cache64k.a libgcc_cache128k.a"
+ ;;
+tic6x-*-uclinux)
+ tmake_file="${tmake_file} t-softfp-sfdf t-softfp-excl t-softfp c6x/t-elf c6x/t-uclinux t-crtstuff-pic t-libgcc-pic t-slibgcc t-slibgcc-gld t-slibgcc-elf-ver t-gnu-prefix"
+ tm_file="$tm_file c6x/c6x-abi.h"
+ extra_parts="crtbeginS.o crtendS.o crti.o crtn.o"
+ unwind_header=config/c6x/unwind-c6x.h
;;
-tic6x-*-*)
- tmake_file="${tmake_file} t-softfp-sfdf t-softfp-excl t-softfp t-gnu-prefix c6x/t-c6x-elf"
+tic6x-*-elf)
+ tmake_file="${tmake_file} t-softfp-sfdf t-softfp-excl t-softfp t-gnu-prefix c6x/t-elf"
+ tm_file="$tm_file c6x/c6x-abi.h"
+ extra_parts="$extra_parts crtbeginS.o crtendS.o crti.o crtn.o"
unwind_header=config/c6x/unwind-c6x.h
;;
v850*-*-*)
- tmake_file=t-fdpbit
+ tmake_file="v850/t-v850 t-fdpbit"
;;
vax-*-linux*)
+ tmake_file="$tmake_file vax/t-linux"
;;
vax-*-netbsdelf*)
;;
vax-*-openbsd*)
;;
xstormy16-*-elf)
- tmake_file=t-fdpbit
+ tmake_file="stormy16/t-stormy16 t-fdpbit"
;;
xtensa*-*-elf*)
- tmake_file=xtensa/t-xtensa
+ tmake_file="$tmake_file xtensa/t-xtensa xtensa/t-elf"
+ extra_parts="$extra_parts crti.o crtn.o"
;;
xtensa*-*-linux*)
- tmake_file=xtensa/t-xtensa
+ tmake_file="$tmake_file xtensa/t-xtensa xtensa/t-linux"
md_unwind_header=xtensa/linux-unwind.h
;;
am33_2.0-*-linux*)
+ # Don't need crtbeginT.o from *-*-linux* default.
extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o"
- tmake_file=t-fdpbit
+ tmake_file="$tmake_file t-fdpbit"
;;
m32c-*-elf*|m32c-*-rtems*)
+ tmake_file="$tmake_file m32c/t-m32c"
;;
mep*-*-*)
- tmake_file=t-fdpbit
+ tmake_file="mep/t-mep t-fdpbit"
+ extra_parts="crtbegin.o crtend.o"
;;
*)
echo "*** Configuration ${host} not supported" 1>&2
@@ -829,9 +1128,10 @@ esac
case ${host} in
i[34567]86-*-linux* | x86_64-*-linux* | \
- i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | \
+ i[34567]86-*-kfreebsd*-gnu | x86_64-*-kfreebsd*-gnu | \
+ i[34567]86-*-knetbsd*-gnu | \
i[34567]86-*-gnu*)
- tmake_file="${tmake_file} t-tls"
+ tmake_file="${tmake_file} t-tls i386/t-linux"
if test "$libgcc_cv_cfi" = "yes"; then
tmake_file="${tmake_file} t-stack i386/t-stack-i386"
fi
@@ -860,5 +1160,6 @@ i[34567]86-*-linux* | x86_64-*-linux*)
if test "${host_address}" = 64; then
tmake_file="${tmake_file} i386/${host_address}/t-softfp-compat"
fi
+ tm_file="${tm_file} i386/value-unwind.h"
;;
esac
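
The config.host variables set above reach the build through configure substitutions; a condensed sketch of the consuming side, assembled from the Makefile.in hunks earlier in this patch:

    # Every makefile fragment named by config.host is included
    # directly, so t-* files can append to variables and add rules.
    tmake_file = @tmake_file@
    include $(srcdir)/empty.mk $(tmake_file)

    # The per-host startfiles are built by the libgcc-extra-parts
    # target and depend on the generated libgcc_tm.h header.
    EXTRA_PARTS = @extra_parts@
    libgcc-extra-parts: $(EXTRA_PARTS)
    $(EXTRA_PARTS): libgcc_tm.h
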
diff --git a/libgcc/config/alpha/gthr-posix.c b/libgcc/config/alpha/gthr-posix.c
new file mode 100644
index 00000000000..02681a4371e
--- /dev/null
+++ b/libgcc/config/alpha/gthr-posix.c
@@ -0,0 +1,266 @@
+/* POSIX threads dummy routines for systems without weak definitions. */
+/* Compile this one with gcc. */
+/* Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "tconfig.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+# define __gthrw_pragma(pragma) _Pragma (#pragma)
+/* Define so we provide weak definitions of functions used by libobjc only. */
+#define _LIBOBJC_WEAK
+#include "gthr.h"
+
+int
+pthread_once (pthread_once_t *once ATTRIBUTE_UNUSED,
+ void (*func) (void) ATTRIBUTE_UNUSED)
+{
+ return -1;
+}
+
+int
+pthread_key_create (pthread_key_t *key ATTRIBUTE_UNUSED,
+ void (*dtor) (void *) ATTRIBUTE_UNUSED)
+{
+ return -1;
+}
+
+int
+pthread_key_delete (pthread_key_t key ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+void *
+pthread_getspecific (pthread_key_t key ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_setspecific (pthread_key_t key ATTRIBUTE_UNUSED,
+ const void *ptr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_create (pthread_t *thread ATTRIBUTE_UNUSED,
+ const pthread_attr_t *attr ATTRIBUTE_UNUSED,
+ void *(*start_routine) (void *) ATTRIBUTE_UNUSED,
+ void *arg ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_join (pthread_t thread ATTRIBUTE_UNUSED,
+ void **value_ptr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+void
+pthread_exit (void *value_ptr ATTRIBUTE_UNUSED)
+{
+}
+
+int
+pthread_detach (pthread_t thread ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cancel (pthread_t thread ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutex_lock (pthread_mutex_t *mutex ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutex_trylock (pthread_mutex_t *mutex ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+#ifdef _POSIX_TIMEOUTS
+#if _POSIX_TIMEOUTS >= 0
+int
+pthread_mutex_timedlock (pthread_mutex_t *mutex ATTRIBUTE_UNUSED,
+ const struct timespec *abs_timeout ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+#endif
+#endif /* _POSIX_TIMEOUTS */
+
+int
+pthread_mutex_unlock (pthread_mutex_t *mutex ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutexattr_init (pthread_mutexattr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutexattr_settype (pthread_mutexattr_t *attr ATTRIBUTE_UNUSED,
+ int type ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutexattr_destroy (pthread_mutexattr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_broadcast (pthread_cond_t *cond ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_destroy (pthread_cond_t *cond ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_init (pthread_cond_t *cond ATTRIBUTE_UNUSED,
+ const pthread_condattr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_signal (pthread_cond_t *cond ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_wait (pthread_cond_t *cond ATTRIBUTE_UNUSED,
+ pthread_mutex_t *mutex ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_cond_timedwait (pthread_cond_t *cond ATTRIBUTE_UNUSED,
+ pthread_mutex_t *mutex ATTRIBUTE_UNUSED,
+ const struct timespec *abstime ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutex_init (pthread_mutex_t *mutex ATTRIBUTE_UNUSED,
+ const pthread_mutexattr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_mutex_destroy (pthread_mutex_t *mutex ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+pthread_t
+pthread_self (void)
+{
+ return (pthread_t) 0;
+}
+
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+int
+sched_get_priority_max (int policy ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+sched_get_priority_min (int policy ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+#endif /* _POSIX_PRIORITY_SCHEDULING */
+
+int
+sched_yield (void)
+{
+ return 0;
+}
+
+int
+pthread_attr_destroy (pthread_attr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_attr_init (pthread_attr_t *attr ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_attr_setdetachstate (pthread_attr_t *attr ATTRIBUTE_UNUSED,
+ int detachstate ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+int
+pthread_getschedparam (pthread_t thread ATTRIBUTE_UNUSED,
+ int *policy ATTRIBUTE_UNUSED,
+ struct sched_param *param ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+int
+pthread_setschedparam (pthread_t thread ATTRIBUTE_UNUSED,
+ int policy ATTRIBUTE_UNUSED,
+ const struct sched_param *param ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+
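
A minimal sketch of how a gthr-style caller can degrade gracefully when these dummy routines are linked in: the nonzero return from pthread_once above signals that threads are not active, so initialization falls back to the single-threaded path. The names ensure_init and do_init are illustrative only, not part of this patch.

    #include <pthread.h>

    static pthread_once_t once = PTHREAD_ONCE_INIT;
    static int ready;

    static void
    do_init (void)
    {
      ready = 1;
    }

    void
    ensure_init (void)
    {
      /* With the dummy pthread_once linked in, the call returns -1,
         so perform the initialization inline instead.  */
      if (pthread_once (&once, do_init) != 0 && !ready)
        do_init ();
    }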
diff --git a/libgcc/config/alpha/libgcc-alpha-ldbl.ver b/libgcc/config/alpha/libgcc-alpha-ldbl.ver
new file mode 100644
index 00000000000..8dc54a74980
--- /dev/null
+++ b/libgcc/config/alpha/libgcc-alpha-ldbl.ver
@@ -0,0 +1,50 @@
+# Copyright (C) 2006 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+%ifdef __LONG_DOUBLE_128__
+
+# long double 128 bit support in libgcc_s.so.1 is only available
+# when configured with --with-long-double-128. Make sure all the
+# symbols are available at @@GCC_LDBL_* versions to make it clear
+# there is a configurable symbol set.
+
+%exclude {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%inherit GCC_LDBL_3.0 GCC_3.0
+GCC_LDBL_3.0 {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+}
+
+%inherit GCC_LDBL_4.0.0 GCC_4.0.0
+GCC_LDBL_4.0.0 {
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%endif
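
As an illustration only (not part of this map file), a client could bind against one of the versioned symbols exported above using the GNU assembler's .symver directive:

    /* Request the GCC_LDBL_3.0 version of __fixtfdi explicitly.  */
    extern long long __fixtfdi (long double);
    __asm__ (".symver __fixtfdi, __fixtfdi@GCC_LDBL_3.0");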
diff --git a/libgcc/config/alpha/qrnnd.S b/libgcc/config/alpha/qrnnd.S
new file mode 100644
index 00000000000..51b13bce6ad
--- /dev/null
+++ b/libgcc/config/alpha/qrnnd.S
@@ -0,0 +1,163 @@
+ # Alpha 21064 __udiv_qrnnd
+ # Copyright (C) 1992, 1994, 1995, 2000, 2009 Free Software Foundation, Inc.
+
+ # This file is part of GCC.
+
+ # The GNU MP Library is free software; you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation; either version 3 of the License, or (at your
+ # option) any later version.
+
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ # License for more details.
+
+ # Under Section 7 of GPL version 3, you are granted additional
+ # permissions described in the GCC Runtime Library Exception, version
+ # 3.1, as published by the Free Software Foundation.
+
+ # You should have received a copy of the GNU General Public License and
+ # a copy of the GCC Runtime Library Exception along with this program;
+ # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ # <http://www.gnu.org/licenses/>.
+
+#ifdef __ELF__
+.section .note.GNU-stack,""
+#endif
+
+ .set noreorder
+ .set noat
+
+ .text
+
+ .globl __udiv_qrnnd
+ .ent __udiv_qrnnd
+__udiv_qrnnd:
+ .frame $30,0,$26,0
+ .prologue 0
+
+#define cnt $2
+#define tmp $3
+#define rem_ptr $16
+#define n1 $17
+#define n0 $18
+#define d $19
+#define qb $20
+#define AT $at
+
+ ldiq cnt,16
+ blt d,$largedivisor
+
+$loop1: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop1
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$largedivisor:
+ and n0,1,$4
+
+ srl n0,1,n0
+ sll n1,63,tmp
+ or tmp,n0,n0
+ srl n1,1,n1
+
+ and d,1,$6
+ srl d,1,$5
+ addq $5,$6,$5
+
+$loop2: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop2
+
+ addq n1,n1,n1
+ addq $4,n1,n1
+ bne $6,$Odd
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$Odd:
+ /* q' in n0. r' in n1 */
+ addq n1,n0,n1
+
+ cmpult n1,n0,tmp # tmp := carry from addq
+ subq n1,d,AT
+ addq n0,tmp,n0
+ cmovne tmp,AT,n1
+
+ cmpult n1,d,tmp
+ addq n0,1,AT
+ cmoveq tmp,AT,n0
+ subq n1,d,AT
+ cmoveq tmp,AT,n1
+
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+ .end __udiv_qrnnd
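
For reference, a hedged C model of the contract implemented above: divide the two-word value (n1:n0) by d, returning the quotient and storing the remainder, under the usual longlong.h precondition that n1 < d so the quotient fits in one word. The assembly realizes this with a shift-and-subtract loop (four unrolled steps, sixteen iterations); udiv_qrnnd_model is an illustrative name.

    typedef unsigned long long UDItype;

    UDItype
    udiv_qrnnd_model (UDItype *rem, UDItype n1, UDItype n0, UDItype d)
    {
      /* 128-bit arithmetic stands in for the bit-serial loop.  */
      unsigned __int128 n = ((unsigned __int128) n1 << 64) | n0;
      *rem = (UDItype) (n % d);
      return (UDItype) (n / d);
    }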
diff --git a/libgcc/config/alpha/t-alpha b/libgcc/config/alpha/t-alpha
index 14c72d0808b..0b6ffb1ba34 100644
--- a/libgcc/config/alpha/t-alpha
+++ b/libgcc/config/alpha/t-alpha
@@ -1,2 +1,2 @@
# This is a support routine for longlong.h, used by libgcc2.c.
-LIB2ADD += $(gcc_srcdir)/config/alpha/qrnnd.asm
+LIB2ADD += $(srcdir)/config/alpha/qrnnd.S
diff --git a/libgcc/config/alpha/t-linux b/libgcc/config/alpha/t-linux
new file mode 100644
index 00000000000..fabf38f9cce
--- /dev/null
+++ b/libgcc/config/alpha/t-linux
@@ -0,0 +1 @@
+SHLIB_MAPFILES += $(srcdir)/config/alpha/libgcc-alpha-ldbl.ver
diff --git a/libgcc/config/alpha/t-osf-pthread b/libgcc/config/alpha/t-osf-pthread
index c51f375a048..9a175dbeb9e 100644
--- a/libgcc/config/alpha/t-osf-pthread
+++ b/libgcc/config/alpha/t-osf-pthread
@@ -2,4 +2,4 @@
HOST_LIBGCC2_CFLAGS += -pthread
# Provide dummy POSIX threads functions
-LIB2ADD += $(gcc_srcdir)/gthr-posix.c
+LIB2ADD += $(srcdir)/config/alpha/gthr-posix.c
diff --git a/libgcc/config/alpha/t-slibgcc-osf b/libgcc/config/alpha/t-slibgcc-osf
index 33a07a7b6aa..66e5cf12aa1 100644
--- a/libgcc/config/alpha/t-slibgcc-osf
+++ b/libgcc/config/alpha/t-slibgcc-osf
@@ -22,7 +22,7 @@
SHLIB_LDFLAGS = -Wl,-msym -Wl,-set_version,gcc.1 -Wl,-soname,$(SHLIB_SONAME) \
-Wl,-hidden -Wl,-input,$(SHLIB_MAP)
-SHLIB_MKMAP = $(gcc_srcdir)/mkmap-flat.awk
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MKMAP_OPTS = -v osf_export=1
# Needed so mkmap-flat.awk can parse the nm output.
SHLIB_NM_FLAGS = -Bg
diff --git a/libgcc/config/alpha/t-vms b/libgcc/config/alpha/t-vms
new file mode 100644
index 00000000000..dd5760d9747
--- /dev/null
+++ b/libgcc/config/alpha/t-vms
@@ -0,0 +1,9 @@
+# This object must be linked with in order to make the executable debuggable.
+# vms-ld handles it automatically when passed -g.
+vms-dwarf2.o: $(srcdir)/config/alpha/vms-dwarf2.S
+ $(gcc_compile) -c -x assembler-with-cpp $<
+
+vms-dwarf2eh.o: $(srcdir)/config/alpha/vms-dwarf2eh.S
+ $(gcc_compile) -c -x assembler-with-cpp $<
+
+LIB2ADD += $(srcdir)/config/alpha/vms-gcc_shell_handler.c
diff --git a/libgcc/config/alpha/vms-dwarf2.S b/libgcc/config/alpha/vms-dwarf2.S
new file mode 100644
index 00000000000..531c7aa9984
--- /dev/null
+++ b/libgcc/config/alpha/vms-dwarf2.S
@@ -0,0 +1,77 @@
+/* VMS dwarf2 section sequentializer.
+ Copyright (C) 2001, 2009 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Linking with this file forces Dwarf2 debug sections to be
+ sequentially loaded by the VMS linker, enabling GDB to read them. */
+
+.section .debug_abbrev,NOWRT
+ .align 0
+ .globl $dwarf2.debug_abbrev
+$dwarf2.debug_abbrev:
+
+.section .debug_aranges,NOWRT
+ .align 0
+ .globl $dwarf2.debug_aranges
+$dwarf2.debug_aranges:
+
+.section .debug_frame,NOWRT
+ .align 0
+ .globl $dwarf2.debug_frame
+$dwarf2.debug_frame:
+
+.section .debug_info,NOWRT
+ .align 0
+ .globl $dwarf2.debug_info
+$dwarf2.debug_info:
+
+.section .debug_line,NOWRT
+ .align 0
+ .globl $dwarf2.debug_line
+$dwarf2.debug_line:
+
+.section .debug_loc,NOWRT
+ .align 0
+ .globl $dwarf2.debug_loc
+$dwarf2.debug_loc:
+
+.section .debug_macinfo,NOWRT
+ .align 0
+ .globl $dwarf2.debug_macinfo
+$dwarf2.debug_macinfo:
+
+.section .debug_pubnames,NOWRT
+ .align 0
+ .globl $dwarf2.debug_pubnames
+$dwarf2.debug_pubnames:
+
+.section .debug_str,NOWRT
+ .align 0
+ .globl $dwarf2.debug_str
+$dwarf2.debug_str:
+
+.section .debug_zzzzzz,NOWRT
+ .align 0
+ .globl $dwarf2.debug_zzzzzz
+$dwarf2.debug_zzzzzz:
diff --git a/libgcc/config/alpha/vms-dwarf2eh.S b/libgcc/config/alpha/vms-dwarf2eh.S
new file mode 100644
index 00000000000..e0eaf9d3741
--- /dev/null
+++ b/libgcc/config/alpha/vms-dwarf2eh.S
@@ -0,0 +1,30 @@
+/* VMS dwarf2 exception handling section sequentializer.
+ Copyright (C) 2002, 2009 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Linking with this file forces the Dwarf2 EH section to be
+   individually loaded by the VMS linker, so the unwinder can read it. */
+
+.section .eh_frame,NOWRT
+ .align 0
diff --git a/libgcc/config/alpha/vms-gcc_shell_handler.c b/libgcc/config/alpha/vms-gcc_shell_handler.c
new file mode 100644
index 00000000000..67d0fe7f9aa
--- /dev/null
+++ b/libgcc/config/alpha/vms-gcc_shell_handler.c
@@ -0,0 +1,124 @@
+/* Static condition handler for Alpha/VMS.
+ Copyright (C) 2005-2009
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file implements __gcc_shell_handler, the static VMS condition handler
+ used as the indirection wrapper around user level handlers installed with
+ establish_vms_condition_handler GCC builtin.
+
+ [ABI] in comments refers to the "HP OpenVMS calling standard" document
+ dated January 2005. */
+
+#include <vms/chfdef.h>
+#include <vms/pdscdef.h>
+#include <vms/ssdef.h>
+
+typedef void * ADDR;
+typedef unsigned long long REG;
+
+#define REG_AT(addr) (*(REG *)(addr))
+
+/* Compute pointer to procedure descriptor (Procedure Value) from Frame
+ Pointer FP, according to the rules in [ABI-3.5.1 Current Procedure]. */
+#define PV_FOR(FP) \
+ (((FP) != 0) \
+ ? (((REG_AT (FP) & 0x7) == 0) ? *(PDSCDEF **)(FP) : (PDSCDEF *)(FP)) : 0)
+
+long
+__gcc_shell_handler (struct chf$signal_array *sig_arr,
+ struct chf$mech_array *mech_arr);
+
+/* Helper for __gcc_shell_handler. Fetch the pointer to procedure currently
+ registered as the VMS condition handler for the live function with a frame
+ pointer FP. */
+
+static ADDR
+get_dyn_handler_pointer (REG fp)
+{
+ /* From the frame pointer we find the procedure descriptor, and fetch
+ the handler_data field from there. This field contains the offset
+ from FP at which the address of the currently installed handler is
+ to be found. */
+
+ PDSCDEF * pd = PV_FOR (fp);
+ /* Procedure descriptor pointer for the live subprogram with FP as the frame
+   pointer, and to which __gcc_shell_handler is attached as a condition
+ handler. */
+
+ REG handler_slot_offset;
+ /* Offset from FP at which the address of the currently established real
+ condition handler is to be found. This offset is available from the
+ handler_data field of the procedure descriptor. */
+
+ REG handler_data_offset;
+ /* The handler_data field position in the procedure descriptor, which
+ depends on the kind of procedure at hand. */
+
+ switch (pd->pdsc$w_flags & 0xf)
+ {
+ case PDSC$K_KIND_FP_STACK: /* [3.4.2 PD for stack frame procedures] */
+ handler_data_offset = 40;
+ break;
+
+ case PDSC$K_KIND_FP_REGISTER: /* [3.4.5 PD for reg frame procedures] */
+ handler_data_offset = 32;
+ break;
+
+ default:
+ handler_data_offset = 0;
+ break;
+ }
+
+ /* If we couldn't determine the handler_data field position, give up. */
+ if (handler_data_offset == 0)
+ return 0;
+
+ /* Otherwise, fetch the fp offset at which the real handler address is to be
+ found, then fetch and return the latter in turn. */
+
+ handler_slot_offset = REG_AT ((REG)pd + handler_data_offset);
+
+ return (ADDR) REG_AT (fp + handler_slot_offset);
+}
+
+/* The static VMS condition handler for GCC code. Fetch the address of the
+ currently established condition handler, then resignal if there is none or
+ call the handler with the VMS condition arguments. */
+
+long
+__gcc_shell_handler (struct chf$signal_array *sig_arr,
+ struct chf$mech_array *mech_arr)
+{
+ long ret;
+ long (*user_handler) (struct chf$signal_array *, struct chf$mech_array *);
+
+ user_handler = get_dyn_handler_pointer (mech_arr->chf$q_mch_frame);
+ if (!user_handler)
+ ret = SS$_RESIGNAL;
+ else
+ ret = user_handler (sig_arr, mech_arr);
+
+ return ret;
+}
+
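A hedged usage sketch: a handler installed through the builtin named in the header comment above is exactly what get_dyn_handler_pointer retrieves. my_handler and protected_region are illustrative names.

    /* Illustrative user-level condition handler.  */
    static long
    my_handler (struct chf$signal_array *sig_arr,
                struct chf$mech_array *mech_arr)
    {
      /* Decide whether to handle the condition; resignal by default.  */
      return SS$_RESIGNAL;
    }

    void
    protected_region (void)
    {
      __builtin_establish_vms_condition_handler (my_handler);
      /* Conditions raised here reach my_handler via __gcc_shell_handler.  */
    }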
diff --git a/libgcc/config/arm/bpabi-v6m.S b/libgcc/config/arm/bpabi-v6m.S
new file mode 100644
index 00000000000..4ecea6da5a6
--- /dev/null
+++ b/libgcc/config/arm/bpabi-v6m.S
@@ -0,0 +1,318 @@
+/* Miscellaneous BPABI functions. ARMv6M implementation
+
+ Copyright (C) 2006, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __ARM_EABI__
+/* Some attributes that are common to all routines in this file. */
+ /* Tag_ABI_align_needed: This code does not require 8-byte
+ alignment from the caller. */
+ /* .eabi_attribute 24, 0 -- default setting. */
+ /* Tag_ABI_align_preserved: This code preserves 8-byte
+ alignment in any callee. */
+ .eabi_attribute 25, 1
+#endif /* __ARM_EABI__ */
+
+#ifdef L_aeabi_lcmp
+
+FUNC_START aeabi_lcmp
+ cmp xxh, yyh
+ beq 1f
+ bgt 2f
+ mov r0, #1
+ neg r0, r0
+ RET
+2:
+ mov r0, #1
+ RET
+1:
+ sub r0, xxl, yyl
+ beq 1f
+ bhi 2f
+ mov r0, #1
+ neg r0, r0
+ RET
+2:
+ mov r0, #1
+1:
+ RET
+ FUNC_END aeabi_lcmp
+
+#endif /* L_aeabi_lcmp */
+
+#ifdef L_aeabi_ulcmp
+
+FUNC_START aeabi_ulcmp
+ cmp xxh, yyh
+ bne 1f
+ sub r0, xxl, yyl
+ beq 2f
+1:
+ bcs 1f
+ mov r0, #1
+ neg r0, r0
+ RET
+1:
+ mov r0, #1
+2:
+ RET
+ FUNC_END aeabi_ulcmp
+
+#endif /* L_aeabi_ulcmp */
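
In C terms, both routines above implement a three-way compare returning a negative, zero, or positive result; a hedged model follows (lcmp_model and ulcmp_model are illustrative names):

    int
    lcmp_model (long long a, long long b)
    {
      return (a > b) - (a < b);
    }

    int
    ulcmp_model (unsigned long long a, unsigned long long b)
    {
      return (a > b) - (a < b);
    }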
+
+.macro test_div_by_zero signed
+ cmp yyh, #0
+ bne 7f
+ cmp yyl, #0
+ bne 7f
+ cmp xxh, #0
+ bne 2f
+ cmp xxl, #0
+2:
+ .ifc \signed, unsigned
+ beq 3f
+ mov xxh, #0
+ mvn xxh, xxh @ 0xffffffff
+ mov xxl, xxh
+3:
+ .else
+ beq 5f
+ blt 6f
+ mov xxl, #0
+ mvn xxl, xxl @ 0xffffffff
+ lsr xxh, xxl, #1 @ 0x7fffffff
+ b 5f
+6: mov xxh, #0x80
+ lsl xxh, xxh, #24 @ 0x80000000
+ mov xxl, #0
+5:
+ .endif
+	@ Tail calls are tricky on v6-m.
+ push {r0, r1, r2}
+ ldr r0, 1f
+ adr r1, 1f
+ add r0, r1
+ str r0, [sp, #8]
+ @ We know we are not on armv4t, so pop pc is safe.
+ pop {r0, r1, pc}
+ .align 2
+1:
+ .word __aeabi_ldiv0 - 1b
+7:
+.endm
+
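Before tail-calling __aeabi_ldiv0, the macro above saturates the 64-bit dividend held in xxh:xxl; a hedged C model of the signed variant follows (saturated_dividend is an illustrative name; the unsigned variant yields 0 or ULLONG_MAX instead):

    #include <limits.h>

    long long
    saturated_dividend (long long x)
    {
      if (x == 0)
        return 0;                        /* 0 / 0 yields 0 */
      return x > 0 ? LLONG_MAX : LLONG_MIN;
    }
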
+#ifdef L_aeabi_ldivmod
+
+FUNC_START aeabi_ldivmod
+ test_div_by_zero signed
+
+ push {r0, r1}
+ mov r0, sp
+ push {r0, lr}
+ ldr r0, [sp, #8]
+ bl SYM(__gnu_ldivmod_helper)
+ ldr r3, [sp, #4]
+ mov lr, r3
+ add sp, sp, #8
+ pop {r2, r3}
+ RET
+ FUNC_END aeabi_ldivmod
+
+#endif /* L_aeabi_ldivmod */
+
+#ifdef L_aeabi_uldivmod
+
+FUNC_START aeabi_uldivmod
+ test_div_by_zero unsigned
+
+ push {r0, r1}
+ mov r0, sp
+ push {r0, lr}
+ ldr r0, [sp, #8]
+ bl SYM(__gnu_uldivmod_helper)
+ ldr r3, [sp, #4]
+ mov lr, r3
+ add sp, sp, #8
+ pop {r2, r3}
+ RET
+ FUNC_END aeabi_uldivmod
+
+#endif /* L_aeabi_uldivmod */
+
+#ifdef L_arm_addsubsf3
+
+FUNC_START aeabi_frsub
+
+ push {r4, lr}
+ mov r4, #1
+ lsl r4, #31
+ eor r0, r0, r4
+ bl __aeabi_fadd
+ pop {r4, pc}
+
+ FUNC_END aeabi_frsub
+
+#endif /* L_arm_addsubsf3 */
+
+#ifdef L_arm_cmpsf2
+
+FUNC_START aeabi_cfrcmple
+
+ mov ip, r0
+ mov r0, r1
+ mov r1, ip
+ b 6f
+
+FUNC_START aeabi_cfcmpeq
+FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+6: push {r0, r1, r2, r3, r4, lr}
+ bl __lesf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ bmi 1f
+ mov r1, #0
+ cmn r0, r1
+1:
+ pop {r0, r1, r2, r3, r4, pc}
+
+ FUNC_END aeabi_cfcmple
+ FUNC_END aeabi_cfcmpeq
+ FUNC_END aeabi_cfrcmple
+
+FUNC_START aeabi_fcmpeq
+
+ push {r4, lr}
+ bl __eqsf2
+ neg r0, r0
+ add r0, r0, #1
+ pop {r4, pc}
+
+ FUNC_END aeabi_fcmpeq
+
+.macro COMPARISON cond, helper, mode=sf2
+FUNC_START aeabi_fcmp\cond
+
+ push {r4, lr}
+ bl __\helper\mode
+ cmp r0, #0
+ b\cond 1f
+ mov r0, #0
+ pop {r4, pc}
+1:
+ mov r0, #1
+ pop {r4, pc}
+
+ FUNC_END aeabi_fcmp\cond
+.endm
+
+COMPARISON lt, le
+COMPARISON le, le
+COMPARISON gt, ge
+COMPARISON ge, ge
+
+#endif /* L_arm_cmpsf2 */
+
+#ifdef L_arm_addsubdf3
+
+FUNC_START aeabi_drsub
+
+ push {r4, lr}
+ mov r4, #1
+ lsl r4, #31
+ eor xxh, xxh, r4
+ bl __aeabi_dadd
+ pop {r4, pc}
+
+ FUNC_END aeabi_drsub
+
+#endif /* L_arm_addsubdf3 */
+
+#ifdef L_arm_cmpdf2
+
+FUNC_START aeabi_cdrcmple
+
+ mov ip, r0
+ mov r0, r2
+ mov r2, ip
+ mov ip, r1
+ mov r1, r3
+ mov r3, ip
+ b 6f
+
+FUNC_START aeabi_cdcmpeq
+FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+6: push {r0, r1, r2, r3, r4, lr}
+ bl __ledf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ bmi 1f
+ mov r1, #0
+ cmn r0, r1
+1:
+ pop {r0, r1, r2, r3, r4, pc}
+
+ FUNC_END aeabi_cdcmple
+ FUNC_END aeabi_cdcmpeq
+ FUNC_END aeabi_cdrcmple
+
+FUNC_START aeabi_dcmpeq
+
+ push {r4, lr}
+ bl __eqdf2
+ neg r0, r0
+ add r0, r0, #1
+ pop {r4, pc}
+
+ FUNC_END aeabi_dcmpeq
+
+.macro COMPARISON cond, helper, mode=df2
+FUNC_START aeabi_dcmp\cond
+
+ push {r4, lr}
+ bl __\helper\mode
+ cmp r0, #0
+ b\cond 1f
+ mov r0, #0
+ pop {r4, pc}
+1:
+ mov r0, #1
+ pop {r4, pc}
+
+ FUNC_END aeabi_dcmp\cond
+.endm
+
+COMPARISON lt, le
+COMPARISON le, le
+COMPARISON gt, ge
+COMPARISON ge, ge
+
+#endif /* L_arm_cmpdf2 */
diff --git a/libgcc/config/arm/bpabi.S b/libgcc/config/arm/bpabi.S
new file mode 100644
index 00000000000..2ff338927fa
--- /dev/null
+++ b/libgcc/config/arm/bpabi.S
@@ -0,0 +1,163 @@
+/* Miscellaneous BPABI functions.
+
+ Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __ARM_EABI__
+/* Some attributes that are common to all routines in this file. */
+ /* Tag_ABI_align_needed: This code does not require 8-byte
+ alignment from the caller. */
+ /* .eabi_attribute 24, 0 -- default setting. */
+ /* Tag_ABI_align_preserved: This code preserves 8-byte
+ alignment in any callee. */
+ .eabi_attribute 25, 1
+#endif /* __ARM_EABI__ */
+
+#ifdef L_aeabi_lcmp
+
+ARM_FUNC_START aeabi_lcmp
+ cmp xxh, yyh
+ do_it lt
+ movlt r0, #-1
+ do_it gt
+ movgt r0, #1
+ do_it ne
+ RETc(ne)
+ subs r0, xxl, yyl
+ do_it lo
+ movlo r0, #-1
+ do_it hi
+ movhi r0, #1
+ RET
+ FUNC_END aeabi_lcmp
+
+#endif /* L_aeabi_lcmp */
+
+#ifdef L_aeabi_ulcmp
+
+ARM_FUNC_START aeabi_ulcmp
+ cmp xxh, yyh
+ do_it lo
+ movlo r0, #-1
+ do_it hi
+ movhi r0, #1
+ do_it ne
+ RETc(ne)
+ cmp xxl, yyl
+ do_it lo
+ movlo r0, #-1
+ do_it hi
+ movhi r0, #1
+ do_it eq
+ moveq r0, #0
+ RET
+ FUNC_END aeabi_ulcmp
+
+#endif /* L_aeabi_ulcmp */
+
+.macro test_div_by_zero signed
+/* Tail-call to divide-by-zero handlers which may be overridden by the user,
+ so unwinding works properly. */
+#if defined(__thumb2__)
+ cbnz yyh, 1f
+ cbnz yyl, 1f
+ cmp xxh, #0
+ do_it eq
+ cmpeq xxl, #0
+ .ifc \signed, unsigned
+ beq 2f
+ mov xxh, #0xffffffff
+ mov xxl, xxh
+2:
+ .else
+ do_it lt, t
+ movlt xxl, #0
+ movlt xxh, #0x80000000
+ do_it gt, t
+ movgt xxh, #0x7fffffff
+ movgt xxl, #0xffffffff
+ .endif
+ b SYM (__aeabi_ldiv0) __PLT__
+1:
+#else
+ /* Note: Thumb-1 code calls via an ARM shim on processors which
+ support ARM mode. */
+ cmp yyh, #0
+ cmpeq yyl, #0
+ bne 2f
+ cmp xxh, #0
+ cmpeq xxl, #0
+ .ifc \signed, unsigned
+ movne xxh, #0xffffffff
+ movne xxl, #0xffffffff
+ .else
+ movlt xxh, #0x80000000
+ movlt xxl, #0
+ movgt xxh, #0x7fffffff
+ movgt xxl, #0xffffffff
+ .endif
+ b SYM (__aeabi_ldiv0) __PLT__
+2:
+#endif
+.endm
+
+#ifdef L_aeabi_ldivmod
+
+ARM_FUNC_START aeabi_ldivmod
+ test_div_by_zero signed
+
+ sub sp, sp, #8
+#if defined(__thumb2__)
+ mov ip, sp
+ push {ip, lr}
+#else
+ do_push {sp, lr}
+#endif
+ bl SYM(__gnu_ldivmod_helper) __PLT__
+ ldr lr, [sp, #4]
+ add sp, sp, #8
+ do_pop {r2, r3}
+ RET
+
+#endif /* L_aeabi_ldivmod */
+
+#ifdef L_aeabi_uldivmod
+
+ARM_FUNC_START aeabi_uldivmod
+ test_div_by_zero unsigned
+
+ sub sp, sp, #8
+#if defined(__thumb2__)
+ mov ip, sp
+ push {ip, lr}
+#else
+ do_push {sp, lr}
+#endif
+ bl SYM(__gnu_uldivmod_helper) __PLT__
+ ldr lr, [sp, #4]
+ add sp, sp, #8
+ do_pop {r2, r3}
+ RET
+
+#endif /* L_aeabi_uldivmod */
+
diff --git a/libgcc/config/arm/bpabi.c b/libgcc/config/arm/bpabi.c
new file mode 100644
index 00000000000..283bdc0acf0
--- /dev/null
+++ b/libgcc/config/arm/bpabi.c
@@ -0,0 +1,56 @@
+/* Miscellaneous BPABI functions.
+
+ Copyright (C) 2003, 2004, 2009 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+extern long long __divdi3 (long long, long long);
+extern unsigned long long __udivdi3 (unsigned long long,
+ unsigned long long);
+extern long long __gnu_ldivmod_helper (long long, long long, long long *);
+extern unsigned long long __gnu_uldivmod_helper (unsigned long long,
+ unsigned long long,
+ unsigned long long *);
+
+
+long long
+__gnu_ldivmod_helper (long long a,
+ long long b,
+ long long *remainder)
+{
+ long long quotient;
+
+ quotient = __divdi3 (a, b);
+ *remainder = a - b * quotient;
+ return quotient;
+}
+
+unsigned long long
+__gnu_uldivmod_helper (unsigned long long a,
+ unsigned long long b,
+ unsigned long long *remainder)
+{
+ unsigned long long quotient;
+
+ quotient = __udivdi3 (a, b);
+ *remainder = a - b * quotient;
+ return quotient;
+}
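
A hedged usage sketch of the contract: the helpers return the truncating quotient and store the matching remainder, so the division identity a == b * quot + rem always holds.

    #include <assert.h>

    extern long long __gnu_ldivmod_helper (long long, long long, long long *);

    int
    main (void)
    {
      long long rem;
      long long quot = __gnu_ldivmod_helper (-7, 3, &rem);
      /* C truncating division: quotient -2, remainder -1.  */
      assert (quot == -2 && rem == -1);
      assert (-7 == 3 * quot + rem);
      return 0;
    }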
diff --git a/libgcc/config/arm/crti.S b/libgcc/config/arm/crti.S
new file mode 100644
index 00000000000..50915f9e31f
--- /dev/null
+++ b/libgcc/config/arm/crti.S
@@ -0,0 +1,86 @@
+# Copyright (C) 2001, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+# Written By Nick Clifton
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+/* An executable stack is *not* required for these functions. */
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+#ifdef __ELF__
+#define TYPE(x) .type x,function
+#else
+#define TYPE(x)
+#endif
+#ifdef __ARM_EABI__
+/* Some attributes that are common to all routines in this file. */
+ /* Tag_ABI_align_needed: This code does not require 8-byte
+ alignment from the caller. */
+ /* .eabi_attribute 24, 0 -- default setting. */
+ /* Tag_ABI_align_preserved: This code preserves 8-byte
+ alignment in any callee. */
+ .eabi_attribute 25, 1
+#endif /* __ARM_EABI__ */
+
+ # Note - this macro is complemented by the FUNC_END macro
+ # in crtn.S. If you change this macro you must also change
+	# that macro to match.
+.macro FUNC_START
+#ifdef __thumb__
+ .thumb
+
+ push {r3, r4, r5, r6, r7, lr}
+#else
+ .arm
+ # Create a stack frame and save any call-preserved registers
+ mov ip, sp
+ stmdb sp!, {r3, r4, r5, r6, r7, r8, r9, sl, fp, ip, lr, pc}
+ sub fp, ip, #4
+#endif
+.endm
+
+ .section ".init"
+ .align 2
+ .global _init
+#ifdef __thumb__
+ .thumb_func
+#endif
+ TYPE(_init)
+_init:
+ FUNC_START
+
+
+ .section ".fini"
+ .align 2
+ .global _fini
+#ifdef __thumb__
+ .thumb_func
+#endif
+ TYPE(_fini)
+_fini:
+ FUNC_START
+
+# end of crti.S
diff --git a/libgcc/config/arm/crtn.S b/libgcc/config/arm/crtn.S
new file mode 100644
index 00000000000..8c5f22572f7
--- /dev/null
+++ b/libgcc/config/arm/crtn.S
@@ -0,0 +1,83 @@
+# Copyright (C) 2001, 2004, 2008, 2009, 2010, 2011
+# Free Software Foundation, Inc.
+# Written By Nick Clifton
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+/* An executable stack is *not* required for these functions. */
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif
+
+#ifdef __ARM_EABI__
+/* Some attributes that are common to all routines in this file. */
+ /* Tag_ABI_align_needed: This code does not require 8-byte
+ alignment from the caller. */
+ /* .eabi_attribute 24, 0 -- default setting. */
+ /* Tag_ABI_align_preserved: This code preserves 8-byte
+ alignment in any callee. */
+ .eabi_attribute 25, 1
+#endif /* __ARM_EABI__ */
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ # Note - this macro is complemented by the FUNC_START macro
+ # in crti.S. If you change this macro you must also change
+	# that macro to match.
+ #
+ # Note - we do not try any fancy optimizations of the return
+	# sequences here; it is just not worth it.  Instead keep things
+	# simple.  Restore all the saved registers, including the link
+	# register, and then perform the correct function return instruction.
+ # We also save/restore r3 to ensure stack alignment.
+.macro FUNC_END
+#ifdef __thumb__
+ .thumb
+
+ pop {r3, r4, r5, r6, r7}
+ pop {r3}
+ mov lr, r3
+#else
+ .arm
+
+ sub sp, fp, #40
+ ldmfd sp, {r4, r5, r6, r7, r8, r9, sl, fp, sp, lr}
+#endif
+
+#if defined __THUMB_INTERWORK__ || defined __thumb__
+ bx lr
+#else
+ mov pc, lr
+#endif
+.endm
+
+
+ .section ".init"
+ ;;
+ FUNC_END
+
+ .section ".fini"
+ ;;
+ FUNC_END
+
+# end of crtn.S
diff --git a/libgcc/config/arm/fp16.c b/libgcc/config/arm/fp16.c
new file mode 100644
index 00000000000..936caeb78d0
--- /dev/null
+++ b/libgcc/config/arm/fp16.c
@@ -0,0 +1,145 @@
+/* Half-float conversion routines.
+
+ Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+static inline unsigned short
+__gnu_f2h_internal(unsigned int a, int ieee)
+{
+ unsigned short sign = (a >> 16) & 0x8000;
+ int aexp = (a >> 23) & 0xff;
+ unsigned int mantissa = a & 0x007fffff;
+ unsigned int mask;
+ unsigned int increment;
+
+ if (aexp == 0xff)
+ {
+ if (!ieee)
+ return sign;
+ return sign | 0x7e00 | (mantissa >> 13);
+ }
+
+ if (aexp == 0 && mantissa == 0)
+ return sign;
+
+ aexp -= 127;
+
+ /* Decimal point between bits 22 and 23. */
+ mantissa |= 0x00800000;
+ if (aexp < -14)
+ {
+ mask = 0x007fffff;
+ if (aexp < -25)
+ aexp = -26;
+ else if (aexp != -25)
+ mask >>= 24 + aexp;
+ }
+ else
+ mask = 0x00001fff;
+
+ /* Round. */
+ if (mantissa & mask)
+ {
+ increment = (mask + 1) >> 1;
+ if ((mantissa & mask) == increment)
+ increment = mantissa & (increment << 1);
+ mantissa += increment;
+ if (mantissa >= 0x01000000)
+ {
+ mantissa >>= 1;
+ aexp++;
+ }
+ }
+
+ if (ieee)
+ {
+ if (aexp > 15)
+ return sign | 0x7c00;
+ }
+ else
+ {
+ if (aexp > 16)
+ return sign | 0x7fff;
+ }
+
+ if (aexp < -24)
+ return sign;
+
+ if (aexp < -14)
+ {
+ mantissa >>= -14 - aexp;
+ aexp = -14;
+ }
+
+ /* We leave the leading 1 in the mantissa, and subtract one
+ from the exponent bias to compensate. */
+ return sign | (((aexp + 14) << 10) + (mantissa >> 13));
+}
+
+unsigned int
+__gnu_h2f_internal(unsigned short a, int ieee)
+{
+ unsigned int sign = (unsigned int)(a & 0x8000) << 16;
+ int aexp = (a >> 10) & 0x1f;
+ unsigned int mantissa = a & 0x3ff;
+
+ if (aexp == 0x1f && ieee)
+ return sign | 0x7f800000 | (mantissa << 13);
+
+ if (aexp == 0)
+ {
+ int shift;
+
+ if (mantissa == 0)
+ return sign;
+
+ shift = __builtin_clz(mantissa) - 21;
+ mantissa <<= shift;
+ aexp = -shift;
+ }
+
+ return sign | (((aexp + 0x70) << 23) + (mantissa << 13));
+}
+
+unsigned short
+__gnu_f2h_ieee(unsigned int a)
+{
+ return __gnu_f2h_internal(a, 1);
+}
+
+unsigned int
+__gnu_h2f_ieee(unsigned short a)
+{
+ return __gnu_h2f_internal(a, 1);
+}
+
+unsigned short
+__gnu_f2h_alternative(unsigned int x)
+{
+ return __gnu_f2h_internal(x, 0);
+}
+
+unsigned int
+__gnu_h2f_alternative(unsigned short a)
+{
+ return __gnu_h2f_internal(a, 0);
+}
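
A hedged round-trip check of the IEEE-format entry points above: 1.0f is 0x3f800000 as a single-precision bit pattern and 0x3c00 as a binary16 pattern, and values exactly representable in half precision convert back bit-for-bit.

    #include <assert.h>
    #include <string.h>

    extern unsigned short __gnu_f2h_ieee (unsigned int);
    extern unsigned int __gnu_h2f_ieee (unsigned short);

    int
    main (void)
    {
      float f = 1.0f;
      unsigned int bits;
      memcpy (&bits, &f, sizeof bits);      /* 0x3f800000 */
      unsigned short h = __gnu_f2h_ieee (bits);
      assert (h == 0x3c00);                 /* 1.0 as binary16 */
      assert (__gnu_h2f_ieee (h) == bits);  /* exact round trip */
      return 0;
    }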
diff --git a/libgcc/config/arm/ieee754-df.S b/libgcc/config/arm/ieee754-df.S
new file mode 100644
index 00000000000..eb0c38632d0
--- /dev/null
+++ b/libgcc/config/arm/ieee754-df.S
@@ -0,0 +1,1447 @@
+/* ieee754-df.S double-precision floating point support for ARM
+
+ Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Nicolas Pitre (nico@cam.org)
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/*
+ * Notes:
+ *
+ * The goal of this code is to be as fast as possible. This is
+ * not meant to be easy to understand for the casual reader.
+ * For slightly simpler code please see the single precision version
+ * of this file.
+ *
+ * Only the default rounding mode is supported, for best performance.
+ * Exceptions aren't supported yet, but they can be added quite easily
+ * if necessary without impacting performance.
+ */
+
+
+@ For FPA, float words are always big-endian.
+@ For VFP, float words follow the memory system mode.
+#if defined(__VFP_FP__) && !defined(__ARMEB__)
+#define xl r0
+#define xh r1
+#define yl r2
+#define yh r3
+#else
+#define xh r0
+#define xl r1
+#define yh r2
+#define yl r3
+#endif
+
+
+#ifdef L_arm_negdf2
+
+ARM_FUNC_START negdf2
+ARM_FUNC_ALIAS aeabi_dneg negdf2
+
+ @ flip sign bit
+ eor xh, xh, #0x80000000
+ RET
+
+ FUNC_END aeabi_dneg
+ FUNC_END negdf2
+
+#endif
+
+#ifdef L_arm_addsubdf3
+
+ARM_FUNC_START aeabi_drsub
+
+ eor xh, xh, #0x80000000 @ flip sign bit of first arg
+ b 1f
+
+ARM_FUNC_START subdf3
+ARM_FUNC_ALIAS aeabi_dsub subdf3
+
+ eor yh, yh, #0x80000000 @ flip sign bit of second arg
+#if defined(__INTERWORKING_STUBS__)
+ b 1f @ Skip Thumb-code prologue
+#endif
+
+ARM_FUNC_START adddf3
+ARM_FUNC_ALIAS aeabi_dadd adddf3
+
+1: do_push {r4, r5, lr}
+
+ @ Look for zeroes, equal values, INF, or NAN.
+ shift1 lsl, r4, xh, #1
+ shift1 lsl, r5, yh, #1
+ teq r4, r5
+ do_it eq
+ teqeq xl, yl
+ do_it ne, ttt
+ COND(orr,s,ne) ip, r4, xl
+ COND(orr,s,ne) ip, r5, yl
+ COND(mvn,s,ne) ip, r4, asr #21
+ COND(mvn,s,ne) ip, r5, asr #21
+ beq LSYM(Lad_s)
+
+ @ Compute exponent difference. Make largest exponent in r4,
+ @ corresponding arg in xh-xl, and positive exponent difference in r5.
+ shift1 lsr, r4, r4, #21
+ rsbs r5, r4, r5, lsr #21
+ do_it lt
+ rsblt r5, r5, #0
+ ble 1f
+ add r4, r4, r5
+ eor yl, xl, yl
+ eor yh, xh, yh
+ eor xl, yl, xl
+ eor xh, yh, xh
+ eor yl, xl, yl
+ eor yh, xh, yh
+1:
+ @ If exponent difference is too large, return largest argument
+	@ already in xh-xl.  We need up to 54 bits to handle proper rounding
+ @ of 0x1p54 - 1.1.
+ cmp r5, #54
+ do_it hi
+ RETLDM "r4, r5" hi
+
+ @ Convert mantissa to signed integer.
+ tst xh, #0x80000000
+ mov xh, xh, lsl #12
+ mov ip, #0x00100000
+ orr xh, ip, xh, lsr #12
+ beq 1f
+#if defined(__thumb2__)
+ negs xl, xl
+ sbc xh, xh, xh, lsl #1
+#else
+ rsbs xl, xl, #0
+ rsc xh, xh, #0
+#endif
+1:
+ tst yh, #0x80000000
+ mov yh, yh, lsl #12
+ orr yh, ip, yh, lsr #12
+ beq 1f
+#if defined(__thumb2__)
+ negs yl, yl
+ sbc yh, yh, yh, lsl #1
+#else
+ rsbs yl, yl, #0
+ rsc yh, yh, #0
+#endif
+1:
+ @ If exponent == difference, one or both args were denormalized.
+	@ Since this is not a common case, rescale them out of line.
+ teq r4, r5
+ beq LSYM(Lad_d)
+LSYM(Lad_x):
+
+ @ Compensate for the exponent overlapping the mantissa MSB added later
+ sub r4, r4, #1
+
+ @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
+ rsbs lr, r5, #32
+ blt 1f
+ shift1 lsl, ip, yl, lr
+ shiftop adds xl xl yl lsr r5 yl
+ adc xh, xh, #0
+ shiftop adds xl xl yh lsl lr yl
+ shiftop adcs xh xh yh asr r5 yh
+ b 2f
+1: sub r5, r5, #32
+ add lr, lr, #32
+ cmp yl, #1
+ shift1 lsl,ip, yh, lr
+ do_it cs
+ orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
+ shiftop adds xl xl yh asr r5 yh
+ adcs xh, xh, yh, asr #31
+2:
+ @ We now have a result in xh-xl-ip.
+ @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
+ and r5, xh, #0x80000000
+ bpl LSYM(Lad_p)
+#if defined(__thumb2__)
+ mov lr, #0
+ negs ip, ip
+ sbcs xl, lr, xl
+ sbc xh, lr, xh
+#else
+ rsbs ip, ip, #0
+ rscs xl, xl, #0
+ rsc xh, xh, #0
+#endif
+
+ @ Determine how to normalize the result.
+LSYM(Lad_p):
+ cmp xh, #0x00100000
+ bcc LSYM(Lad_a)
+ cmp xh, #0x00200000
+ bcc LSYM(Lad_e)
+
+ @ Result needs to be shifted right.
+ movs xh, xh, lsr #1
+ movs xl, xl, rrx
+ mov ip, ip, rrx
+ add r4, r4, #1
+
+ @ Make sure we did not bust our exponent.
+ mov r2, r4, lsl #21
+ cmn r2, #(2 << 21)
+ bcs LSYM(Lad_o)
+
+ @ Our result is now properly aligned into xh-xl, remaining bits in ip.
+ @ Round with MSB of ip. If halfway between two numbers, round towards
+ @ LSB of xl = 0.
+ @ Pack final result together.
+LSYM(Lad_e):
+ cmp ip, #0x80000000
+ do_it eq
+ COND(mov,s,eq) ip, xl, lsr #1
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ orr xh, xh, r5
+ RETLDM "r4, r5"
+
+ @ Result must be shifted left and exponent adjusted.
+LSYM(Lad_a):
+ movs ip, ip, lsl #1
+ adcs xl, xl, xl
+ adc xh, xh, xh
+ tst xh, #0x00100000
+ sub r4, r4, #1
+ bne LSYM(Lad_e)
+
+ @ No rounding necessary since ip will always be 0 at this point.
+LSYM(Lad_l):
+
+#if __ARM_ARCH__ < 5
+
+ teq xh, #0
+ movne r3, #20
+ moveq r3, #52
+ moveq xh, xl
+ moveq xl, #0
+ mov r2, xh
+ cmp r2, #(1 << 16)
+ movhs r2, r2, lsr #16
+ subhs r3, r3, #16
+ cmp r2, #(1 << 8)
+ movhs r2, r2, lsr #8
+ subhs r3, r3, #8
+ cmp r2, #(1 << 4)
+ movhs r2, r2, lsr #4
+ subhs r3, r3, #4
+ cmp r2, #(1 << 2)
+ subhs r3, r3, #2
+ sublo r3, r3, r2, lsr #1
+ sub r3, r3, r2, lsr #3
+
+#else
+
+ teq xh, #0
+ do_it eq, t
+ moveq xh, xl
+ moveq xl, #0
+ clz r3, xh
+ do_it eq
+ addeq r3, r3, #32
+ sub r3, r3, #11
+
+#endif
+
+ @ determine how to shift the value.
+ subs r2, r3, #32
+ bge 2f
+ adds r2, r2, #12
+ ble 1f
+
+ @ shift value left 21 to 31 bits, or actually right 11 to 1 bits
+ @ since a register switch happened above.
+ add ip, r2, #20
+ rsb r2, r2, #12
+ shift1 lsl, xl, xh, ip
+ shift1 lsr, xh, xh, r2
+ b 3f
+
+ @ actually shift value left 1 to 20 bits, which might also represent
+ @ 32 to 52 bits if counting the register switch that happened earlier.
+1: add r2, r2, #20
+2: do_it le
+ rsble ip, r2, #32
+ shift1 lsl, xh, xh, r2
+#if defined(__thumb2__)
+ lsr ip, xl, ip
+ itt le
+ orrle xh, xh, ip
+ lslle xl, xl, r2
+#else
+ orrle xh, xh, xl, lsr ip
+ movle xl, xl, lsl r2
+#endif
+
+ @ adjust exponent accordingly.
+3: subs r4, r4, r3
+ do_it ge, tt
+ addge xh, xh, r4, lsl #20
+ orrge xh, xh, r5
+ RETLDM "r4, r5" ge
+
+ @ Exponent too small, denormalize result.
+ @ Find out proper shift value.
+ mvn r4, r4
+ subs r4, r4, #31
+ bge 2f
+ adds r4, r4, #12
+ bgt 1f
+
+ @ shift result right of 1 to 20 bits, sign is in r5.
+ add r4, r4, #20
+ rsb r2, r4, #32
+ shift1 lsr, xl, xl, r4
+ shiftop orr xl xl xh lsl r2 yh
+ shiftop orr xh r5 xh lsr r4 yh
+ RETLDM "r4, r5"
+
+ @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
+ @ a register switch from xh to xl.
+1: rsb r4, r4, #12
+ rsb r2, r4, #32
+ shift1 lsr, xl, xl, r2
+ shiftop orr xl xl xh lsl r4 yh
+ mov xh, r5
+ RETLDM "r4, r5"
+
+ @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
+ @ from xh to xl.
+2: shift1 lsr, xl, xh, r4
+ mov xh, r5
+ RETLDM "r4, r5"
+
+ @ Adjust exponents for denormalized arguments.
+ @ Note that r4 must not remain equal to 0.
+LSYM(Lad_d):
+ teq r4, #0
+ eor yh, yh, #0x00100000
+ do_it eq, te
+ eoreq xh, xh, #0x00100000
+ addeq r4, r4, #1
+ subne r5, r5, #1
+ b LSYM(Lad_x)
+
+
+LSYM(Lad_s):
+ mvns ip, r4, asr #21
+ do_it ne
+ COND(mvn,s,ne) ip, r5, asr #21
+ beq LSYM(Lad_i)
+
+ teq r4, r5
+ do_it eq
+ teqeq xl, yl
+ beq 1f
+
+ @ Result is x + 0.0 = x or 0.0 + y = y.
+ orrs ip, r4, xl
+ do_it eq, t
+ moveq xh, yh
+ moveq xl, yl
+ RETLDM "r4, r5"
+
+1: teq xh, yh
+
+ @ Result is x - x = 0.
+ do_it ne, tt
+ movne xh, #0
+ movne xl, #0
+ RETLDM "r4, r5" ne
+
+ @ Result is x + x = 2x.
+ movs ip, r4, lsr #21
+ bne 2f
+ movs xl, xl, lsl #1
+ adcs xh, xh, xh
+ do_it cs
+ orrcs xh, xh, #0x80000000
+ RETLDM "r4, r5"
+2: adds r4, r4, #(2 << 21)
+ do_it cc, t
+ addcc xh, xh, #(1 << 20)
+ RETLDM "r4, r5" cc
+ and r5, xh, #0x80000000
+
+ @ Overflow: return INF.
+LSYM(Lad_o):
+ orr xh, r5, #0x7f000000
+ orr xh, xh, #0x00f00000
+ mov xl, #0
+ RETLDM "r4, r5"
+
+ @ At least one of x or y is INF/NAN.
+ @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
+ @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
+ @ if either is NAN: return NAN
+ @ if opposite sign: return NAN
+ @ otherwise return xh-xl (which is INF or -INF)
+LSYM(Lad_i):
+ mvns ip, r4, asr #21
+ do_it ne, te
+ movne xh, yh
+ movne xl, yl
+ COND(mvn,s,eq) ip, r5, asr #21
+ do_it ne, t
+ movne yh, xh
+ movne yl, xl
+ orrs r4, xl, xh, lsl #12
+ do_it eq, te
+ COND(orr,s,eq) r5, yl, yh, lsl #12
+ teqeq xh, yh
+ orrne xh, xh, #0x00080000 @ quiet NAN
+ RETLDM "r4, r5"
+
+ FUNC_END aeabi_dsub
+ FUNC_END subdf3
+ FUNC_END aeabi_dadd
+ FUNC_END adddf3
+
+ARM_FUNC_START floatunsidf
+ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
+
+ teq r0, #0
+ do_it eq, t
+ moveq r1, #0
+ RETc(eq)
+ do_push {r4, r5, lr}
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+ mov r5, #0 @ sign bit is 0
+ .ifnc xl, r0
+ mov xl, r0
+ .endif
+ mov xh, #0
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_ui2d
+ FUNC_END floatunsidf
+
+ARM_FUNC_START floatsidf
+ARM_FUNC_ALIAS aeabi_i2d floatsidf
+
+ teq r0, #0
+ do_it eq, t
+ moveq r1, #0
+ RETc(eq)
+ do_push {r4, r5, lr}
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+ ands r5, r0, #0x80000000 @ sign bit in r5
+ do_it mi
+ rsbmi r0, r0, #0 @ absolute value
+ .ifnc xl, r0
+ mov xl, r0
+ .endif
+ mov xh, #0
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_i2d
+ FUNC_END floatsidf
+
+ARM_FUNC_START extendsfdf2
+ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
+
+ movs r2, r0, lsl #1 @ toss sign bit
+ mov xh, r2, asr #3 @ stretch exponent
+ mov xh, xh, rrx @ retrieve sign bit
+ mov xl, r2, lsl #28 @ retrieve remaining bits
+ do_it ne, ttt
+ COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent
+ teqne r3, #0xff000000 @ if not 0, check if INF or NAN
+ eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
+ RETc(ne) @ and return it.
+
+ teq r2, #0 @ if actually 0
+ do_it ne, e
+ teqne r3, #0xff000000 @ or INF or NAN
+ RETc(eq) @ we are done already.
+
+ @ value was denormalized. We can normalize it now.
+ do_push {r4, r5, lr}
+ mov r4, #0x380 @ setup corresponding exponent
+ and r5, xh, #0x80000000 @ move sign bit in r5
+ bic xh, xh, #0x80000000
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_f2d
+ FUNC_END extendsfdf2
+
+ARM_FUNC_START floatundidf
+ARM_FUNC_ALIAS aeabi_ul2d floatundidf
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ do_it eq, t
+ mvfeqd f0, #0.0
+#else
+ do_it eq
+#endif
+ RETc(eq)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0/r1 for backwards
+ @ compatibility.
+ adr ip, LSYM(f0_ret)
+ @ Push pc as well so that RETLDM works correctly.
+ do_push {r4, r5, ip, lr, pc}
+#else
+ do_push {r4, r5, lr}
+#endif
+
+ mov r5, #0
+ b 2f
+
+ARM_FUNC_START floatdidf
+ARM_FUNC_ALIAS aeabi_l2d floatdidf
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ do_it eq, t
+ mvfeqd f0, #0.0
+#else
+ do_it eq
+#endif
+ RETc(eq)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0/r1 for backwards
+ @ compatibility.
+ adr ip, LSYM(f0_ret)
+ @ Push pc as well so that RETLDM works correctly.
+ do_push {r4, r5, ip, lr, pc}
+#else
+ do_push {r4, r5, lr}
+#endif
+
+ ands r5, ah, #0x80000000 @ sign bit in r5
+ bpl 2f
+#if defined(__thumb2__)
+ negs al, al
+ sbc ah, ah, ah, lsl #1
+#else
+ rsbs al, al, #0
+ rsc ah, ah, #0
+#endif
+2:
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+
+ @ FPA little-endian: must swap the word order.
+ .ifnc xh, ah
+ mov ip, al
+ mov xh, ah
+ mov xl, ip
+ .endif
+
+ movs ip, xh, lsr #22
+ beq LSYM(Lad_p)
+
+ @ The value is too big. Scale it down a bit...
+ mov r2, #3
+ movs ip, ip, lsr #3
+ do_it ne
+ addne r2, r2, #3
+ movs ip, ip, lsr #3
+ do_it ne
+ addne r2, r2, #3
+ add r2, r2, ip, lsr #3
+
+ rsb r3, r2, #32
+ shift1 lsl, ip, xl, r3
+ shift1 lsr, xl, xl, r2
+ shiftop orr xl xl xh lsl r3 lr
+ shift1 lsr, xh, xh, r2
+ add r4, r4, r2
+ b LSYM(Lad_p)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+
+ @ Legacy code expects the result to be returned in f0. Copy it
+ @ there as well.
+LSYM(f0_ret):
+ do_push {r0, r1}
+ ldfd f0, [sp], #8
+ RETLDM
+
+#endif
+
+ FUNC_END floatdidf
+ FUNC_END aeabi_l2d
+ FUNC_END floatundidf
+ FUNC_END aeabi_ul2d
+
+#endif /* L_arm_addsubdf3 */
+
+#ifdef L_arm_muldivdf3
+
+ARM_FUNC_START muldf3
+ARM_FUNC_ALIAS aeabi_dmul muldf3
+ do_push {r4, r5, r6, lr}
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ orr ip, ip, #0x700
+ ands r4, ip, xh, lsr #20
+ do_it ne, tte
+ COND(and,s,ne) r5, ip, yh, lsr #20
+ teqne r4, ip
+ teqne r5, ip
+ bleq LSYM(Lml_s)
+
+ @ Add exponents together
+ add r4, r4, r5
+
+ @ Determine final sign.
+ eor r6, xh, yh
+
+ @ Convert mantissa to unsigned integer.
+ @ If power of two, branch to a separate path.
+ bic xh, xh, ip, lsl #21
+ bic yh, yh, ip, lsl #21
+ orrs r5, xl, xh, lsl #12
+ do_it ne
+ COND(orr,s,ne) r5, yl, yh, lsl #12
+ orr xh, xh, #0x00100000
+ orr yh, yh, #0x00100000
+ beq LSYM(Lml_1)
+
+#if __ARM_ARCH__ < 4
+
+ @ Put sign bit in r6, which will be restored in yl later.
+ and r6, r6, #0x80000000
+
+ @ Well, no way to make it shorter without the umull instruction.
+ stmfd sp!, {r6, r7, r8, r9, sl, fp}
+ mov r7, xl, lsr #16
+ mov r8, yl, lsr #16
+ mov r9, xh, lsr #16
+ mov sl, yh, lsr #16
+ bic xl, xl, r7, lsl #16
+ bic yl, yl, r8, lsl #16
+ bic xh, xh, r9, lsl #16
+ bic yh, yh, sl, lsl #16
+ mul ip, xl, yl
+ mul fp, xl, r8
+ mov lr, #0
+ adds ip, ip, fp, lsl #16
+ adc lr, lr, fp, lsr #16
+ mul fp, r7, yl
+ adds ip, ip, fp, lsl #16
+ adc lr, lr, fp, lsr #16
+ mul fp, xl, sl
+ mov r5, #0
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, r7, yh
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, xh, r8
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, r9, yl
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, xh, sl
+ mul r6, r9, sl
+ adds r5, r5, fp, lsl #16
+ adc r6, r6, fp, lsr #16
+ mul fp, r9, yh
+ adds r5, r5, fp, lsl #16
+ adc r6, r6, fp, lsr #16
+ mul fp, xl, yh
+ adds lr, lr, fp
+ mul fp, r7, sl
+ adcs r5, r5, fp
+ mul fp, xh, yl
+ adc r6, r6, #0
+ adds lr, lr, fp
+ mul fp, r9, r8
+ adcs r5, r5, fp
+ mul fp, r7, r8
+ adc r6, r6, #0
+ adds lr, lr, fp
+ mul fp, xh, yh
+ adcs r5, r5, fp
+ adc r6, r6, #0
+ ldmfd sp!, {yl, r7, r8, r9, sl, fp}
+
+#else
+
+ @ Here is the actual multiplication.
+ umull ip, lr, xl, yl
+ mov r5, #0
+ umlal lr, r5, xh, yl
+ and yl, r6, #0x80000000
+ umlal lr, r5, xl, yh
+ mov r6, #0
+ umlal r5, r6, xh, yh
+
+#endif
+
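+	@ Either way the 128-bit mantissa product now sits in r6:r5:lr:ip,
+	@ most to least significant; in C (mx, my being the integer
+	@ mantissas) this is simply:
+	@	p = (unsigned __int128) mx * my;
+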
+ @ The LSBs in ip are only significant for the final rounding.
+ @ Fold them into lr.
+ teq ip, #0
+ do_it ne
+ orrne lr, lr, #1
+
+	@ Adjust the result according to the MSB position.
+ sub r4, r4, #0xff
+ cmp r6, #(1 << (20-11))
+ sbc r4, r4, #0x300
+ bcs 1f
+ movs lr, lr, lsl #1
+ adcs r5, r5, r5
+ adc r6, r6, r6
+1:
+ @ Shift to final position, add sign to result.
+ orr xh, yl, r6, lsl #11
+ orr xh, xh, r5, lsr #21
+ mov xl, r5, lsl #11
+ orr xl, xl, lr, lsr #21
+ mov lr, lr, lsl #11
+
+ @ Check exponent range for under/overflow.
+ subs ip, r4, #(254 - 1)
+ do_it hi
+ cmphi ip, #0x700
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
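+	@ (To nearest, ties to even: the CMP sets C when the discarded bits
+	@ in lr exceed half an ulp; on an exact tie the conditional MOVS
+	@ re-derives C from the result LSB so that halfway cases round to
+	@ the even neighbour.)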
+ cmp lr, #0x80000000
+ do_it eq
+ COND(mov,s,eq) lr, xl, lsr #1
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ RETLDM "r4, r5, r6"
+
+	@ Multiplication by 0x1p*: let's shortcut a lot of code.
+LSYM(Lml_1):
+ and r6, r6, #0x80000000
+ orr xh, r6, xh
+ orr xl, xl, yl
+ eor xh, xh, yh
+ subs r4, r4, ip, lsr #1
+ do_it gt, tt
+ COND(rsb,s,gt) r5, r4, ip
+ orrgt xh, xh, r4, lsl #20
+ RETLDM "r4, r5, r6" gt
+
+ @ Under/overflow: fix things up for the code below.
+ orr xh, xh, #0x00100000
+ mov lr, #0
+ subs r4, r4, #1
+
+LSYM(Lml_u):
+ @ Overflow?
+ bgt LSYM(Lml_o)
+
+ @ Check if denormalized result is possible, otherwise return signed 0.
+ cmn r4, #(53 + 1)
+ do_it le, tt
+ movle xl, #0
+ bicle xh, xh, #0x7fffffff
+ RETLDM "r4, r5, r6" le
+
+	@ Find the proper shift value.
+ rsb r4, r4, #0
+ subs r4, r4, #32
+ bge 2f
+ adds r4, r4, #12
+ bgt 1f
+
+	@ Shift the result right by 1 to 20 bits; preserve sign bit, round, etc.
+ add r4, r4, #20
+ rsb r5, r4, #32
+ shift1 lsl, r3, xl, r5
+ shift1 lsr, xl, xl, r4
+ shiftop orr xl xl xh lsl r5 r2
+ and r2, xh, #0x80000000
+ bic xh, xh, #0x80000000
+ adds xl, xl, r3, lsr #31
+ shiftop adc xh r2 xh lsr r4 r6
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ biceq xl, xl, r3, lsr #31
+ RETLDM "r4, r5, r6"
+
+	@ Shift the result right by 21 to 31 bits, or left by 11 to 1 bits
+	@ after a register switch from xh to xl.  Then round.
+1: rsb r4, r4, #12
+ rsb r5, r4, #32
+ shift1 lsl, r3, xl, r4
+ shift1 lsr, xl, xl, r5
+ shiftop orr xl xl xh lsl r4 r2
+ bic xh, xh, #0x7fffffff
+ adds xl, xl, r3, lsr #31
+ adc xh, xh, #0
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ biceq xl, xl, r3, lsr #31
+ RETLDM "r4, r5, r6"
+
+	@ Shift the value right by 32 to 64 bits, or by 0 to 32 bits after a
+	@ switch from xh to xl.  Leftover bits are in r3-r6-lr for rounding.
+2: rsb r5, r4, #32
+ shiftop orr lr lr xl lsl r5 r2
+ shift1 lsr, r3, xl, r4
+ shiftop orr r3 r3 xh lsl r5 r2
+ shift1 lsr, xl, xh, r4
+ bic xh, xh, #0x7fffffff
+ shiftop bic xl xl xh lsr r4 r2
+ add xl, xl, r3, lsr #31
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ biceq xl, xl, r3, lsr #31
+ RETLDM "r4, r5, r6"
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Lml_d):
+ teq r4, #0
+ bne 2f
+ and r6, xh, #0x80000000
+1: movs xl, xl, lsl #1
+ adc xh, xh, xh
+ tst xh, #0x00100000
+ do_it eq
+ subeq r4, r4, #1
+ beq 1b
+ orr xh, xh, r6
+ teq r5, #0
+ do_it ne
+ RETc(ne)
+2: and r6, yh, #0x80000000
+3: movs yl, yl, lsl #1
+ adc yh, yh, yh
+ tst yh, #0x00100000
+ do_it eq
+ subeq r5, r5, #1
+ beq 3b
+ orr yh, yh, r6
+ RET
+
+LSYM(Lml_s):
+	@ Isolate the INF and NAN cases.
+ teq r4, ip
+ and r5, ip, yh, lsr #20
+ do_it ne
+ teqne r5, ip
+ beq 1f
+
+ @ Here, one or more arguments are either denormalized or zero.
+ orrs r6, xl, xh, lsl #1
+ do_it ne
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ bne LSYM(Lml_d)
+
+ @ Result is 0, but determine sign anyway.
+LSYM(Lml_z):
+ eor xh, xh, yh
+ and xh, xh, #0x80000000
+ mov xl, #0
+ RETLDM "r4, r5, r6"
+
+1: @ One or both args are INF or NAN.
+ orrs r6, xl, xh, lsl #1
+ do_it eq, te
+ moveq xl, yl
+ moveq xh, yh
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
+ teq r4, ip
+ bne 1f
+ orrs r6, xl, xh, lsl #12
+ bne LSYM(Lml_n) @ NAN * <anything> -> NAN
+1: teq r5, ip
+ bne LSYM(Lml_i)
+ orrs r6, yl, yh, lsl #12
+ do_it ne, t
+ movne xl, yl
+ movne xh, yh
+ bne LSYM(Lml_n) @ <anything> * NAN -> NAN
+
+ @ Result is INF, but we need to determine its sign.
+LSYM(Lml_i):
+ eor xh, xh, yh
+
+ @ Overflow: return INF (sign already in xh).
+LSYM(Lml_o):
+ and xh, xh, #0x80000000
+ orr xh, xh, #0x7f000000
+ orr xh, xh, #0x00f00000
+ mov xl, #0
+ RETLDM "r4, r5, r6"
+
+ @ Return a quiet NAN.
+LSYM(Lml_n):
+ orr xh, xh, #0x7f000000
+ orr xh, xh, #0x00f80000
+ RETLDM "r4, r5, r6"
+
+ FUNC_END aeabi_dmul
+ FUNC_END muldf3
+
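+@ The quotient is developed by restoring division, four bits per
+@ pass of the main loop below.  The idea, one bit at a time, in C
+@ (num, den, quo, bit illustrative):
+@	while (bit) {
+@		if (num >= den) { num -= den; quo |= bit; }
+@		den >>= 1; bit >>= 1;
+@	}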
+ARM_FUNC_START divdf3
+ARM_FUNC_ALIAS aeabi_ddiv divdf3
+
+ do_push {r4, r5, r6, lr}
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ orr ip, ip, #0x700
+ ands r4, ip, xh, lsr #20
+ do_it ne, tte
+ COND(and,s,ne) r5, ip, yh, lsr #20
+ teqne r4, ip
+ teqne r5, ip
+ bleq LSYM(Ldv_s)
+
+	@ Subtract the divisor's exponent from the dividend's.
+ sub r4, r4, r5
+
+ @ Preserve final sign into lr.
+ eor lr, xh, yh
+
+ @ Convert mantissa to unsigned integer.
+ @ Dividend -> r5-r6, divisor -> yh-yl.
+ orrs r5, yl, yh, lsl #12
+ mov xh, xh, lsl #12
+ beq LSYM(Ldv_1)
+ mov yh, yh, lsl #12
+ mov r5, #0x10000000
+ orr yh, r5, yh, lsr #4
+ orr yh, yh, yl, lsr #24
+ mov yl, yl, lsl #8
+ orr r5, r5, xh, lsr #4
+ orr r5, r5, xl, lsr #24
+ mov r6, xl, lsl #8
+
+ @ Initialize xh with final sign bit.
+ and xh, lr, #0x80000000
+
+	@ Ensure the result will land in a known bit position.
+ @ Apply exponent bias accordingly.
+ cmp r5, yh
+ do_it eq
+ cmpeq r6, yl
+ adc r4, r4, #(255 - 2)
+ add r4, r4, #0x300
+ bcs 1f
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+1:
+	@ Perform a first subtraction to align the result to a nibble.
+ subs r6, r6, yl
+ sbc r5, r5, yh
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ mov xl, #0x00100000
+ mov ip, #0x00080000
+
+ @ The actual division loop.
+1: subs lr, r6, yl
+ sbcs lr, r5, yh
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #1
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #2
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #3
+
+ orrs lr, r5, r6
+ beq 2f
+ mov r5, r5, lsl #4
+ orr r5, r5, r6, lsr #28
+ mov r6, r6, lsl #4
+ mov yh, yh, lsl #3
+ orr yh, yh, yl, lsr #29
+ mov yl, yl, lsl #3
+ movs ip, ip, lsr #4
+ bne 1b
+
+ @ We are done with a word of the result.
+ @ Loop again for the low word if this pass was for the high word.
+ tst xh, #0x00100000
+ bne 3f
+ orr xh, xh, xl
+ mov xl, #0
+ mov ip, #0x80000000
+ b 1b
+2:
+ @ Be sure result starts in the high word.
+ tst xh, #0x00100000
+ do_it eq, t
+ orreq xh, xh, xl
+ moveq xl, #0
+3:
+ @ Check exponent range for under/overflow.
+ subs ip, r4, #(254 - 1)
+ do_it hi
+ cmphi ip, #0x700
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ subs ip, r5, yh
+ do_it eq, t
+ COND(sub,s,eq) ip, r6, yl
+ COND(mov,s,eq) ip, xl, lsr #1
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ RETLDM "r4, r5, r6"
+
+ @ Division by 0x1p*: shortcut a lot of code.
+LSYM(Ldv_1):
+ and lr, lr, #0x80000000
+ orr xh, lr, xh, lsr #12
+ adds r4, r4, ip, lsr #1
+ do_it gt, tt
+ COND(rsb,s,gt) r5, r4, ip
+ orrgt xh, xh, r4, lsl #20
+ RETLDM "r4, r5, r6" gt
+
+ orr xh, xh, #0x00100000
+ mov lr, #0
+ subs r4, r4, #1
+ b LSYM(Lml_u)
+
+	@ Result might need to be denormalized: put remainder bits
+ @ in lr for rounding considerations.
+LSYM(Ldv_u):
+ orr lr, r5, r6
+ b LSYM(Lml_u)
+
+	@ One or both arguments are either INF, NAN or zero.
+LSYM(Ldv_s):
+ and r5, ip, yh, lsr #20
+ teq r4, ip
+ do_it eq
+ teqeq r5, ip
+ beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
+ teq r4, ip
+ bne 1f
+ orrs r4, xl, xh, lsl #12
+ bne LSYM(Lml_n) @ NAN / <anything> -> NAN
+ teq r5, ip
+ bne LSYM(Lml_i) @ INF / <anything> -> INF
+ mov xl, yl
+ mov xh, yh
+ b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
+1: teq r5, ip
+ bne 2f
+ orrs r5, yl, yh, lsl #12
+ beq LSYM(Lml_z) @ <anything> / INF -> 0
+ mov xl, yl
+ mov xh, yh
+ b LSYM(Lml_n) @ <anything> / NAN -> NAN
+2: @ If both are nonzero, we need to normalize and resume above.
+ orrs r6, xl, xh, lsl #1
+ do_it ne
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ bne LSYM(Lml_d)
+ @ One or both arguments are 0.
+ orrs r4, xl, xh, lsl #1
+ bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
+ orrs r5, yl, yh, lsl #1
+ bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
+ b LSYM(Lml_n) @ 0 / 0 -> NAN
+
+ FUNC_END aeabi_ddiv
+ FUNC_END divdf3
+
+#endif /* L_muldivdf3 */
+
+#ifdef L_arm_cmpdf2
+
+@ Note: only r0 (return value) and ip are clobbered here.
+
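+@ All of these return a three-way result in r0: negative for x < y,
+@ zero for x == y, positive for x > y.  The value preloaded into ip
+@ is substituted when the operands are unordered (NAN), chosen per
+@ entry point so that the tested condition comes out false.  A
+@ caller compiles "x > y" roughly as:
+@	bl	__gtdf2
+@	cmp	r0, #0
+@	bgt	.Ltrue		@ not taken if either operand is NAN
+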
+ARM_FUNC_START gtdf2
+ARM_FUNC_ALIAS gedf2 gtdf2
+ mov ip, #-1
+ b 1f
+
+ARM_FUNC_START ltdf2
+ARM_FUNC_ALIAS ledf2 ltdf2
+ mov ip, #1
+ b 1f
+
+ARM_FUNC_START cmpdf2
+ARM_FUNC_ALIAS nedf2 cmpdf2
+ARM_FUNC_ALIAS eqdf2 cmpdf2
+ mov ip, #1 @ how should we specify unordered here?
+
+1: str ip, [sp, #-4]!
+
+ @ Trap any INF/NAN first.
+ mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ mov ip, yh, lsl #1
+ do_it ne
+ COND(mvn,s,ne) ip, ip, asr #21
+ beq 3f
+
+ @ Test for equality.
+ @ Note that 0.0 is equal to -0.0.
+2: add sp, sp, #4
+ orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
+ do_it eq, e
+ COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
+ teqne xh, yh @ or xh == yh
+ do_it eq, tt
+ teqeq xl, yl @ and xl == yl
+ moveq r0, #0 @ then equal.
+ RETc(eq)
+
+ @ Clear C flag
+ cmn r0, #0
+
+	@ Compare signs,
+ teq xh, yh
+
+ @ Compare values if same sign
+ do_it pl
+ cmppl xh, yh
+ do_it eq
+ cmpeq xl, yl
+
+ @ Result:
+ do_it cs, e
+ movcs r0, yh, asr #31
+ mvncc r0, yh, asr #31
+ orr r0, r0, #1
+ RET
+
+ @ Look for a NAN.
+3: mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ bne 4f
+ orrs ip, xl, xh, lsl #12
+ bne 5f @ x is NAN
+4: mov ip, yh, lsl #1
+ mvns ip, ip, asr #21
+ bne 2b
+ orrs ip, yl, yh, lsl #12
+ beq 2b @ y is not NAN
+5: ldr r0, [sp], #4 @ unordered return code
+ RET
+
+ FUNC_END gedf2
+ FUNC_END gtdf2
+ FUNC_END ledf2
+ FUNC_END ltdf2
+ FUNC_END nedf2
+ FUNC_END eqdf2
+ FUNC_END cmpdf2
+
+ARM_FUNC_START aeabi_cdrcmple
+
+ mov ip, r0
+ mov r0, r2
+ mov r2, ip
+ mov ip, r1
+ mov r1, r3
+ mov r3, ip
+ b 6f
+
+ARM_FUNC_START aeabi_cdcmpeq
+ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+6: do_push {r0, lr}
+ ARM_CALL cmpdf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ do_it mi
+ cmnmi r0, #0
+ RETLDM "r0"
+
+ FUNC_END aeabi_cdcmple
+ FUNC_END aeabi_cdcmpeq
+ FUNC_END aeabi_cdrcmple
+
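+@ These flag-returning helpers are meant to be branched on directly;
+@ for "x <= y", for instance:
+@	bl	__aeabi_cdcmple
+@	bls	.Lle		@ taken when x <= y, not on NAN
+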
+ARM_FUNC_START aeabi_dcmpeq
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ do_it eq, e
+ moveq r0, #1 @ Equal to.
+ movne r0, #0 @ Less than, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpeq
+
+ARM_FUNC_START aeabi_dcmplt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ do_it cc, e
+ movcc r0, #1 @ Less than.
+ movcs r0, #0 @ Equal to, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmplt
+
+ARM_FUNC_START aeabi_dcmple
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ do_it ls, e
+ movls r0, #1 @ Less than or equal to.
+ movhi r0, #0 @ Greater than or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmple
+
+ARM_FUNC_START aeabi_dcmpge
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdrcmple
+ do_it ls, e
+ movls r0, #1 @ Operand 2 is less than or equal to operand 1.
+ movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpge
+
+ARM_FUNC_START aeabi_dcmpgt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdrcmple
+ do_it cc, e
+ movcc r0, #1 @ Operand 2 is less than operand 1.
+ movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
+ @ or they are unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpgt
+
+#endif /* L_cmpdf2 */
+
+#ifdef L_arm_unorddf2
+
+ARM_FUNC_START unorddf2
+ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
+
+ mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ bne 1f
+ orrs ip, xl, xh, lsl #12
+ bne 3f @ x is NAN
+1: mov ip, yh, lsl #1
+ mvns ip, ip, asr #21
+ bne 2f
+ orrs ip, yl, yh, lsl #12
+ bne 3f @ y is NAN
+2: mov r0, #0 @ arguments are ordered.
+ RET
+
+3: mov r0, #1 @ arguments are unordered.
+ RET
+
+ FUNC_END aeabi_dcmpun
+ FUNC_END unorddf2
+
+#endif /* L_unorddf2 */
+
+#ifdef L_arm_fixdfsi
+
+ARM_FUNC_START fixdfsi
+ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
+
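+	@ The "z" suffix means round toward zero: the fraction is simply
+	@ discarded, so e.g. -2.75 converts to -2 and 2.75 to 2.
+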
+ @ check exponent range.
+ mov r2, xh, lsl #1
+ adds r2, r2, #(1 << 21)
+ bcs 2f @ value is INF or NAN
+ bpl 1f @ value is too small
+ mov r3, #(0xfffffc00 + 31)
+ subs r2, r3, r2, asr #21
+ bls 3f @ value is too large
+
+ @ scale value
+ mov r3, xh, lsl #11
+ orr r3, r3, #0x80000000
+ orr r3, r3, xl, lsr #21
+ tst xh, #0x80000000 @ the sign bit
+ shift1 lsr, r0, r3, r2
+ do_it ne
+ rsbne r0, r0, #0
+ RET
+
+1: mov r0, #0
+ RET
+
+2: orrs xl, xl, xh, lsl #12
+ bne 4f @ x is NAN.
+3: ands r0, xh, #0x80000000 @ the sign bit
+ do_it eq
+ moveq r0, #0x7fffffff @ maximum signed positive si
+ RET
+
+4: mov r0, #0 @ How should we convert NAN?
+ RET
+
+ FUNC_END aeabi_d2iz
+ FUNC_END fixdfsi
+
+#endif /* L_fixdfsi */
+
+#ifdef L_arm_fixunsdfsi
+
+ARM_FUNC_START fixunsdfsi
+ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
+
+ @ check exponent range.
+ movs r2, xh, lsl #1
+ bcs 1f @ value is negative
+ adds r2, r2, #(1 << 21)
+ bcs 2f @ value is INF or NAN
+ bpl 1f @ value is too small
+ mov r3, #(0xfffffc00 + 31)
+ subs r2, r3, r2, asr #21
+ bmi 3f @ value is too large
+
+ @ scale value
+ mov r3, xh, lsl #11
+ orr r3, r3, #0x80000000
+ orr r3, r3, xl, lsr #21
+ shift1 lsr, r0, r3, r2
+ RET
+
+1: mov r0, #0
+ RET
+
+2: orrs xl, xl, xh, lsl #12
+ bne 4f @ value is NAN.
+3: mov r0, #0xffffffff @ maximum unsigned si
+ RET
+
+4: mov r0, #0 @ How should we convert NAN?
+ RET
+
+ FUNC_END aeabi_d2uiz
+ FUNC_END fixunsdfsi
+
+#endif /* L_fixunsdfsi */
+
+#ifdef L_arm_truncdfsf2
+
+ARM_FUNC_START truncdfsf2
+ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
+
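+	@ Rounding is to nearest, ties to even: 1 + 2^-24, exactly halfway
+	@ between 1.0f and the next representable float, becomes 1.0f.
+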
+ @ check exponent range.
+ mov r2, xh, lsl #1
+ subs r3, r2, #((1023 - 127) << 21)
+ do_it cs, t
+ COND(sub,s,cs) ip, r3, #(1 << 21)
+ COND(rsb,s,cs) ip, ip, #(254 << 21)
+ bls 2f @ value is out of range
+
+1: @ shift and round mantissa
+ and ip, xh, #0x80000000
+ mov r2, xl, lsl #3
+ orr xl, ip, xl, lsr #29
+ cmp r2, #0x80000000
+ adc r0, xl, r3, lsl #2
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+2: @ either overflow or underflow
+ tst xh, #0x40000000
+ bne 3f @ overflow
+
+ @ check if denormalized value is possible
+ adds r2, r3, #(23 << 21)
+ do_it lt, t
+ andlt r0, xh, #0x80000000 @ too small, return signed 0.
+ RETc(lt)
+
+ @ denormalize value so we can resume with the code above afterwards.
+ orr xh, xh, #0x00100000
+ mov r2, r2, lsr #21
+ rsb r2, r2, #24
+ rsb ip, r2, #32
+#if defined(__thumb2__)
+ lsls r3, xl, ip
+#else
+ movs r3, xl, lsl ip
+#endif
+ shift1 lsr, xl, xl, r2
+ do_it ne
+ orrne xl, xl, #1 @ fold r3 for rounding considerations.
+ mov r3, xh, lsl #11
+ mov r3, r3, lsr #11
+ shiftop orr xl xl r3 lsl ip ip
+ shift1 lsr, r3, r3, r2
+ mov r3, r3, lsl #1
+ b 1b
+
+3:	@ check for NAN
+ mvns r3, r2, asr #21
+ bne 5f @ simple overflow
+ orrs r3, xl, xh, lsl #12
+ do_it ne, tt
+ movne r0, #0x7f000000
+ orrne r0, r0, #0x00c00000
+ RETc(ne) @ return NAN
+
+5: @ return INF with sign
+ and r0, xh, #0x80000000
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ FUNC_END aeabi_d2f
+ FUNC_END truncdfsf2
+
+#endif /* L_truncdfsf2 */
diff --git a/libgcc/config/arm/ieee754-sf.S b/libgcc/config/arm/ieee754-sf.S
new file mode 100644
index 00000000000..c93f66d8ff8
--- /dev/null
+++ b/libgcc/config/arm/ieee754-sf.S
@@ -0,0 +1,1060 @@
+/* ieee754-sf.S single-precision floating point support for ARM
+
+ Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Nicolas Pitre (nico@cam.org)
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/*
+ * Notes:
+ *
+ * The goal of this code is to be as fast as possible. This is
+ * not meant to be easy to understand for the casual reader.
+ *
+ * Only the default rounding mode is supported, for best performance.
+ * Exceptions aren't supported yet, but they could be added quite
+ * easily if necessary without impacting performance.
+ */
+
+#ifdef L_arm_negsf2
+
+ARM_FUNC_START negsf2
+ARM_FUNC_ALIAS aeabi_fneg negsf2
+
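+	@ Works for any value, NANs and INFs included:
+	@ 0x3f800000 (1.0f) becomes 0xbf800000 (-1.0f).
+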
+ eor r0, r0, #0x80000000 @ flip sign bit
+ RET
+
+ FUNC_END aeabi_fneg
+ FUNC_END negsf2
+
+#endif
+
+#ifdef L_arm_addsubsf3
+
+ARM_FUNC_START aeabi_frsub
+
+ eor r0, r0, #0x80000000 @ flip sign bit of first arg
+ b 1f
+
+ARM_FUNC_START subsf3
+ARM_FUNC_ALIAS aeabi_fsub subsf3
+
+ eor r1, r1, #0x80000000 @ flip sign bit of second arg
+#if defined(__INTERWORKING_STUBS__)
+ b 1f @ Skip Thumb-code prologue
+#endif
+
+ARM_FUNC_START addsf3
+ARM_FUNC_ALIAS aeabi_fadd addsf3
+
+1: @ Look for zeroes, equal values, INF, or NAN.
+ movs r2, r0, lsl #1
+ do_it ne, ttt
+ COND(mov,s,ne) r3, r1, lsl #1
+ teqne r2, r3
+ COND(mvn,s,ne) ip, r2, asr #24
+ COND(mvn,s,ne) ip, r3, asr #24
+ beq LSYM(Lad_s)
+
+ @ Compute exponent difference. Make largest exponent in r2,
+ @ corresponding arg in r0, and positive exponent difference in r3.
+ mov r2, r2, lsr #24
+ rsbs r3, r2, r3, lsr #24
+ do_it gt, ttt
+ addgt r2, r2, r3
+ eorgt r1, r0, r1
+ eorgt r0, r1, r0
+ eorgt r1, r0, r1
+ do_it lt
+ rsblt r3, r3, #0
+
+ @ If exponent difference is too large, return largest argument
+	@ already in r0.  We need up to 25 bits to handle proper rounding
+ @ of 0x1p25 - 1.1.
+ cmp r3, #25
+ do_it hi
+ RETc(hi)
+
+ @ Convert mantissa to signed integer.
+ tst r0, #0x80000000
+ orr r0, r0, #0x00800000
+ bic r0, r0, #0xff000000
+ do_it ne
+ rsbne r0, r0, #0
+ tst r1, #0x80000000
+ orr r1, r1, #0x00800000
+ bic r1, r1, #0xff000000
+ do_it ne
+ rsbne r1, r1, #0
+
+ @ If exponent == difference, one or both args were denormalized.
+	@ Since this is not a common case, rescale them out of line.
+ teq r2, r3
+ beq LSYM(Lad_d)
+LSYM(Lad_x):
+
+ @ Compensate for the exponent overlapping the mantissa MSB added later
+ sub r2, r2, #1
+
+ @ Shift and add second arg to first arg in r0.
+	@ Keep the leftover bits in r1.
+ shiftop adds r0 r0 r1 asr r3 ip
+ rsb r3, r3, #32
+ shift1 lsl, r1, r1, r3
+
+ @ Keep absolute value in r0-r1, sign in r3 (the n bit was set above)
+ and r3, r0, #0x80000000
+ bpl LSYM(Lad_p)
+#if defined(__thumb2__)
+ negs r1, r1
+ sbc r0, r0, r0, lsl #1
+#else
+ rsbs r1, r1, #0
+ rsc r0, r0, #0
+#endif
+
+ @ Determine how to normalize the result.
+LSYM(Lad_p):
+ cmp r0, #0x00800000
+ bcc LSYM(Lad_a)
+ cmp r0, #0x01000000
+ bcc LSYM(Lad_e)
+
+ @ Result needs to be shifted right.
+ movs r0, r0, lsr #1
+ mov r1, r1, rrx
+ add r2, r2, #1
+
+ @ Make sure we did not bust our exponent.
+ cmp r2, #254
+ bhs LSYM(Lad_o)
+
+ @ Our result is now properly aligned into r0, remaining bits in r1.
+ @ Pack final result together.
+ @ Round with MSB of r1. If halfway between two numbers, round towards
+ @ LSB of r0 = 0.
+LSYM(Lad_e):
+ cmp r1, #0x80000000
+ adc r0, r0, r2, lsl #23
+ do_it eq
+ biceq r0, r0, #1
+ orr r0, r0, r3
+ RET
+
+ @ Result must be shifted left and exponent adjusted.
+LSYM(Lad_a):
+ movs r1, r1, lsl #1
+ adc r0, r0, r0
+ tst r0, #0x00800000
+ sub r2, r2, #1
+ bne LSYM(Lad_e)
+
+ @ No rounding necessary since r1 will always be 0 at this point.
+LSYM(Lad_l):
+
+#if __ARM_ARCH__ < 5
+
+ movs ip, r0, lsr #12
+ moveq r0, r0, lsl #12
+ subeq r2, r2, #12
+ tst r0, #0x00ff0000
+ moveq r0, r0, lsl #8
+ subeq r2, r2, #8
+ tst r0, #0x00f00000
+ moveq r0, r0, lsl #4
+ subeq r2, r2, #4
+ tst r0, #0x00c00000
+ moveq r0, r0, lsl #2
+ subeq r2, r2, #2
+ cmp r0, #0x00800000
+ movcc r0, r0, lsl #1
+ sbcs r2, r2, #0
+
+#else
+
+ clz ip, r0
+ sub ip, ip, #8
+ subs r2, r2, ip
+ shift1 lsl, r0, r0, ip
+
+#endif
+
+ @ Final result with sign
+ @ If exponent negative, denormalize result.
+ do_it ge, et
+ addge r0, r0, r2, lsl #23
+ rsblt r2, r2, #0
+ orrge r0, r0, r3
+#if defined(__thumb2__)
+ do_it lt, t
+ lsrlt r0, r0, r2
+ orrlt r0, r3, r0
+#else
+ orrlt r0, r3, r0, lsr r2
+#endif
+ RET
+
+ @ Fixup and adjust bit position for denormalized arguments.
+ @ Note that r2 must not remain equal to 0.
+LSYM(Lad_d):
+ teq r2, #0
+ eor r1, r1, #0x00800000
+ do_it eq, te
+ eoreq r0, r0, #0x00800000
+ addeq r2, r2, #1
+ subne r3, r3, #1
+ b LSYM(Lad_x)
+
+LSYM(Lad_s):
+ mov r3, r1, lsl #1
+
+ mvns ip, r2, asr #24
+ do_it ne
+ COND(mvn,s,ne) ip, r3, asr #24
+ beq LSYM(Lad_i)
+
+ teq r2, r3
+ beq 1f
+
+ @ Result is x + 0.0 = x or 0.0 + y = y.
+ teq r2, #0
+ do_it eq
+ moveq r0, r1
+ RET
+
+1: teq r0, r1
+
+ @ Result is x - x = 0.
+ do_it ne, t
+ movne r0, #0
+ RETc(ne)
+
+ @ Result is x + x = 2x.
+ tst r2, #0xff000000
+ bne 2f
+ movs r0, r0, lsl #1
+ do_it cs
+ orrcs r0, r0, #0x80000000
+ RET
+2: adds r2, r2, #(2 << 24)
+ do_it cc, t
+ addcc r0, r0, #(1 << 23)
+ RETc(cc)
+ and r3, r0, #0x80000000
+
+ @ Overflow: return INF.
+LSYM(Lad_o):
+ orr r0, r3, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ @ At least one of r0/r1 is INF/NAN.
+ @ if r0 != INF/NAN: return r1 (which is INF/NAN)
+ @ if r1 != INF/NAN: return r0 (which is INF/NAN)
+ @ if r0 or r1 is NAN: return NAN
+ @ if opposite sign: return NAN
+ @ otherwise return r0 (which is INF or -INF)
+LSYM(Lad_i):
+ mvns r2, r2, asr #24
+ do_it ne, et
+ movne r0, r1
+ COND(mvn,s,eq) r3, r3, asr #24
+ movne r1, r0
+ movs r2, r0, lsl #9
+ do_it eq, te
+ COND(mov,s,eq) r3, r1, lsl #9
+ teqeq r0, r1
+ orrne r0, r0, #0x00400000 @ quiet NAN
+ RET
+
+ FUNC_END aeabi_frsub
+ FUNC_END aeabi_fadd
+ FUNC_END addsf3
+ FUNC_END aeabi_fsub
+ FUNC_END subsf3
+
+ARM_FUNC_START floatunsisf
+ARM_FUNC_ALIAS aeabi_ui2f floatunsisf
+
+ mov r3, #0
+ b 1f
+
+ARM_FUNC_START floatsisf
+ARM_FUNC_ALIAS aeabi_i2f floatsisf
+
+ ands r3, r0, #0x80000000
+ do_it mi
+ rsbmi r0, r0, #0
+
+1: movs ip, r0
+ do_it eq
+ RETc(eq)
+
+ @ Add initial exponent to sign
+ orr r3, r3, #((127 + 23) << 23)
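+	@ (127 + 23 treats the integer as a mantissa with its binary point
+	@ after bit 23.  The mantissa MSB will later overlap the exponent
+	@ LSB, which the "sub r3, r3, #(1 << 23)" at 2: below compensates
+	@ for; e.g. __aeabi_i2f(1) comes out as 0x3f800000, i.e. 1.0f.)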
+
+ .ifnc ah, r0
+ mov ah, r0
+ .endif
+ mov al, #0
+ b 2f
+
+ FUNC_END aeabi_i2f
+ FUNC_END floatsisf
+ FUNC_END aeabi_ui2f
+ FUNC_END floatunsisf
+
+ARM_FUNC_START floatundisf
+ARM_FUNC_ALIAS aeabi_ul2f floatundisf
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ do_it eq, t
+ mvfeqs f0, #0.0
+#else
+ do_it eq
+#endif
+ RETc(eq)
+
+ mov r3, #0
+ b 1f
+
+ARM_FUNC_START floatdisf
+ARM_FUNC_ALIAS aeabi_l2f floatdisf
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ do_it eq, t
+ mvfeqs f0, #0.0
+#else
+ do_it eq
+#endif
+ RETc(eq)
+
+ ands r3, ah, #0x80000000 @ sign bit in r3
+ bpl 1f
+#if defined(__thumb2__)
+ negs al, al
+ sbc ah, ah, ah, lsl #1
+#else
+ rsbs al, al, #0
+ rsc ah, ah, #0
+#endif
+1:
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0 for backwards
+ @ compatibility.
+ str lr, [sp, #-8]!
+ adr lr, LSYM(f0_ret)
+#endif
+
+ movs ip, ah
+ do_it eq, tt
+ moveq ip, al
+ moveq ah, al
+ moveq al, #0
+
+ @ Add initial exponent to sign
+ orr r3, r3, #((127 + 23 + 32) << 23)
+ do_it eq
+ subeq r3, r3, #(32 << 23)
+2: sub r3, r3, #(1 << 23)
+
+#if __ARM_ARCH__ < 5
+
+ mov r2, #23
+ cmp ip, #(1 << 16)
+ do_it hs, t
+ movhs ip, ip, lsr #16
+ subhs r2, r2, #16
+ cmp ip, #(1 << 8)
+ do_it hs, t
+ movhs ip, ip, lsr #8
+ subhs r2, r2, #8
+ cmp ip, #(1 << 4)
+ do_it hs, t
+ movhs ip, ip, lsr #4
+ subhs r2, r2, #4
+ cmp ip, #(1 << 2)
+ do_it hs, e
+ subhs r2, r2, #2
+ sublo r2, r2, ip, lsr #1
+ subs r2, r2, ip, lsr #3
+
+#else
+
+ clz r2, ip
+ subs r2, r2, #8
+
+#endif
+
+ sub r3, r3, r2, lsl #23
+ blt 3f
+
+ shiftop add r3 r3 ah lsl r2 ip
+ shift1 lsl, ip, al, r2
+ rsb r2, r2, #32
+ cmp ip, #0x80000000
+ shiftop adc r0 r3 al lsr r2 r2
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+3: add r2, r2, #32
+ shift1 lsl, ip, ah, r2
+ rsb r2, r2, #32
+ orrs al, al, ip, lsl #1
+ shiftop adc r0 r3 ah lsr r2 r2
+ do_it eq
+ biceq r0, r0, ip, lsr #31
+ RET
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+
+LSYM(f0_ret):
+ str r0, [sp, #-4]!
+ ldfs f0, [sp], #4
+ RETLDM
+
+#endif
+
+ FUNC_END floatdisf
+ FUNC_END aeabi_l2f
+ FUNC_END floatundisf
+ FUNC_END aeabi_ul2f
+
+#endif /* L_addsubsf3 */
+
+#ifdef L_arm_muldivsf3
+
+ARM_FUNC_START mulsf3
+ARM_FUNC_ALIAS aeabi_fmul mulsf3
+
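+	@ A float is one register: sign in bit 31, 8-bit biased exponent
+	@ in bits 30-23, mantissa below; e.g. 1.5f = 0x3fc00000 (biased
+	@ exponent 127, mantissa 1.1 in binary).
+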
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ ands r2, ip, r0, lsr #23
+ do_it ne, tt
+ COND(and,s,ne) r3, ip, r1, lsr #23
+ teqne r2, ip
+ teqne r3, ip
+ beq LSYM(Lml_s)
+LSYM(Lml_x):
+
+ @ Add exponents together
+ add r2, r2, r3
+
+ @ Determine final sign.
+ eor ip, r0, r1
+
+ @ Convert mantissa to unsigned integer.
+ @ If power of two, branch to a separate path.
+ @ Make up for final alignment.
+ movs r0, r0, lsl #9
+ do_it ne
+ COND(mov,s,ne) r1, r1, lsl #9
+ beq LSYM(Lml_1)
+ mov r3, #0x08000000
+ orr r0, r3, r0, lsr #5
+ orr r1, r3, r1, lsr #5
+
+#if __ARM_ARCH__ < 4
+
+ @ Put sign bit in r3, which will be restored into r0 later.
+ and r3, ip, #0x80000000
+
+ @ Well, no way to make it shorter without the umull instruction.
+ do_push {r3, r4, r5}
+ mov r4, r0, lsr #16
+ mov r5, r1, lsr #16
+ bic r0, r0, r4, lsl #16
+ bic r1, r1, r5, lsl #16
+ mul ip, r4, r5
+ mul r3, r0, r1
+ mul r0, r5, r0
+ mla r0, r4, r1, r0
+ adds r3, r3, r0, lsl #16
+ adc r1, ip, r0, lsr #16
+ do_pop {r0, r4, r5}
+
+#else
+
+ @ The actual multiplication.
+ umull r3, r1, r0, r1
+
+ @ Put final sign in r0.
+ and r0, ip, #0x80000000
+
+#endif
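+
+	@ Either way r1:r3 now holds the 64-bit mantissa product, high
+	@ part in r1; in C (m0, m1 being the integer mantissas) this is
+	@ simply:
+	@	p = (unsigned long long) m0 * m1;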
+
+	@ Adjust the result according to the MSB position.
+ cmp r1, #(1 << 23)
+ do_it cc, tt
+ movcc r1, r1, lsl #1
+ orrcc r1, r1, r3, lsr #31
+ movcc r3, r3, lsl #1
+
+ @ Add sign to result.
+ orr r0, r0, r1
+
+ @ Apply exponent bias, check for under/overflow.
+ sbc r2, r2, #127
+ cmp r2, #(254 - 1)
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ cmp r3, #0x80000000
+ adc r0, r0, r2, lsl #23
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+	@ Multiplication by 0x1p*: let's shortcut a lot of code.
+LSYM(Lml_1):
+ teq r0, #0
+ and ip, ip, #0x80000000
+ do_it eq
+ moveq r1, r1, lsl #9
+ orr r0, ip, r0, lsr #9
+ orr r0, r0, r1, lsr #9
+ subs r2, r2, #127
+ do_it gt, tt
+ COND(rsb,s,gt) r3, r2, #255
+ orrgt r0, r0, r2, lsl #23
+ RETc(gt)
+
+ @ Under/overflow: fix things up for the code below.
+ orr r0, r0, #0x00800000
+ mov r3, #0
+ subs r2, r2, #1
+
+LSYM(Lml_u):
+ @ Overflow?
+ bgt LSYM(Lml_o)
+
+ @ Check if denormalized result is possible, otherwise return signed 0.
+ cmn r2, #(24 + 1)
+ do_it le, t
+ bicle r0, r0, #0x7fffffff
+ RETc(le)
+
+ @ Shift value right, round, etc.
+ rsb r2, r2, #0
+ movs r1, r0, lsl #1
+ shift1 lsr, r1, r1, r2
+ rsb r2, r2, #32
+ shift1 lsl, ip, r0, r2
+ movs r0, r1, rrx
+ adc r0, r0, #0
+ orrs r3, r3, ip, lsl #1
+ do_it eq
+ biceq r0, r0, ip, lsr #31
+ RET
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Lml_d):
+ teq r2, #0
+ and ip, r0, #0x80000000
+1: do_it eq, tt
+ moveq r0, r0, lsl #1
+ tsteq r0, #0x00800000
+ subeq r2, r2, #1
+ beq 1b
+ orr r0, r0, ip
+ teq r3, #0
+ and ip, r1, #0x80000000
+2: do_it eq, tt
+ moveq r1, r1, lsl #1
+ tsteq r1, #0x00800000
+ subeq r3, r3, #1
+ beq 2b
+ orr r1, r1, ip
+ b LSYM(Lml_x)
+
+LSYM(Lml_s):
+	@ Isolate the INF and NAN cases.
+ and r3, ip, r1, lsr #23
+ teq r2, ip
+ do_it ne
+ teqne r3, ip
+ beq 1f
+
+ @ Here, one or more arguments are either denormalized or zero.
+ bics ip, r0, #0x80000000
+ do_it ne
+ COND(bic,s,ne) ip, r1, #0x80000000
+ bne LSYM(Lml_d)
+
+ @ Result is 0, but determine sign anyway.
+LSYM(Lml_z):
+ eor r0, r0, r1
+ bic r0, r0, #0x7fffffff
+ RET
+
+1: @ One or both args are INF or NAN.
+ teq r0, #0x0
+ do_it ne, ett
+ teqne r0, #0x80000000
+ moveq r0, r1
+ teqne r1, #0x0
+ teqne r1, #0x80000000
+ beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
+ teq r2, ip
+ bne 1f
+ movs r2, r0, lsl #9
+ bne LSYM(Lml_n) @ NAN * <anything> -> NAN
+1: teq r3, ip
+ bne LSYM(Lml_i)
+ movs r3, r1, lsl #9
+ do_it ne
+ movne r0, r1
+ bne LSYM(Lml_n) @ <anything> * NAN -> NAN
+
+ @ Result is INF, but we need to determine its sign.
+LSYM(Lml_i):
+ eor r0, r0, r1
+
+ @ Overflow: return INF (sign already in r0).
+LSYM(Lml_o):
+ and r0, r0, #0x80000000
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ @ Return a quiet NAN.
+LSYM(Lml_n):
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00c00000
+ RET
+
+ FUNC_END aeabi_fmul
+ FUNC_END mulsf3
+
+ARM_FUNC_START divsf3
+ARM_FUNC_ALIAS aeabi_fdiv divsf3
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ ands r2, ip, r0, lsr #23
+ do_it ne, tt
+ COND(and,s,ne) r3, ip, r1, lsr #23
+ teqne r2, ip
+ teqne r3, ip
+ beq LSYM(Ldv_s)
+LSYM(Ldv_x):
+
+	@ Subtract the divisor's exponent from the dividend's.
+ sub r2, r2, r3
+
+ @ Preserve final sign into ip.
+ eor ip, r0, r1
+
+ @ Convert mantissa to unsigned integer.
+ @ Dividend -> r3, divisor -> r1.
+ movs r1, r1, lsl #9
+ mov r0, r0, lsl #9
+ beq LSYM(Ldv_1)
+ mov r3, #0x10000000
+ orr r1, r3, r1, lsr #4
+ orr r3, r3, r0, lsr #4
+
+ @ Initialize r0 (result) with final sign bit.
+ and r0, ip, #0x80000000
+
+	@ Ensure the result will land in a known bit position.
+ @ Apply exponent bias accordingly.
+ cmp r3, r1
+ do_it cc
+ movcc r3, r3, lsl #1
+ adc r2, r2, #(127 - 2)
+
+ @ The actual division loop.
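+	@ Restoring division, four quotient bits per pass: ip is the mask
+	@ of the quotient bit being tried, and each of the four steps
+	@ below is, in C (s = 0..3 illustrative):
+	@	if (r3 >= (r1 >> s)) { r3 -= r1 >> s; r0 |= ip >> s; }
+	@ after which the remainder shifts left and the mask right by 4.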
+ mov ip, #0x00800000
+1: cmp r3, r1
+ do_it cs, t
+ subcs r3, r3, r1
+ orrcs r0, r0, ip
+ cmp r3, r1, lsr #1
+ do_it cs, t
+ subcs r3, r3, r1, lsr #1
+ orrcs r0, r0, ip, lsr #1
+ cmp r3, r1, lsr #2
+ do_it cs, t
+ subcs r3, r3, r1, lsr #2
+ orrcs r0, r0, ip, lsr #2
+ cmp r3, r1, lsr #3
+ do_it cs, t
+ subcs r3, r3, r1, lsr #3
+ orrcs r0, r0, ip, lsr #3
+ movs r3, r3, lsl #4
+ do_it ne
+ COND(mov,s,ne) ip, ip, lsr #4
+ bne 1b
+
+ @ Check exponent for under/overflow.
+ cmp r2, #(254 - 1)
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ cmp r3, r1
+ adc r0, r0, r2, lsl #23
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+	@ Division by 0x1p*: let's shortcut a lot of code.
+LSYM(Ldv_1):
+ and ip, ip, #0x80000000
+ orr r0, ip, r0, lsr #9
+ adds r2, r2, #127
+ do_it gt, tt
+ COND(rsb,s,gt) r3, r2, #255
+ orrgt r0, r0, r2, lsl #23
+ RETc(gt)
+
+ orr r0, r0, #0x00800000
+ mov r3, #0
+ subs r2, r2, #1
+ b LSYM(Lml_u)
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Ldv_d):
+ teq r2, #0
+ and ip, r0, #0x80000000
+1: do_it eq, tt
+ moveq r0, r0, lsl #1
+ tsteq r0, #0x00800000
+ subeq r2, r2, #1
+ beq 1b
+ orr r0, r0, ip
+ teq r3, #0
+ and ip, r1, #0x80000000
+2: do_it eq, tt
+ moveq r1, r1, lsl #1
+ tsteq r1, #0x00800000
+ subeq r3, r3, #1
+ beq 2b
+ orr r1, r1, ip
+ b LSYM(Ldv_x)
+
+ @ One or both arguments are either INF, NAN, zero or denormalized.
+LSYM(Ldv_s):
+ and r3, ip, r1, lsr #23
+ teq r2, ip
+ bne 1f
+ movs r2, r0, lsl #9
+ bne LSYM(Lml_n) @ NAN / <anything> -> NAN
+ teq r3, ip
+ bne LSYM(Lml_i) @ INF / <anything> -> INF
+ mov r0, r1
+ b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
+1: teq r3, ip
+ bne 2f
+ movs r3, r1, lsl #9
+ beq LSYM(Lml_z) @ <anything> / INF -> 0
+ mov r0, r1
+ b LSYM(Lml_n) @ <anything> / NAN -> NAN
+2: @ If both are nonzero, we need to normalize and resume above.
+ bics ip, r0, #0x80000000
+ do_it ne
+ COND(bic,s,ne) ip, r1, #0x80000000
+ bne LSYM(Ldv_d)
+ @ One or both arguments are zero.
+ bics r2, r0, #0x80000000
+ bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
+ bics r3, r1, #0x80000000
+ bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
+ b LSYM(Lml_n) @ 0 / 0 -> NAN
+
+ FUNC_END aeabi_fdiv
+ FUNC_END divsf3
+
+#endif /* L_muldivsf3 */
+
+#ifdef L_arm_cmpsf2
+
+ @ The return value in r0 is
+ @
+ @ 0 if the operands are equal
+ @ 1 if the first operand is greater than the second, or
+ @ the operands are unordered and the operation is
+ @ CMP, LT, LE, NE, or EQ.
+ @ -1 if the first operand is less than the second, or
+ @ the operands are unordered and the operation is GT
+ @ or GE.
+ @
+ @ The Z flag will be set iff the operands are equal.
+ @
+ @ The following registers are clobbered by this function:
+ @ ip, r0, r1, r2, r3
+
+ARM_FUNC_START gtsf2
+ARM_FUNC_ALIAS gesf2 gtsf2
+ mov ip, #-1
+ b 1f
+
+ARM_FUNC_START ltsf2
+ARM_FUNC_ALIAS lesf2 ltsf2
+ mov ip, #1
+ b 1f
+
+ARM_FUNC_START cmpsf2
+ARM_FUNC_ALIAS nesf2 cmpsf2
+ARM_FUNC_ALIAS eqsf2 cmpsf2
+ mov ip, #1 @ how should we specify unordered here?
+
+1: str ip, [sp, #-4]!
+
+ @ Trap any INF/NAN first.
+ mov r2, r0, lsl #1
+ mov r3, r1, lsl #1
+ mvns ip, r2, asr #24
+ do_it ne
+ COND(mvn,s,ne) ip, r3, asr #24
+ beq 3f
+
+ @ Compare values.
+ @ Note that 0.0 is equal to -0.0.
+2: add sp, sp, #4
+ orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
+ do_it ne
+ teqne r0, r1 @ if not 0 compare sign
+ do_it pl
+ COND(sub,s,pl) r0, r2, r3 @ if same sign compare values, set r0
+
+ @ Result:
+ do_it hi
+ movhi r0, r1, asr #31
+ do_it lo
+ mvnlo r0, r1, asr #31
+ do_it ne
+ orrne r0, r0, #1
+ RET
+
+ @ Look for a NAN.
+3: mvns ip, r2, asr #24
+ bne 4f
+ movs ip, r0, lsl #9
+ bne 5f @ r0 is NAN
+4: mvns ip, r3, asr #24
+ bne 2b
+ movs ip, r1, lsl #9
+ beq 2b @ r1 is not NAN
+5: ldr r0, [sp], #4 @ return unordered code.
+ RET
+
+ FUNC_END gesf2
+ FUNC_END gtsf2
+ FUNC_END lesf2
+ FUNC_END ltsf2
+ FUNC_END nesf2
+ FUNC_END eqsf2
+ FUNC_END cmpsf2
+
+ARM_FUNC_START aeabi_cfrcmple
+
+ mov ip, r0
+ mov r0, r1
+ mov r1, ip
+ b 6f
+
+ARM_FUNC_START aeabi_cfcmpeq
+ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+6: do_push {r0, r1, r2, r3, lr}
+ ARM_CALL cmpsf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ do_it mi
+ cmnmi r0, #0
+ RETLDM "r0, r1, r2, r3"
+
+ FUNC_END aeabi_cfcmple
+ FUNC_END aeabi_cfcmpeq
+ FUNC_END aeabi_cfrcmple
+
+ARM_FUNC_START aeabi_fcmpeq
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+ do_it eq, e
+ moveq r0, #1 @ Equal to.
+ movne r0, #0 @ Less than, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpeq
+
+ARM_FUNC_START aeabi_fcmplt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+ do_it cc, e
+ movcc r0, #1 @ Less than.
+ movcs r0, #0 @ Equal to, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmplt
+
+ARM_FUNC_START aeabi_fcmple
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+ do_it ls, e
+ movls r0, #1 @ Less than or equal to.
+ movhi r0, #0 @ Greater than or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmple
+
+ARM_FUNC_START aeabi_fcmpge
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfrcmple
+ do_it ls, e
+ movls r0, #1 @ Operand 2 is less than or equal to operand 1.
+ movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpge
+
+ARM_FUNC_START aeabi_fcmpgt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfrcmple
+ do_it cc, e
+ movcc r0, #1 @ Operand 2 is less than operand 1.
+ movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
+ @ or they are unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpgt
+
+#endif /* L_cmpsf2 */
+
+#ifdef L_arm_unordsf2
+
+ARM_FUNC_START unordsf2
+ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
+
+ mov r2, r0, lsl #1
+ mov r3, r1, lsl #1
+ mvns ip, r2, asr #24
+ bne 1f
+ movs ip, r0, lsl #9
+ bne 3f @ r0 is NAN
+1: mvns ip, r3, asr #24
+ bne 2f
+ movs ip, r1, lsl #9
+ bne 3f @ r1 is NAN
+2: mov r0, #0 @ arguments are ordered.
+ RET
+3: mov r0, #1 @ arguments are unordered.
+ RET
+
+ FUNC_END aeabi_fcmpun
+ FUNC_END unordsf2
+
+#endif /* L_unordsf2 */
+
+#ifdef L_arm_fixsfsi
+
+ARM_FUNC_START fixsfsi
+ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
+
+ @ check exponent range.
+ mov r2, r0, lsl #1
+ cmp r2, #(127 << 24)
+ bcc 1f @ value is too small
+ mov r3, #(127 + 31)
+ subs r2, r3, r2, lsr #24
+ bls 2f @ value is too large
+
+ @ scale value
+ mov r3, r0, lsl #8
+ orr r3, r3, #0x80000000
+ tst r0, #0x80000000 @ the sign bit
+ shift1 lsr, r0, r3, r2
+ do_it ne
+ rsbne r0, r0, #0
+ RET
+
+1: mov r0, #0
+ RET
+
+2: cmp r2, #(127 + 31 - 0xff)
+ bne 3f
+ movs r2, r0, lsl #9
+ bne 4f @ r0 is NAN.
+3: ands r0, r0, #0x80000000 @ the sign bit
+ do_it eq
+ moveq r0, #0x7fffffff @ the maximum signed positive si
+ RET
+
+4: mov r0, #0 @ What should we convert NAN to?
+ RET
+
+ FUNC_END aeabi_f2iz
+ FUNC_END fixsfsi
+
+#endif /* L_fixsfsi */
+
+#ifdef L_arm_fixunssfsi
+
+ARM_FUNC_START fixunssfsi
+ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
+
+ @ check exponent range.
+ movs r2, r0, lsl #1
+ bcs 1f @ value is negative
+ cmp r2, #(127 << 24)
+ bcc 1f @ value is too small
+ mov r3, #(127 + 31)
+ subs r2, r3, r2, lsr #24
+ bmi 2f @ value is too large
+
+ @ scale the value
+ mov r3, r0, lsl #8
+ orr r3, r3, #0x80000000
+ shift1 lsr, r0, r3, r2
+ RET
+
+1: mov r0, #0
+ RET
+
+2: cmp r2, #(127 + 31 - 0xff)
+ bne 3f
+ movs r2, r0, lsl #9
+ bne 4f @ r0 is NAN.
+3: mov r0, #0xffffffff @ maximum unsigned si
+ RET
+
+4: mov r0, #0 @ What should we convert NAN to?
+ RET
+
+ FUNC_END aeabi_f2uiz
+ FUNC_END fixunssfsi
+
+#endif /* L_fixunssfsi */
diff --git a/libgcc/config/arm/lib1funcs.S b/libgcc/config/arm/lib1funcs.S
new file mode 100644
index 00000000000..2e76c01df4b
--- /dev/null
+++ b/libgcc/config/arm/lib1funcs.S
@@ -0,0 +1,1829 @@
+@ libgcc routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007, 2008,
+ 2009, 2010 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* An executable stack is *not* required for these functions. */
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif /* __ELF__ and __linux__ */
+
+#ifdef __ARM_EABI__
+/* Some attributes that are common to all routines in this file. */
+ /* Tag_ABI_align_needed: This code does not require 8-byte
+ alignment from the caller. */
+ /* .eabi_attribute 24, 0 -- default setting. */
+ /* Tag_ABI_align_preserved: This code preserves 8-byte
+ alignment in any callee. */
+ .eabi_attribute 25, 1
+#endif /* __ARM_EABI__ */
+/* ------------------------------------------------------------------------ */
+
+/* We need to know what prefix to add to function names. */
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#ifdef __ELF__
+#ifdef __thumb__
+#define __PLT__ /* Not supported in Thumb assembler (for now). */
+#elif defined __vxworks && !defined __PIC__
+#define __PLT__ /* Not supported by the kernel loader. */
+#else
+#define __PLT__ (PLT)
+#endif
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#define LSYM(x) .x
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#define LSYM(x) x
+#endif
+
+/* Function end macros. Variants for interworking. */
+
+#if defined(__ARM_ARCH_2__)
+# define __ARM_ARCH__ 2
+#endif
+
+#if defined(__ARM_ARCH_3__)
+# define __ARM_ARCH__ 3
+#endif
+
+#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
+ || defined(__ARM_ARCH_4T__)
+/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
+ long multiply instructions. That includes v3M. */
+# define __ARM_ARCH__ 4
+#endif
+
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+# define __ARM_ARCH__ 5
+#endif
+
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_6M__)
+# define __ARM_ARCH__ 6
+#endif
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+# define __ARM_ARCH__ 7
+#endif
+
+#ifndef __ARM_ARCH__
+#error Unable to determine architecture.
+#endif
+
+/* There are times when we might prefer Thumb1 code even if ARM code is
+ permitted, for example, the code might be smaller, or there might be
+ interworking problems with switching to ARM state if interworking is
+ disabled. */
+#if (defined(__thumb__) \
+ && !defined(__thumb2__) \
+ && (!defined(__THUMB_INTERWORK__) \
+ || defined (__OPTIMIZE_SIZE__) \
+ || defined(__ARM_ARCH_6M__)))
+# define __prefer_thumb__
+#endif
+
+/* How to return from a function call depends on the architecture variant. */
+
+#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)
+
+# define RET bx lr
+# define RETc(x) bx##x lr
+
+/* Special precautions for interworking on armv4t. */
+# if (__ARM_ARCH__ == 4)
+
+/* Always use bx, not ldr pc. */
+# if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
+# define __INTERWORKING__
+# endif /* __THUMB__ || __THUMB_INTERWORK__ */
+
+/* Include thumb stub before arm mode code. */
+# if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+# define __INTERWORKING_STUBS__
+# endif /* __thumb__ && !__THUMB_INTERWORK__ */
+
+#endif /* __ARM_ARCH == 4 */
+
+#else
+
+# define RET mov pc, lr
+# define RETc(x) mov##x pc, lr
+
+#endif
+
+.macro cfi_pop advance, reg, cfa_offset
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .4byte \advance
+ .byte (0xc0 | \reg) /* DW_CFA_restore */
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .uleb128 \cfa_offset
+ .popsection
+#endif
+.endm
+.macro cfi_push advance, reg, offset, cfa_offset
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .4byte \advance
+ .byte (0x80 | \reg) /* DW_CFA_offset */
+ .uleb128 (\offset / -4)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .uleb128 \cfa_offset
+ .popsection
+#endif
+.endm
+.macro cfi_start start_label, end_label
+#ifdef __ELF__
+ .pushsection .debug_frame
+LSYM(Lstart_frame):
+ .4byte LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
+LSYM(Lstart_cie):
+ .4byte 0xffffffff @ CIE Identifier Tag
+ .byte 0x1 @ CIE Version
+ .ascii "\0" @ CIE Augmentation
+ .uleb128 0x1 @ CIE Code Alignment Factor
+ .sleb128 -4 @ CIE Data Alignment Factor
+ .byte 0xe @ CIE RA Column
+ .byte 0xc @ DW_CFA_def_cfa
+ .uleb128 0xd
+ .uleb128 0x0
+
+ .align 2
+LSYM(Lend_cie):
+ .4byte LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length
+LSYM(Lstart_fde):
+ .4byte LSYM(Lstart_frame) @ FDE CIE offset
+ .4byte \start_label @ FDE initial location
+ .4byte \end_label-\start_label @ FDE address range
+ .popsection
+#endif
+.endm
+.macro cfi_end end_label
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .align 2
+LSYM(Lend_fde):
+ .popsection
+\end_label:
+#endif
+.endm
+
+/* Don't pass dirn, it's there just to get token pasting right. */
+
+.macro RETLDM regs=, cond=, unwind=, dirn=ia
+#if defined (__INTERWORKING__)
+ .ifc "\regs",""
+ ldr\cond lr, [sp], #8
+ .else
+# if defined(__thumb2__)
+ pop\cond {\regs, lr}
+# else
+ ldm\cond\dirn sp!, {\regs, lr}
+# endif
+ .endif
+ .ifnc "\unwind", ""
+ /* Mark LR as restored. */
+97: cfi_pop 97b - \unwind, 0xe, 0x0
+ .endif
+ bx\cond lr
+#else
+ /* Caller is responsible for providing IT instruction. */
+ .ifc "\regs",""
+ ldr\cond pc, [sp], #8
+ .else
+# if defined(__thumb2__)
+ pop\cond {\regs, pc}
+# else
+ ldm\cond\dirn sp!, {\regs, pc}
+# endif
+ .endif
+#endif
+.endm
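+
+/* Typical use: 'RETLDM "r4, r5, r6"' pops r4-r6 together with the
+   saved return address and returns, in a form that works for ARM,
+   Thumb-2 and interworking configurations alike. */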
+
+/* The Unified assembly syntax allows the same code to be assembled for both
+ ARM and Thumb-2. However this is only supported by recent gas, so define
+ a set of macros to allow ARM code on older assemblers. */
+#if defined(__thumb2__)
+.macro do_it cond, suffix=""
+ it\suffix \cond
+.endm
+.macro shift1 op, arg0, arg1, arg2
+ \op \arg0, \arg1, \arg2
+.endm
+#define do_push push
+#define do_pop pop
+#define COND(op1, op2, cond) op1 ## op2 ## cond
+/* Perform an arithmetic operation with a variable shift operand. This
+ requires two instructions and a scratch register on Thumb-2. */
+.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
+ \shiftop \tmp, \src2, \shiftreg
+ \name \dest, \src1, \tmp
+.endm
+#else
+.macro do_it cond, suffix=""
+.endm
+.macro shift1 op, arg0, arg1, arg2
+ mov \arg0, \arg1, \op \arg2
+.endm
+#define do_push stmfd sp!,
+#define do_pop ldmfd sp!,
+#define COND(op1, op2, cond) op1 ## cond ## op2
+.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
+ \name \dest, \src1, \src2, \shiftop \shiftreg
+.endm
+#endif
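+
+/* For instance, "shiftop orr xl xl xh lsl r3 lr" assembles to
+   "orr xl, xl, xh, lsl r3" in ARM state, but to the two-instruction
+   sequence "lsl lr, xh, r3" then "orr xl, xl, lr" for Thumb-2, whose
+   data processing instructions cannot take a register-specified
+   shift of a register operand. */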
+
+#ifdef __ARM_EABI__
+.macro ARM_LDIV0 name signed
+ cmp r0, #0
+ .ifc \signed, unsigned
+ movne r0, #0xffffffff
+ .else
+ movgt r0, #0x7fffffff
+ movlt r0, #0x80000000
+ .endif
+ b SYM (__aeabi_idiv0) __PLT__
+.endm
+#else
+.macro ARM_LDIV0 name signed
+ str lr, [sp, #-8]!
+98: cfi_push 98b - __\name, 0xe, -0x8, 0x8
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ About as wrong as it could be.
+ RETLDM unwind=98b
+.endm
+#endif
+
+
+#ifdef __ARM_EABI__
+.macro THUMB_LDIV0 name signed
+#if defined(__ARM_ARCH_6M__)
+ .ifc \signed, unsigned
+ cmp r0, #0
+ beq 1f
+ mov r0, #0
+ mvn r0, r0 @ 0xffffffff
+1:
+ .else
+ cmp r0, #0
+ beq 2f
+ blt 3f
+ mov r0, #0
+ mvn r0, r0
+ lsr r0, r0, #1 @ 0x7fffffff
+ b 2f
+3: mov r0, #0x80
+ lsl r0, r0, #24 @ 0x80000000
+2:
+ .endif
+ push {r0, r1, r2}
+ ldr r0, 4f
+ adr r1, 4f
+ add r0, r1
+ str r0, [sp, #8]
+ @ We know we are not on armv4t, so pop pc is safe.
+ pop {r0, r1, pc}
+ .align 2
+4:
+ .word __aeabi_idiv0 - 4b
+#elif defined(__thumb2__)
+ .syntax unified
+ .ifc \signed, unsigned
+ cbz r0, 1f
+ mov r0, #0xffffffff
+1:
+ .else
+ cmp r0, #0
+ do_it gt
+ movgt r0, #0x7fffffff
+ do_it lt
+ movlt r0, #0x80000000
+ .endif
+ b.w SYM(__aeabi_idiv0) __PLT__
+#else
+ .align 2
+ bx pc
+ nop
+ .arm
+ cmp r0, #0
+ .ifc \signed, unsigned
+ movne r0, #0xffffffff
+ .else
+ movgt r0, #0x7fffffff
+ movlt r0, #0x80000000
+ .endif
+ b SYM(__aeabi_idiv0) __PLT__
+ .thumb
+#endif
+.endm
+#else
+.macro THUMB_LDIV0 name signed
+ push { r1, lr }
+98: cfi_push 98b - __\name, 0xe, -0x4, 0x8
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+#if defined (__INTERWORKING__)
+ pop { r1, r2 }
+ bx r2
+#else
+ pop { r1, pc }
+#endif
+.endm
+#endif
+
+.macro FUNC_END name
+ SIZE (__\name)
+.endm
+
+.macro DIV_FUNC_END name signed
+ cfi_start __\name, LSYM(Lend_div0)
+LSYM(Ldiv0):
+#ifdef __thumb__
+ THUMB_LDIV0 \name \signed
+#else
+ ARM_LDIV0 \name \signed
+#endif
+ cfi_end LSYM(Lend_div0)
+ FUNC_END \name
+.endm
+
+.macro THUMB_FUNC_START name
+ .globl SYM (\name)
+ TYPE (\name)
+ .thumb_func
+SYM (\name):
+.endm
+
+/* Function start macros. Variants for ARM and Thumb. */
+
+#ifdef __thumb__
+#define THUMB_FUNC .thumb_func
+#define THUMB_CODE .force_thumb
+# if defined(__thumb2__)
+#define THUMB_SYNTAX .syntax divided
+# else
+#define THUMB_SYNTAX
+# endif
+#else
+#define THUMB_FUNC
+#define THUMB_CODE
+#define THUMB_SYNTAX
+#endif
+
+.macro FUNC_START name
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ THUMB_CODE
+ THUMB_FUNC
+ THUMB_SYNTAX
+SYM (__\name):
+.endm
+
+/* Special function that will always be coded in ARM assembly, even if
+ in Thumb-only compilation. */
+
+#if defined(__thumb2__)
+
+/* For Thumb-2 we build everything in thumb mode. */
+.macro ARM_FUNC_START name
+ FUNC_START \name
+ .syntax unified
+.endm
+#define EQUIV .thumb_set
+.macro ARM_CALL name
+ bl __\name
+.endm
+
+#elif defined(__INTERWORKING_STUBS__)
+
+.macro ARM_FUNC_START name
+ FUNC_START \name
+ bx pc
+ nop
+ .arm
+/* A hook to tell gdb that we've switched to ARM mode. Also used to call
+ directly from other local arm routines. */
+_L__\name:
+.endm
+#define EQUIV .thumb_set
+/* Branch directly to a function declared with ARM_FUNC_START.
+ Must be called in arm mode. */
+.macro ARM_CALL name
+ bl _L__\name
+.endm
+
+#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
+
+#ifdef __ARM_ARCH_6M__
+#define EQUIV .thumb_set
+#else
+.macro ARM_FUNC_START name
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ .arm
+SYM (__\name):
+.endm
+#define EQUIV .set
+.macro ARM_CALL name
+ bl __\name
+.endm
+#endif
+
+#endif
+
+.macro FUNC_ALIAS new old
+ .globl SYM (__\new)
+#if defined (__thumb__)
+ .thumb_set SYM (__\new), SYM (__\old)
+#else
+ .set SYM (__\new), SYM (__\old)
+#endif
+.endm
+
+#ifndef __ARM_ARCH_6M__
+.macro ARM_FUNC_ALIAS new old
+ .globl SYM (__\new)
+ EQUIV SYM (__\new), SYM (__\old)
+#if defined(__INTERWORKING_STUBS__)
+ .set SYM (_L__\new), SYM (_L__\old)
+#endif
+.endm
+#endif
+
+#ifdef __ARMEB__
+#define xxh r0
+#define xxl r1
+#define yyh r2
+#define yyl r3
+#else
+#define xxh r1
+#define xxl r0
+#define yyh r3
+#define yyl r2
+#endif
+
+#ifdef __ARM_EABI__
+.macro WEAK name
+ .weak SYM (__\name)
+.endm
+#endif
+
+#ifdef __thumb__
+/* Register aliases. */
+
+work .req r4 @ XXXX is this safe ?
+dividend .req r0
+divisor .req r1
+overdone .req r2
+result .req r2
+curbit .req r3
+#endif
+#if 0
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+#endif
+
+/* ------------------------------------------------------------------------ */
+/* Bodies of the division and modulo routines. */
+/* ------------------------------------------------------------------------ */
+.macro ARM_DIV_BODY dividend, divisor, result, curbit
+
+#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
+
+#if defined (__thumb2__)
+ clz \curbit, \dividend
+ clz \result, \divisor
+ sub \curbit, \result, \curbit
+ rsb \curbit, \curbit, #31
+ adr \result, 1f
+ add \curbit, \result, \curbit, lsl #4
+ mov \result, #0
+ mov pc, \curbit
+.p2align 3
+1:
+ .set shift, 32
+ .rept 32
+ .set shift, shift - 1
+ cmp.w \dividend, \divisor, lsl #shift
+ nop.n
+ adc.w \result, \result, \result
+ it cs
+ subcs.w \dividend, \dividend, \divisor, lsl #shift
+ .endr
+#else
+ clz \curbit, \dividend
+ clz \result, \divisor
+ sub \curbit, \result, \curbit
+ rsbs \curbit, \curbit, #31
+ addne \curbit, \curbit, \curbit, lsl #1
+ mov \result, #0
+ addne pc, pc, \curbit, lsl #2
+ nop
+ .set shift, 32
+ .rept 32
+ .set shift, shift - 1
+ cmp \dividend, \divisor, lsl #shift
+ adc \result, \result, \result
+ subcs \dividend, \dividend, \divisor, lsl #shift
+ .endr
+#endif
+
+#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+#if __ARM_ARCH__ >= 5
+
+ clz \curbit, \divisor
+ clz \result, \dividend
+ sub \result, \curbit, \result
+ mov \curbit, #1
+ mov \divisor, \divisor, lsl \result
+ mov \curbit, \curbit, lsl \result
+ mov \result, #0
+
+#else /* __ARM_ARCH__ < 5 */
+
+ @ Initially shift the divisor left 3 bits if possible,
+ @ set curbit accordingly. This allows for curbit to be located
+	@ at the left end of each 4-bit nibble in the division loop,
+	@ saving one loop pass in most cases.
+ tst \divisor, #0xe0000000
+ moveq \divisor, \divisor, lsl #3
+ moveq \curbit, #8
+ movne \curbit, #1
+
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+1: cmp \divisor, #0x10000000
+ cmplo \divisor, \dividend
+ movlo \divisor, \divisor, lsl #4
+ movlo \curbit, \curbit, lsl #4
+ blo 1b
+
+	@ For a very big divisor, we must shift it one bit at a time, or
+	@ we would be in danger of overflowing.
+1: cmp \divisor, #0x80000000
+ cmplo \divisor, \dividend
+ movlo \divisor, \divisor, lsl #1
+ movlo \curbit, \curbit, lsl #1
+ blo 1b
+
+ mov \result, #0
+
+#endif /* __ARM_ARCH__ < 5 */
+
+ @ Division loop
+1: cmp \dividend, \divisor
+ do_it hs, t
+ subhs \dividend, \dividend, \divisor
+ orrhs \result, \result, \curbit
+ cmp \dividend, \divisor, lsr #1
+ do_it hs, t
+ subhs \dividend, \dividend, \divisor, lsr #1
+ orrhs \result, \result, \curbit, lsr #1
+ cmp \dividend, \divisor, lsr #2
+ do_it hs, t
+ subhs \dividend, \dividend, \divisor, lsr #2
+ orrhs \result, \result, \curbit, lsr #2
+ cmp \dividend, \divisor, lsr #3
+ do_it hs, t
+ subhs \dividend, \dividend, \divisor, lsr #3
+ orrhs \result, \result, \curbit, lsr #3
+ cmp \dividend, #0 @ Early termination?
+ do_it ne, t
+ movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
+ movne \divisor, \divisor, lsr #4
+ bne 1b
+
+#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+
+.endm
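+
+/* Worked example for the fast path above: dividing 100 by 9, the clz
+   difference is 3, so the computed branch enters the unrolled loop at
+   shift 3; the four remaining steps yield quotient 0b1011 = 11 and
+   remainder 1. */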
+/* ------------------------------------------------------------------------ */
+.macro ARM_DIV2_ORDER divisor, order
+
+#if __ARM_ARCH__ >= 5
+
+ clz \order, \divisor
+ rsb \order, \order, #31
+
+#else
+
+ cmp \divisor, #(1 << 16)
+ movhs \divisor, \divisor, lsr #16
+ movhs \order, #16
+ movlo \order, #0
+
+ cmp \divisor, #(1 << 8)
+ movhs \divisor, \divisor, lsr #8
+ addhs \order, \order, #8
+
+ cmp \divisor, #(1 << 4)
+ movhs \divisor, \divisor, lsr #4
+ addhs \order, \order, #4
+
+ cmp \divisor, #(1 << 2)
+ addhi \order, \order, #3
+ addls \order, \order, \divisor, lsr #1
+
+#endif
+
+.endm
+/* ------------------------------------------------------------------------ */
+.macro ARM_MOD_BODY dividend, divisor, order, spare
+
+#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
+
+ clz \order, \divisor
+ clz \spare, \dividend
+ sub \order, \order, \spare
+ rsbs \order, \order, #31
+ addne pc, pc, \order, lsl #3
+ nop
+ .set shift, 32
+ .rept 32
+ .set shift, shift - 1
+ cmp \dividend, \divisor, lsl #shift
+ subcs \dividend, \dividend, \divisor, lsl #shift
+ .endr
+
+#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+#if __ARM_ARCH__ >= 5
+
+ clz \order, \divisor
+ clz \spare, \dividend
+ sub \order, \order, \spare
+ mov \divisor, \divisor, lsl \order
+
+#else /* __ARM_ARCH__ < 5 */
+
+ mov \order, #0
+
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+1: cmp \divisor, #0x10000000
+ cmplo \divisor, \dividend
+ movlo \divisor, \divisor, lsl #4
+ addlo \order, \order, #4
+ blo 1b
+
+	@ For a very big divisor, we must shift it one bit at a time, or
+	@ we would be in danger of overflowing.
+1: cmp \divisor, #0x80000000
+ cmplo \divisor, \dividend
+ movlo \divisor, \divisor, lsl #1
+ addlo \order, \order, #1
+ blo 1b
+
+#endif /* __ARM_ARCH__ < 5 */
+
+	@ Perform all needed subtractions to keep only the remainder.
+	@ Do comparisons in batches of 4 first.
+ subs \order, \order, #3 @ yes, 3 is intended here
+ blt 2f
+
+1: cmp \dividend, \divisor
+ subhs \dividend, \dividend, \divisor
+ cmp \dividend, \divisor, lsr #1
+ subhs \dividend, \dividend, \divisor, lsr #1
+ cmp \dividend, \divisor, lsr #2
+ subhs \dividend, \dividend, \divisor, lsr #2
+ cmp \dividend, \divisor, lsr #3
+ subhs \dividend, \dividend, \divisor, lsr #3
+ cmp \dividend, #1
+ mov \divisor, \divisor, lsr #4
+ subges \order, \order, #4
+ bge 1b
+
+ tst \order, #3
+ teqne \dividend, #0
+ beq 5f
+
+	@ Either 1, 2 or 3 comparison/subtraction steps are left.
+2: cmn \order, #2
+ blt 4f
+ beq 3f
+ cmp \dividend, \divisor
+ subhs \dividend, \dividend, \divisor
+ mov \divisor, \divisor, lsr #1
+3: cmp \dividend, \divisor
+ subhs \dividend, \dividend, \divisor
+ mov \divisor, \divisor, lsr #1
+4: cmp \dividend, \divisor
+ subhs \dividend, \dividend, \divisor
+5:
+
+#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+
+.endm
+/* ------------------------------------------------------------------------ */
+.macro THUMB_DIV_MOD_BODY modulo
+ @ Load the constant 0x10000000 into our work register.
+ mov work, #1
+ lsl work, #28
+LSYM(Loop1):
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bhs LSYM(Lbignum)
+ cmp divisor, dividend
+ bhs LSYM(Lbignum)
+ lsl divisor, #4
+ lsl curbit, #4
+ b LSYM(Loop1)
+LSYM(Lbignum):
+ @ Set work to 0x80000000
+ lsl work, #3
+LSYM(Loop2):
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bhs LSYM(Loop3)
+ cmp divisor, dividend
+ bhs LSYM(Loop3)
+ lsl divisor, #1
+ lsl curbit, #1
+ b LSYM(Loop2)
+LSYM(Loop3):
+ @ Test for possible subtractions ...
+ .if \modulo
+ @ ... On the final pass, this may subtract too much from the dividend,
+ @ so keep track of which subtractions are done, we can fix them up
+ @ afterwards.
+ mov overdone, #0
+ cmp dividend, divisor
+ blo LSYM(Lover1)
+ sub dividend, dividend, divisor
+LSYM(Lover1):
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo LSYM(Lover2)
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+LSYM(Lover2):
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo LSYM(Lover3)
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+LSYM(Lover3):
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo LSYM(Lover4)
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+LSYM(Lover4):
+ mov ip, curbit
+ .else
+ @ ... and note which bits are done in the result. On the final pass,
+ @ this may subtract too much from the dividend, but the result will be ok,
+ @ since the "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ blo LSYM(Lover1)
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+LSYM(Lover1):
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo LSYM(Lover2)
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+LSYM(Lover2):
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo LSYM(Lover3)
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+LSYM(Lover3):
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo LSYM(Lover4)
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+LSYM(Lover4):
+ .endif
+
+ cmp dividend, #0 @ Early termination?
+ beq LSYM(Lover5)
+ lsr curbit, #4 @ No, any more bits to do?
+ beq LSYM(Lover5)
+ lsr divisor, #4
+ b LSYM(Loop3)
+LSYM(Lover5):
+ .if \modulo
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq LSYM(Lgot_result)
+
+ @ If we terminated early, because dividend became zero, then the
+ @ bit in ip will not be in the bottom nibble, and we should not
+ @ perform the additions below. We must test for this though
+ @ (rather relying upon the TSTs to prevent the additions) since
+ @ the bit in ip could be in the top two bits which might then match
+ @ with one of the smaller RORs.
+ mov curbit, ip
+ mov work, #0x7
+ tst curbit, work
+ beq LSYM(Lgot_result)
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq LSYM(Lover6)
+ lsr work, divisor, #3
+ add dividend, work
+LSYM(Lover6):
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq LSYM(Lover7)
+ lsr work, divisor, #2
+ add dividend, work
+LSYM(Lover7):
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq LSYM(Lgot_result)
+ lsr work, divisor, #1
+ add dividend, work
+ .endif
+LSYM(Lgot_result):
+.endm
+/* ------------------------------------------------------------------------ */
+/* Start of the Real Functions */
+/* ------------------------------------------------------------------------ */
+#ifdef L_udivsi3
+
+#if defined(__prefer_thumb__)
+
+ FUNC_START udivsi3
+ FUNC_ALIAS aeabi_uidiv udivsi3
+
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+LSYM(udivsi3_skip_div0_test):
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ pop { work }
+ RET
+
+#else /* ARM version/Thumb-2. */
+
+ ARM_FUNC_START udivsi3
+ ARM_FUNC_ALIAS aeabi_uidiv udivsi3
+
+ /* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
+ check for division-by-zero a second time. */
+LSYM(udivsi3_skip_div0_test):
+ subs r2, r1, #1
+ do_it eq
+ RETc(eq)
+ bcc LSYM(Ldiv0)
+ cmp r0, r1
+ bls 11f
+ tst r1, r2
+ beq 12f
+
+ ARM_DIV_BODY r0, r1, r2, r3
+
+ mov r0, r2
+ RET
+
+11: do_it eq, e
+ moveq r0, #1
+ movne r0, #0
+ RET
+
+12: ARM_DIV2_ORDER r1, r2
+
+ mov r0, r0, lsr r2
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END udivsi3 unsigned
+
+#if defined(__prefer_thumb__)
+FUNC_START aeabi_uidivmod
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ push {r0, r1, lr}
+ bl LSYM(udivsi3_skip_div0_test)
+ POP {r1, r2, r3}
+ mul r2, r0
+ sub r1, r1, r2
+ bx r3
+#else
+ARM_FUNC_START aeabi_uidivmod
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ stmfd sp!, { r0, r1, lr }
+ bl LSYM(udivsi3_skip_div0_test)
+ ldmfd sp!, { r1, r2, lr }
+ mul r3, r2, r0
+ sub r1, r1, r3
+ RET
+#endif
+ FUNC_END aeabi_uidivmod
+
+#endif /* L_udivsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_umodsi3
+
+ FUNC_START umodsi3
+
+#ifdef __thumb__
+
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+ mov curbit, #1
+ cmp dividend, divisor
+ bhs LSYM(Lover10)
+ RET
+
+LSYM(Lover10):
+ push { work }
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ subs r2, r1, #1 @ compare divisor with 1
+ bcc LSYM(Ldiv0)
+ cmpne r0, r1 @ compare dividend with divisor
+ moveq r0, #0
+ tsthi r1, r2 @ see if divisor is power of 2
+ andeq r0, r0, r2
+ RETc(ls)
+
+ ARM_MOD_BODY r0, r1, r2, r3
+
+ RET
+
+#endif /* ARM version. */
+
+ DIV_FUNC_END umodsi3 unsigned
+
+#endif /* L_umodsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_divsi3
+
+#if defined(__prefer_thumb__)
+
+ FUNC_START divsi3
+ FUNC_ALIAS aeabi_idiv divsi3
+
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+LSYM(divsi3_skip_div0_test):
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl LSYM(Lover10)
+ neg divisor, divisor @ Loops below use unsigned.
+LSYM(Lover10):
+ cmp dividend, #0
+ bpl LSYM(Lover11)
+ neg dividend, dividend
+LSYM(Lover11):
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ bpl LSYM(Lover12)
+ neg r0, r0
+LSYM(Lover12):
+ pop { work }
+ RET
+
+#else /* ARM/Thumb-2 version. */
+
+ ARM_FUNC_START divsi3
+ ARM_FUNC_ALIAS aeabi_idiv divsi3
+
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+LSYM(divsi3_skip_div0_test):
+ eor ip, r0, r1 @ save the sign of the result.
+ do_it mi
+ rsbmi r1, r1, #0 @ loops below use unsigned.
+ subs r2, r1, #1 @ division by 1 or -1 ?
+ beq 10f
+ movs r3, r0
+ do_it mi
+ rsbmi r3, r0, #0 @ positive dividend value
+ cmp r3, r1
+ bls 11f
+ tst r1, r2 @ divisor is power of 2 ?
+ beq 12f
+
+ ARM_DIV_BODY r3, r1, r0, r2
+
+ cmp ip, #0
+ do_it mi
+ rsbmi r0, r0, #0
+ RET
+
+10: teq ip, r0 @ same sign ?
+ do_it mi
+ rsbmi r0, r0, #0
+ RET
+
+11: do_it lo
+ movlo r0, #0
+ do_it eq,t
+ moveq r0, ip, asr #31
+ orreq r0, r0, #1
+ RET
+
+12: ARM_DIV2_ORDER r1, r2
+
+ cmp ip, #0
+ mov r0, r3, lsr r2
+ do_it mi
+ rsbmi r0, r0, #0
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END divsi3 signed
+
+#if defined(__prefer_thumb__)
+FUNC_START aeabi_idivmod
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ push {r0, r1, lr}
+ bl LSYM(divsi3_skip_div0_test)
+ POP {r1, r2, r3}
+ mul r2, r0
+ sub r1, r1, r2
+ bx r3
+#else
+ARM_FUNC_START aeabi_idivmod
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ stmfd sp!, { r0, r1, lr }
+ bl LSYM(divsi3_skip_div0_test)
+ ldmfd sp!, { r1, r2, lr }
+ mul r3, r2, r0
+ sub r1, r1, r3
+ RET
+#endif
+ FUNC_END aeabi_idivmod
+
+#endif /* L_divsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_modsi3
+
+ FUNC_START modsi3
+
+#ifdef __thumb__
+
+ mov curbit, #1
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+ bpl LSYM(Lover10)
+ neg divisor, divisor @ Loops below use unsigned.
+LSYM(Lover10):
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ work later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ bpl LSYM(Lover11)
+ neg dividend, dividend
+LSYM(Lover11):
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ cmp work, #0
+ bpl LSYM(Lover12)
+ neg dividend, dividend
+LSYM(Lover12):
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ rsbmi r1, r1, #0 @ loops below use unsigned.
+ movs ip, r0 @ preserve sign of dividend
+ rsbmi r0, r0, #0 @ if negative make positive
+ subs r2, r1, #1 @ compare divisor with 1
+ cmpne r0, r1 @ compare dividend with divisor
+ moveq r0, #0
+ tsthi r1, r2 @ see if divisor is power of 2
+ andeq r0, r0, r2
+ bls 10f
+
+ ARM_MOD_BODY r0, r1, r2, r3
+
+10: cmp ip, #0
+ rsbmi r0, r0, #0
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END modsi3 signed
+
+#endif /* L_modsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_tls
+
+#ifdef __ARM_EABI__
+ WEAK aeabi_idiv0
+ WEAK aeabi_ldiv0
+ FUNC_START aeabi_idiv0
+ FUNC_START aeabi_ldiv0
+ RET
+ FUNC_END aeabi_ldiv0
+ FUNC_END aeabi_idiv0
+#else
+ FUNC_START div0
+ RET
+ FUNC_END div0
+#endif
+
+#endif /* L_dvmd_tls */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
+
+/* Constant taken from <asm/signal.h>. */
+#define SIGFPE 8
+
+#ifdef __ARM_EABI__
+ WEAK aeabi_idiv0
+ WEAK aeabi_ldiv0
+ ARM_FUNC_START aeabi_idiv0
+ ARM_FUNC_START aeabi_ldiv0
+#else
+ ARM_FUNC_START div0
+#endif
+
+ do_push {r1, lr}
+ mov r0, #SIGFPE
+ bl SYM(raise) __PLT__
+ RETLDM r1
+
+#ifdef __ARM_EABI__
+ FUNC_END aeabi_ldiv0
+ FUNC_END aeabi_idiv0
+#else
+ FUNC_END div0
+#endif
+
+#endif /* L_dvmd_lnx */
+#ifdef L_clear_cache
+#if defined __ARM_EABI__ && defined __linux__
+@ EABI GNU/Linux call to cacheflush syscall.
+ ARM_FUNC_START clear_cache
+ do_push {r7}
+#if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
+ movw r7, #2
+ movt r7, #0xf
+#else
+ mov r7, #0xf0000
+ add r7, r7, #2
+#endif
+ mov r2, #0
+ swi 0
+ do_pop {r7}
+ RET
+ FUNC_END clear_cache
+#else
+#error "This is only for ARM EABI GNU/Linux"
+#endif
+#endif /* L_clear_cache */
+/* ------------------------------------------------------------------------ */
+/* Dword shift operations. */
+/* All the following Dword shift variants rely on the fact that
+ shft xxx, Reg
+ is in fact done as
+ shft xxx, (Reg & 255)
+   so for Reg values in (32...63) and (-1...-31) we will get zero (in the
+ case of logical shifts) or the sign (for asr). */
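+
+/* As an illustration, the ARM path of __lshrdi3 below is, in C terms
+   (al/ah are the low/high words, r2 the shift count; shifts of 32 or
+   more rely on the hardware behaviour described above to produce 0):
+
+       al = (r2 < 32) ? (al >> r2) | (ah << (32 - r2))
+                      : ah >> (r2 - 32);
+       ah = ah >> r2;     // becomes 0 once r2 >= 32
+*/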
+
+#ifdef __ARMEB__
+#define al r1
+#define ah r0
+#else
+#define al r0
+#define ah r1
+#endif
+
+/* Prevent __aeabi double-word shifts from being produced on SymbianOS. */
+#ifndef __symbian__
+
+#ifdef L_lshrdi3
+
+ FUNC_START lshrdi3
+ FUNC_ALIAS aeabi_llsr lshrdi3
+
+#ifdef __thumb__
+ lsr al, r2
+ mov r3, ah
+ lsr ah, r2
+ mov ip, r3
+ sub r2, #32
+ lsr r3, r2
+ orr al, r3
+ neg r2, r2
+ mov r3, ip
+ lsl r3, r2
+ orr al, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi al, al, lsr r2
+ movpl al, ah, lsr r3
+ orrmi al, al, ah, lsl ip
+ mov ah, ah, lsr r2
+ RET
+#endif
+ FUNC_END aeabi_llsr
+ FUNC_END lshrdi3
+
+#endif
+
+#ifdef L_ashrdi3
+
+ FUNC_START ashrdi3
+ FUNC_ALIAS aeabi_lasr ashrdi3
+
+#ifdef __thumb__
+ lsr al, r2
+ mov r3, ah
+ asr ah, r2
+ sub r2, #32
+ @ If r2 is negative at this point the following step would OR
+ @ the sign bit into all of AL. That's not what we want...
+ bmi 1f
+ mov ip, r3
+ asr r3, r2
+ orr al, r3
+ mov r3, ip
+1:
+ neg r2, r2
+ lsl r3, r2
+ orr al, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi al, al, lsr r2
+ movpl al, ah, asr r3
+ orrmi al, al, ah, lsl ip
+ mov ah, ah, asr r2
+ RET
+#endif
+
+ FUNC_END aeabi_lasr
+ FUNC_END ashrdi3
+
+#endif
+
+#ifdef L_ashldi3
+
+ FUNC_START ashldi3
+ FUNC_ALIAS aeabi_llsl ashldi3
+
+#ifdef __thumb__
+ lsl ah, r2
+ mov r3, al
+ lsl al, r2
+ mov ip, r3
+ sub r2, #32
+ lsl r3, r2
+ orr ah, r3
+ neg r2, r2
+ mov r3, ip
+ lsr r3, r2
+ orr ah, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi ah, ah, lsl r2
+ movpl ah, al, lsl r3
+ orrmi ah, ah, al, lsr ip
+ mov al, al, lsl r2
+ RET
+#endif
+ FUNC_END aeabi_llsl
+ FUNC_END ashldi3
+
+#endif
+
+#endif /* __symbian__ */
+
+#if ((__ARM_ARCH__ > 5) && !defined(__ARM_ARCH_6M__)) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define HAVE_ARM_CLZ 1
+#endif
+
+#ifdef L_clzsi2
+#if defined(__ARM_ARCH_6M__)
+FUNC_START clzsi2
+ mov r1, #28
+ mov r3, #1
+ lsl r3, r3, #16
+ cmp r0, r3 /* 0x10000 */
+ bcc 2f
+ lsr r0, r0, #16
+ sub r1, r1, #16
+2: lsr r3, r3, #8
+ cmp r0, r3 /* #0x100 */
+ bcc 2f
+ lsr r0, r0, #8
+ sub r1, r1, #8
+2: lsr r3, r3, #4
+ cmp r0, r3 /* #0x10 */
+ bcc 2f
+ lsr r0, r0, #4
+ sub r1, r1, #4
+2: adr r2, 1f
+ ldrb r0, [r2, r0]
+ add r0, r0, r1
+ bx lr
+.align 2
+1:
+.byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
+ FUNC_END clzsi2
+#else
+ARM_FUNC_START clzsi2
+# if defined(HAVE_ARM_CLZ)
+ clz r0, r0
+ RET
+# else
+ mov r1, #28
+ cmp r0, #0x10000
+ do_it cs, t
+ movcs r0, r0, lsr #16
+ subcs r1, r1, #16
+ cmp r0, #0x100
+ do_it cs, t
+ movcs r0, r0, lsr #8
+ subcs r1, r1, #8
+ cmp r0, #0x10
+ do_it cs, t
+ movcs r0, r0, lsr #4
+ subcs r1, r1, #4
+ adr r2, 1f
+ ldrb r0, [r2, r0]
+ add r0, r0, r1
+ RET
+.align 2
+1:
+.byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
+# endif /* !HAVE_ARM_CLZ */
+ FUNC_END clzsi2
+#endif
+#endif /* L_clzsi2 */
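+
+/* The table-driven paths above compute clz in C roughly as follows
+   (a sketch; the .byte table above is exactly `tab`):
+
+       static const unsigned char tab[16] =
+         { 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
+       n = 28;
+       if (x >= 0x10000) { x >>= 16; n -= 16; }
+       if (x >= 0x100)   { x >>= 8;  n -= 8;  }
+       if (x >= 0x10)    { x >>= 4;  n -= 4;  }
+       return n + tab[x];
+*/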
+
+#ifdef L_clzdi2
+#if !defined(HAVE_ARM_CLZ)
+
+# if defined(__ARM_ARCH_6M__)
+FUNC_START clzdi2
+ push {r4, lr}
+# else
+ARM_FUNC_START clzdi2
+ do_push {r4, lr}
+# endif
+ cmp xxh, #0
+ bne 1f
+# ifdef __ARMEB__
+ mov r0, xxl
+ bl __clzsi2
+ add r0, r0, #32
+ b 2f
+1:
+ bl __clzsi2
+# else
+ bl __clzsi2
+ add r0, r0, #32
+ b 2f
+1:
+ mov r0, xxh
+ bl __clzsi2
+# endif
+2:
+# if defined(__ARM_ARCH_6M__)
+ pop {r4, pc}
+# else
+ RETLDM r4
+# endif
+ FUNC_END clzdi2
+
+#else /* HAVE_ARM_CLZ */
+
+ARM_FUNC_START clzdi2
+ cmp xxh, #0
+ do_it eq, et
+ clzeq r0, xxl
+ clzne r0, xxh
+ addeq r0, r0, #32
+ RET
+ FUNC_END clzdi2
+
+#endif
+#endif /* L_clzdi2 */
+
+/* ------------------------------------------------------------------------ */
+/* These next two sections are here, despite the fact that they contain Thumb
+   assembler, because their presence allows interworked code to be linked even
+   when the GCC library is this one.  */
+
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
+ || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
+ || __ARM_ARCH__ >= 6
+
+#if defined L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+   The address of the function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+ .force_thumb
+
+.macro call_via register
+ THUMB_FUNC_START _call_via_\register
+
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+/* Don't bother with the old interworking routines for Thumb-2. */
+/* ??? Maybe only omit these on "m" variants. */
+#if !defined(__thumb2__) && !defined(__ARM_ARCH_6M__)
+
+#if defined L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+   of the function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode.
+
+ There are three variations of this code. The first,
+ _interwork_call_via_rN(), will push the return address onto the
+ stack and pop it in _arm_return(). It should only be used if all
+ arguments are passed in registers.
+
+ The second, _interwork_r7_call_via_rN(), instead stores the return
+ address at [r7, #-4]. It is the caller's responsibility to ensure
+ that this address is valid and contains no useful data.
+
+ The third, _interwork_r11_call_via_rN(), works in the same way but
+ uses r11 instead of r7. It is useful if the caller does not really
+ need a frame pointer. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+LSYM(Lstart_arm_return):
+ cfi_start LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
+ cfi_push 0, 0xe, -0x8, 0x8
+ nop @ This nop is for the benefit of debuggers, so that
+ @ backtraces will use the correct unwind information.
+_arm_return:
+ RETLDM unwind=LSYM(Lstart_arm_return)
+ cfi_end LSYM(Lend_arm_return)
+
+ .globl _arm_return_r7
+_arm_return_r7:
+ ldr lr, [r7, #-4]
+ bx lr
+
+ .globl _arm_return_r11
+_arm_return_r11:
+ ldr lr, [r11, #-4]
+ bx lr
+
+.macro interwork_with_frame frame, register, name, return
+ .code 16
+
+ THUMB_FUNC_START \name
+
+ bx pc
+ nop
+
+ .code 32
+ tst \register, #1
+ streq lr, [\frame, #-4]
+ adreq lr, _arm_return_\frame
+ bx \register
+
+ SIZE (\name)
+.endm
+
+.macro interwork register
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_\register
+
+ bx pc
+ nop
+
+ .code 32
+ .globl LSYM(Lchange_\register)
+LSYM(Lchange_\register):
+ tst \register, #1
+ streq lr, [sp, #-8]!
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+
+ interwork_with_frame r7,\register,_interwork_r7_call_via_\register
+ interwork_with_frame r11,\register,_interwork_r11_call_via_\register
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The LR case has to be handled a little differently... */
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_lr
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr, pc}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
+#endif /* !__thumb2__ */
+
+/* Functions to support compact PIC switch tables in Thumb-1 state.
+   All these routines take an index into the table in r0.  The
+   table is at LR & ~1 (but this must be rounded up in the case
+   of 32-bit entries).  They are only permitted to clobber r12
+   and r14 and r0 must be preserved on exit.  */
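+
+/* In C terms each helper below performs roughly the following (a
+   sketch; the entry width and sign/zero extension vary per helper, and
+   the _si variant instead word-aligns the table and adds the loaded
+   word to the table base):
+
+       table  = return_address & ~1;        // table follows the BL
+       offset = (signed char) table[index]; // e.g. the sqi variant
+       return_address += 2 * offset;        // stay in Thumb state
+*/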
+#ifdef L_thumb1_case_sqi
+
+ .text
+ .align 0
+ .force_thumb
+ .syntax unified
+ THUMB_FUNC_START __gnu_thumb1_case_sqi
+ push {r1}
+ mov r1, lr
+ lsrs r1, r1, #1
+ lsls r1, r1, #1
+ ldrsb r1, [r1, r0]
+ lsls r1, r1, #1
+ add lr, lr, r1
+ pop {r1}
+ bx lr
+ SIZE (__gnu_thumb1_case_sqi)
+#endif
+
+#ifdef L_thumb1_case_uqi
+
+ .text
+ .align 0
+ .force_thumb
+ .syntax unified
+ THUMB_FUNC_START __gnu_thumb1_case_uqi
+ push {r1}
+ mov r1, lr
+ lsrs r1, r1, #1
+ lsls r1, r1, #1
+ ldrb r1, [r1, r0]
+ lsls r1, r1, #1
+ add lr, lr, r1
+ pop {r1}
+ bx lr
+ SIZE (__gnu_thumb1_case_uqi)
+#endif
+
+#ifdef L_thumb1_case_shi
+
+ .text
+ .align 0
+ .force_thumb
+ .syntax unified
+ THUMB_FUNC_START __gnu_thumb1_case_shi
+ push {r0, r1}
+ mov r1, lr
+ lsrs r1, r1, #1
+ lsls r0, r0, #1
+ lsls r1, r1, #1
+ ldrsh r1, [r1, r0]
+ lsls r1, r1, #1
+ add lr, lr, r1
+ pop {r0, r1}
+ bx lr
+ SIZE (__gnu_thumb1_case_shi)
+#endif
+
+#ifdef L_thumb1_case_uhi
+
+ .text
+ .align 0
+ .force_thumb
+ .syntax unified
+ THUMB_FUNC_START __gnu_thumb1_case_uhi
+ push {r0, r1}
+ mov r1, lr
+ lsrs r1, r1, #1
+ lsls r0, r0, #1
+ lsls r1, r1, #1
+ ldrh r1, [r1, r0]
+ lsls r1, r1, #1
+ add lr, lr, r1
+ pop {r0, r1}
+ bx lr
+ SIZE (__gnu_thumb1_case_uhi)
+#endif
+
+#ifdef L_thumb1_case_si
+
+ .text
+ .align 0
+ .force_thumb
+ .syntax unified
+ THUMB_FUNC_START __gnu_thumb1_case_si
+ push {r0, r1}
+ mov r1, lr
+ adds.n r1, r1, #2 /* Align to word. */
+ lsrs r1, r1, #2
+ lsls r0, r0, #2
+ lsls r1, r1, #2
+ ldr r0, [r1, r0]
+ adds r0, r0, r1
+ mov lr, r0
+ pop {r0, r1}
+ mov pc, lr /* We know we were called from thumb code. */
+ SIZE (__gnu_thumb1_case_si)
+#endif
+
+#endif /* Arch supports thumb. */
+
+#ifndef __symbian__
+#ifndef __ARM_ARCH_6M__
+#include "ieee754-df.S"
+#include "ieee754-sf.S"
+#include "bpabi.S"
+#else /* __ARM_ARCH_6M__ */
+#include "bpabi-v6m.S"
+#endif /* __ARM_ARCH_6M__ */
+#endif /* !__symbian__ */
diff --git a/libgcc/config/arm/libgcc-bpabi.ver b/libgcc/config/arm/libgcc-bpabi.ver
new file mode 100644
index 00000000000..3ba8364dc8e
--- /dev/null
+++ b/libgcc/config/arm/libgcc-bpabi.ver
@@ -0,0 +1,108 @@
+# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_3.5 {
+ # BPABI symbols
+ __aeabi_cdcmpeq
+ __aeabi_cdcmple
+ __aeabi_cdrcmple
+ __aeabi_cfcmpeq
+ __aeabi_cfcmple
+ __aeabi_cfrcmple
+ __aeabi_d2f
+ __aeabi_d2iz
+ __aeabi_d2lz
+ __aeabi_d2uiz
+ __aeabi_d2ulz
+ __aeabi_dadd
+ __aeabi_dcmpeq
+ __aeabi_dcmpge
+ __aeabi_dcmpgt
+ __aeabi_dcmple
+ __aeabi_dcmplt
+ __aeabi_dcmpun
+ __aeabi_ddiv
+ __aeabi_dmul
+ __aeabi_dneg
+ __aeabi_drsub
+ __aeabi_dsub
+ __aeabi_f2d
+ __aeabi_f2iz
+ __aeabi_f2lz
+ __aeabi_f2uiz
+ __aeabi_f2ulz
+ __aeabi_fadd
+ __aeabi_fcmpeq
+ __aeabi_fcmpge
+ __aeabi_fcmpgt
+ __aeabi_fcmple
+ __aeabi_fcmplt
+ __aeabi_fcmpun
+ __aeabi_fdiv
+ __aeabi_fmul
+ __aeabi_fneg
+ __aeabi_frsub
+ __aeabi_fsub
+ __aeabi_i2d
+ __aeabi_i2f
+ __aeabi_idiv
+ __aeabi_idiv0
+ __aeabi_idivmod
+ __aeabi_l2d
+ __aeabi_l2f
+ __aeabi_lasr
+ __aeabi_lcmp
+ __aeabi_ldiv0
+ __aeabi_ldivmod
+ __aeabi_llsl
+ __aeabi_llsr
+ __aeabi_lmul
+ __aeabi_ui2d
+ __aeabi_ui2f
+ __aeabi_uidiv
+ __aeabi_uidivmod
+ __aeabi_uldivmod
+ __aeabi_ulcmp
+ __aeabi_ul2d
+ __aeabi_ul2f
+ __aeabi_uread4
+ __aeabi_uread8
+ __aeabi_uwrite4
+ __aeabi_uwrite8
+
+ # Exception-Handling
+ # \S 7.5
+ _Unwind_Complete
+ _Unwind_VRS_Get
+ _Unwind_VRS_Set
+ _Unwind_VRS_Pop
+ # \S 9.2
+ __aeabi_unwind_cpp_pr0
+ __aeabi_unwind_cpp_pr1
+ __aeabi_unwind_cpp_pr2
+ # The libstdc++ exception-handling personality routine uses this
+ # GNU-specific entry point.
+ __gnu_unwind_frame
+}
+
+%exclude {
+ _Unwind_Backtrace
+}
+GCC_4.3.0 {
+ _Unwind_Backtrace
+}
diff --git a/libgcc/config/arm/libunwind.S b/libgcc/config/arm/libunwind.S
index a3a19daab4b..8166cd86e47 100644
--- a/libgcc/config/arm/libunwind.S
+++ b/libgcc/config/arm/libunwind.S
@@ -40,7 +40,7 @@
#ifndef __symbian__
-#include "config/arm/lib1funcs.asm"
+#include "lib1funcs.S"
.macro UNPREFIX name
.global SYM (\name)
diff --git a/libgcc/config/arm/linux-atomic-64bit.c b/libgcc/config/arm/linux-atomic-64bit.c
new file mode 100644
index 00000000000..af94c7f4ae5
--- /dev/null
+++ b/libgcc/config/arm/linux-atomic-64bit.c
@@ -0,0 +1,166 @@
+/* 64bit Linux-specific atomic operations for ARM EABI.
+ Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Based on linux-atomic.c
+
+ 64 bit additions david.gilbert@linaro.org
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* 64bit helper functions for atomic operations; the compiler will
+ call these when the code is compiled for a CPU without ldrexd/strexd.
+   (If the CPU has them, the compiler inlines the operation.)
+
+ These helpers require a kernel helper that's only present on newer
+ kernels; we check for that in an init section and bail out rather
+   unceremoniously.  */
+
+extern unsigned int __write (int fd, const void *buf, unsigned int count);
+extern void abort (void);
+
+/* Kernel helper for compare-and-exchange. */
+typedef int (__kernel_cmpxchg64_t) (const long long* oldval,
+ const long long* newval,
+ long long *ptr);
+#define __kernel_cmpxchg64 (*(__kernel_cmpxchg64_t *) 0xffff0f60)
+
+/* Kernel helper page version number. */
+#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+
+/* Check that the kernel has a new enough version at load. */
+static void __check_for_sync8_kernelhelper (void)
+{
+ if (__kernel_helper_version < 5)
+ {
+ const char err[] = "A newer kernel is required to run this binary. "
+ "(__kernel_cmpxchg64 helper)\n";
+ /* At this point we need a way to crash with some information
+ for the user - I'm not sure I can rely on much else being
+	 available at this point, so do the same as generic-morestack.c:
+	 write () and abort ().  */
+ __write (2 /* stderr. */, err, sizeof (err));
+ abort ();
+ }
+}
+
+static void (*__sync8_kernelhelper_inithook[]) (void)
+ __attribute__ ((used, section (".init_array"))) = {
+ &__check_for_sync8_kernelhelper
+};
+
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+#define FETCH_AND_OP_WORD64(OP, PFX_OP, INF_OP) \
+ long long HIDDEN \
+ __sync_fetch_and_##OP##_8 (long long *ptr, long long val) \
+ { \
+ int failure; \
+ long long tmp,tmp2; \
+ \
+ do { \
+ tmp = *ptr; \
+ tmp2 = PFX_OP (tmp INF_OP val); \
+ failure = __kernel_cmpxchg64 (&tmp, &tmp2, ptr); \
+ } while (failure != 0); \
+ \
+ return tmp; \
+ }
+
+FETCH_AND_OP_WORD64 (add, , +)
+FETCH_AND_OP_WORD64 (sub, , -)
+FETCH_AND_OP_WORD64 (or, , |)
+FETCH_AND_OP_WORD64 (and, , &)
+FETCH_AND_OP_WORD64 (xor, , ^)
+FETCH_AND_OP_WORD64 (nand, ~, &)
+
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement __sync_<op>_and_fetch for 64-bit quantities.  */
+
+#define OP_AND_FETCH_WORD64(OP, PFX_OP, INF_OP) \
+ long long HIDDEN \
+ __sync_##OP##_and_fetch_8 (long long *ptr, long long val) \
+ { \
+ int failure; \
+ long long tmp,tmp2; \
+ \
+ do { \
+ tmp = *ptr; \
+ tmp2 = PFX_OP (tmp INF_OP val); \
+ failure = __kernel_cmpxchg64 (&tmp, &tmp2, ptr); \
+ } while (failure != 0); \
+ \
+ return tmp2; \
+ }
+
+OP_AND_FETCH_WORD64 (add, , +)
+OP_AND_FETCH_WORD64 (sub, , -)
+OP_AND_FETCH_WORD64 (or, , |)
+OP_AND_FETCH_WORD64 (and, , &)
+OP_AND_FETCH_WORD64 (xor, , ^)
+OP_AND_FETCH_WORD64 (nand, ~, &)
+
+long long HIDDEN
+__sync_val_compare_and_swap_8 (long long *ptr, long long oldval,
+ long long newval)
+{
+ int failure;
+ long long actual_oldval;
+
+ while (1)
+ {
+ actual_oldval = *ptr;
+
+ if (__builtin_expect (oldval != actual_oldval, 0))
+ return actual_oldval;
+
+ failure = __kernel_cmpxchg64 (&actual_oldval, &newval, ptr);
+
+ if (__builtin_expect (!failure, 1))
+ return oldval;
+ }
+}
+
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_8 (long long *ptr, long long oldval,
+ long long newval)
+{
+ int failure = __kernel_cmpxchg64 (&oldval, &newval, ptr);
+ return (failure == 0);
+}
+
+long long HIDDEN
+__sync_lock_test_and_set_8 (long long *ptr, long long val)
+{
+ int failure;
+ long long oldval;
+
+ do {
+ oldval = *ptr;
+ failure = __kernel_cmpxchg64 (&oldval, &val, ptr);
+ } while (failure != 0);
+
+ return oldval;
+}
diff --git a/libgcc/config/arm/linux-atomic.c b/libgcc/config/arm/linux-atomic.c
new file mode 100644
index 00000000000..80f161d06a7
--- /dev/null
+++ b/libgcc/config/arm/linux-atomic.c
@@ -0,0 +1,279 @@
+/* Linux-specific atomic operations for ARM EABI.
+ Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Kernel helper for compare-and-exchange. */
+typedef int (__kernel_cmpxchg_t) (int oldval, int newval, int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
+
+/* Kernel helper for memory barrier. */
+typedef void (__kernel_dmb_t) (void);
+#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
+
+/* Note: we implement byte, short and int versions of atomic operations using
+ the above kernel helpers; see linux-atomic-64bit.c for "long long" (64-bit)
+ operations. */
+
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+#ifdef __ARMEL__
+#define INVERT_MASK_1 0
+#define INVERT_MASK_2 0
+#else
+#define INVERT_MASK_1 24
+#define INVERT_MASK_2 16
+#endif
+
+#define MASK_1 0xffu
+#define MASK_2 0xffffu
+
+#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+ { \
+ int failure, tmp; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ } while (failure != 0); \
+ \
+ return tmp; \
+ }
+
+FETCH_AND_OP_WORD (add, , +)
+FETCH_AND_OP_WORD (sub, , -)
+FETCH_AND_OP_WORD (or, , |)
+FETCH_AND_OP_WORD (and, , &)
+FETCH_AND_OP_WORD (xor, , ^)
+FETCH_AND_OP_WORD (nand, ~, &)
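+
+/* For reference, FETCH_AND_OP_WORD (add, , +) above expands to:
+
+   int HIDDEN
+   __sync_fetch_and_add_4 (int *ptr, int val)
+   {
+     int failure, tmp;
+
+     do {
+       tmp = *ptr;
+       failure = __kernel_cmpxchg (tmp, (tmp + val), ptr);
+     } while (failure != 0);
+
+     return tmp;
+   }
+
+   i.e. a classic compare-and-swap retry loop that returns the value
+   observed before the update.  */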
+
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
+ subword-sized quantities. */
+
+#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
+ TYPE HIDDEN \
+ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
+ { \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ unsigned int mask, shift, oldval, newval; \
+ int failure; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = ((PFX_OP (((oldval & mask) >> shift) \
+ INF_OP (unsigned int) val)) << shift) & mask; \
+ newval |= oldval & ~mask; \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (RETURN & mask) >> shift; \
+ }
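+
+/* A worked example of the shift/mask arithmetic above: for an unsigned
+   short at address 0x1002 on a little-endian target,
+   shift = ((0x1002 & 3) << 3) ^ INVERT_MASK_2 = 16 ^ 0 = 16 and
+   mask  = 0xffff << 16 = 0xffff0000, i.e. the halfword occupies the
+   top half of its containing word.  On big-endian, INVERT_MASK_2 = 16
+   flips the shift to 0, selecting the low half instead.  */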
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
+
+#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+ { \
+ int tmp, failure; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ } while (failure != 0); \
+ \
+ return PFX_OP (tmp INF_OP val); \
+ }
+
+OP_AND_FETCH_WORD (add, , +)
+OP_AND_FETCH_WORD (sub, , -)
+OP_AND_FETCH_WORD (or, , |)
+OP_AND_FETCH_WORD (and, , &)
+OP_AND_FETCH_WORD (xor, , ^)
+OP_AND_FETCH_WORD (nand, ~, &)
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
+
+int HIDDEN
+__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int actual_oldval, fail;
+
+ while (1)
+ {
+ actual_oldval = *ptr;
+
+ if (__builtin_expect (oldval != actual_oldval, 0))
+ return actual_oldval;
+
+ fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+
+ if (__builtin_expect (!fail, 1))
+ return oldval;
+ }
+}
+
+#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ int *wordptr = (int *)((unsigned int) ptr & ~3), fail; \
+ unsigned int mask, shift, actual_oldval, actual_newval; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ while (1) \
+ { \
+ actual_oldval = *wordptr; \
+ \
+ if (__builtin_expect (((actual_oldval & mask) >> shift) != \
+ (unsigned int) oldval, 0)) \
+ return (actual_oldval & mask) >> shift; \
+ \
+ actual_newval = (actual_oldval & ~mask) \
+ | (((unsigned int) newval << shift) & mask); \
+ \
+ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
+ wordptr); \
+ \
+ if (__builtin_expect (!fail, 1)) \
+ return oldval; \
+ } \
+ }
+
+SUBWORD_VAL_CAS (unsigned short, 2)
+SUBWORD_VAL_CAS (unsigned char, 1)
+
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int failure = __kernel_cmpxchg (oldval, newval, ptr);
+ return (failure == 0);
+}
+
+#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
+ bool HIDDEN \
+ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ TYPE actual_oldval \
+ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
+ return (oldval == actual_oldval); \
+ }
+
+SUBWORD_BOOL_CAS (unsigned short, 2)
+SUBWORD_BOOL_CAS (unsigned char, 1)
+
+void HIDDEN
+__sync_synchronize (void)
+{
+ __kernel_dmb ();
+}
+
+int HIDDEN
+__sync_lock_test_and_set_4 (int *ptr, int val)
+{
+ int failure, oldval;
+
+ do {
+ oldval = *ptr;
+ failure = __kernel_cmpxchg (oldval, val, ptr);
+ } while (failure != 0);
+
+ return oldval;
+}
+
+#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+ { \
+ int failure; \
+ unsigned int oldval, newval, shift, mask; \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = (oldval & ~mask) \
+ | (((unsigned int) val << shift) & mask); \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (oldval & mask) >> shift; \
+ }
+
+SUBWORD_TEST_AND_SET (unsigned short, 2)
+SUBWORD_TEST_AND_SET (unsigned char, 1)
+
+#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
+ void HIDDEN \
+ __sync_lock_release_##WIDTH (TYPE *ptr) \
+ { \
+ /* All writes before this point must be seen before we release \
+ the lock itself. */ \
+ __kernel_dmb (); \
+ *ptr = 0; \
+ }
+
+SYNC_LOCK_RELEASE (long long, 8)
+SYNC_LOCK_RELEASE (int, 4)
+SYNC_LOCK_RELEASE (short, 2)
+SYNC_LOCK_RELEASE (char, 1)
diff --git a/libgcc/config/arm/t-arm b/libgcc/config/arm/t-arm
new file mode 100644
index 00000000000..4e17e99b4a5
--- /dev/null
+++ b/libgcc/config/arm/t-arm
@@ -0,0 +1,3 @@
+LIB1ASMSRC = arm/lib1funcs.S
+LIB1ASMFUNCS = _thumb1_case_sqi _thumb1_case_uqi _thumb1_case_shi \
+ _thumb1_case_uhi _thumb1_case_si
diff --git a/libgcc/config/arm/t-bpabi b/libgcc/config/arm/t-bpabi
index a3b23dcd20c..e79cbd7064e 100644
--- a/libgcc/config/arm/t-bpabi
+++ b/libgcc/config/arm/t-bpabi
@@ -1,3 +1,15 @@
+# Add the bpabi.S functions.
+LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod
+
+# Add the BPABI C functions.
+LIB2ADD += $(srcdir)/config/arm/bpabi.c \
+ $(srcdir)/config/arm/unaligned-funcs.c
+
+LIB2ADD_ST += $(srcdir)/config/arm/fp16.c
+
LIB2ADDEH = $(srcdir)/config/arm/unwind-arm.c \
$(srcdir)/config/arm/libunwind.S \
$(srcdir)/config/arm/pr-support.c $(srcdir)/unwind-c.c
+
+# Add the BPABI names.
+SHLIB_MAPFILES += $(srcdir)/config/arm/libgcc-bpabi.ver
diff --git a/libgcc/config/arm/t-elf b/libgcc/config/arm/t-elf
new file mode 100644
index 00000000000..d9e8064e4de
--- /dev/null
+++ b/libgcc/config/arm/t-elf
@@ -0,0 +1,18 @@
+# For most CPUs we have an assembly soft-float implementation.
+# However, this is not true for ARMv6M; there we want to use the soft-fp C
+# implementation.  The soft-fp code is only built for ARMv6M.  This pulls
+# in the asm implementation for other CPUs.
+LIB1ASMFUNCS += _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func \
+ _call_via_rX _interwork_call_via_rX \
+ _lshrdi3 _ashrdi3 _ashldi3 \
+ _arm_negdf2 _arm_addsubdf3 _arm_muldivdf3 _arm_cmpdf2 _arm_unorddf2 \
+ _arm_fixdfsi _arm_fixunsdfsi \
+ _arm_truncdfsf2 _arm_negsf2 _arm_addsubsf3 _arm_muldivsf3 \
+ _arm_cmpsf2 _arm_unordsf2 _arm_fixsfsi _arm_fixunssfsi \
+ _arm_floatdidf _arm_floatdisf _arm_floatundidf _arm_floatundisf \
+ _clzsi2 _clzdi2
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+HOST_LIBGCC2_CFLAGS += -fno-inline
diff --git a/libgcc/config/arm/t-linux b/libgcc/config/arm/t-linux
new file mode 100644
index 00000000000..4c1efebbd87
--- /dev/null
+++ b/libgcc/config/arm/t-linux
@@ -0,0 +1,7 @@
+LIB1ASMSRC = arm/lib1funcs.S
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx _clzsi2 _clzdi2 \
+ _arm_addsubdf3 _arm_addsubsf3
+
+# Just for these, we omit the frame pointer since it makes such a big
+# difference.
+HOST_LIBGCC2_CFLAGS += -fomit-frame-pointer
diff --git a/libgcc/config/arm/t-linux-eabi b/libgcc/config/arm/t-linux-eabi
new file mode 100644
index 00000000000..a03e2b60064
--- /dev/null
+++ b/libgcc/config/arm/t-linux-eabi
@@ -0,0 +1,5 @@
+# Use a version of div0 which raises SIGFPE, and a special __clear_cache.
+LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx _clear_cache
+
+LIB2ADD_ST += $(srcdir)/config/arm/linux-atomic.c \
+ $(srcdir)/config/arm/linux-atomic-64bit.c
diff --git a/libgcc/config/arm/t-netbsd b/libgcc/config/arm/t-netbsd
new file mode 100644
index 00000000000..95358f931ba
--- /dev/null
+++ b/libgcc/config/arm/t-netbsd
@@ -0,0 +1,7 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+HOST_LIBGCC2_CFLAGS += -fomit-frame-pointer
+
+LIBGCC2_DEBUG_CFLAGS = -g0
+
+LIB2ADD += $(srcdir)/floatunsidf.c $(srcdir)/floatunsisf.c
diff --git a/libgcc/config/arm/t-strongarm-elf b/libgcc/config/arm/t-strongarm-elf
new file mode 100644
index 00000000000..45d1b993218
--- /dev/null
+++ b/libgcc/config/arm/t-strongarm-elf
@@ -0,0 +1,6 @@
+LIB1ASMFUNCS += _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _clzsi2 _clzdi2
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+HOST_LIBGCC2_CFLAGS += -fno-inline
diff --git a/libgcc/config/arm/t-symbian b/libgcc/config/arm/t-symbian
index 6788d5f40b3..06d98faa6ae 100644
--- a/libgcc/config/arm/t-symbian
+++ b/libgcc/config/arm/t-symbian
@@ -1,2 +1,19 @@
+LIB1ASMFUNCS += _bb_init_func _call_via_rX _interwork_call_via_rX _clzsi2 _clzdi2
+
+# These functions have __aeabi equivalents and will never be called by GCC.
+# By putting them in LIB1ASMFUNCS, we avoid the standard libgcc2.c code being
+# used -- and we make sure that definitions are not available in lib1funcs.S,
+# either, so they end up undefined.
+LIB1ASMFUNCS += \
+ _ashldi3 _ashrdi3 _divdi3 _floatdidf _udivmoddi4 _umoddi3 \
+ _udivdi3 _lshrdi3 _moddi3 _muldi3 _negdi2 _cmpdi2 \
+ _fixdfdi _fixsfdi _fixunsdfdi _fixunssfdi _floatdisf \
+ _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
+ _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
+ _fixsfsi _fixunssfsi
+
+# Include half-float helpers.
+LIB2ADD_ST += $(srcdir)/config/arm/fp16.c
+
# Include the gcc personality routine
LIB2ADDEH = $(srcdir)/unwind-c.c $(srcdir)/config/arm/pr-support.c
diff --git a/libgcc/config/arm/t-vxworks b/libgcc/config/arm/t-vxworks
new file mode 100644
index 00000000000..70ccdc1556a
--- /dev/null
+++ b/libgcc/config/arm/t-vxworks
@@ -0,0 +1 @@
+LIB1ASMFUNCS += _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX _clzsi2 _clzdi2
diff --git a/libgcc/config/arm/t-wince-pe b/libgcc/config/arm/t-wince-pe
new file mode 100644
index 00000000000..33ea969ccf4
--- /dev/null
+++ b/libgcc/config/arm/t-wince-pe
@@ -0,0 +1 @@
+LIB1ASMFUNCS += _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX _clzsi2 _clzdi2
diff --git a/libgcc/config/arm/unaligned-funcs.c b/libgcc/config/arm/unaligned-funcs.c
new file mode 100644
index 00000000000..4e684f4fc94
--- /dev/null
+++ b/libgcc/config/arm/unaligned-funcs.c
@@ -0,0 +1,57 @@
+/* EABI unaligned read/write functions.
+
+ Copyright (C) 2005, 2009 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+int __aeabi_uread4 (void *);
+int __aeabi_uwrite4 (int, void *);
+long long __aeabi_uread8 (void *);
+long long __aeabi_uwrite8 (long long, void *);
+
+struct __attribute__((packed)) u4 { int data; };
+struct __attribute__((packed)) u8 { long long data; };
+
+int
+__aeabi_uread4 (void *ptr)
+{
+ return ((struct u4 *) ptr)->data;
+}
+
+int
+__aeabi_uwrite4 (int data, void *ptr)
+{
+ ((struct u4 *) ptr)->data = data;
+ return data;
+}
+
+long long
+__aeabi_uread8 (void *ptr)
+{
+ return ((struct u8 *) ptr)->data;
+}
+
+long long
+__aeabi_uwrite8 (long long data, void *ptr)
+{
+ ((struct u8 *) ptr)->data = data;
+ return data;
+}
diff --git a/libgcc/config/avr/lib1funcs.S b/libgcc/config/avr/lib1funcs.S
new file mode 100644
index 00000000000..f7a8f6335c4
--- /dev/null
+++ b/libgcc/config/avr/lib1funcs.S
@@ -0,0 +1,1674 @@
+/* -*- Mode: Asm -*- */
+/* Copyright (C) 1998, 1999, 2000, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Denis Chertykov <chertykov@gmail.com>
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define __zero_reg__ r1
+#define __tmp_reg__ r0
+#define __SREG__ 0x3f
+#define __SP_H__ 0x3e
+#define __SP_L__ 0x3d
+#define __RAMPZ__ 0x3B
+#define __EIND__ 0x3C
+
+/* Most of the functions here are called directly from avr.md
+ patterns, instead of using the standard libcall mechanisms.
+ This can make better code because GCC knows exactly which
+ of the call-used registers (not all of them) are clobbered. */
+
+/* FIXME: At present, there is no SORT directive in the linker
+   script, so we must not assume that different modules
+   in the same input section like .libgcc.text.mul will be
+   located close together.  Therefore, we cannot use
+   RCALL/RJMP to call a function like __udivmodhi4 from
+   __divmodhi4 and have to use the lengthy XCALL/XJMP, even
+   though they are in the same input section and all such
+   input sections together are small enough to reach every
+   location with an RCALL/RJMP instruction.  */
+
+ .macro mov_l r_dest, r_src
+#if defined (__AVR_HAVE_MOVW__)
+ movw \r_dest, \r_src
+#else
+ mov \r_dest, \r_src
+#endif
+ .endm
+
+ .macro mov_h r_dest, r_src
+#if defined (__AVR_HAVE_MOVW__)
+ ; empty
+#else
+ mov \r_dest, \r_src
+#endif
+ .endm
+
+#if defined (__AVR_HAVE_JMP_CALL__)
+#define XCALL call
+#define XJMP jmp
+#else
+#define XCALL rcall
+#define XJMP rjmp
+#endif
+
+.macro DEFUN name
+.global \name
+.func \name
+\name:
+.endm
+
+.macro ENDF name
+.size \name, .-\name
+.endfunc
+.endm
+
+
+.section .text.libgcc.mul, "ax", @progbits
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+/* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
+#if !defined (__AVR_HAVE_MUL__)
+/*******************************************************
+ Multiplication 8 x 8 without MUL
+*******************************************************/
+#if defined (L_mulqi3)
+
+#define r_arg2 r22 /* multiplicand */
+#define r_arg1 r24 /* multiplier */
+#define r_res __tmp_reg__ /* result */
+
+DEFUN __mulqi3
+ clr r_res ; clear result
+__mulqi3_loop:
+ sbrc r_arg1,0
+ add r_res,r_arg2
+ add r_arg2,r_arg2 ; shift multiplicand
+ breq __mulqi3_exit ; while multiplicand != 0
+ lsr r_arg1 ;
+ brne __mulqi3_loop ; exit if multiplier = 0
+__mulqi3_exit:
+ mov r_arg1,r_res ; result to return register
+ ret
+ENDF __mulqi3
+
+#undef r_arg2
+#undef r_arg1
+#undef r_res
+
+#endif /* defined (L_mulqi3) */
+
+#if defined (L_mulqihi3)
+DEFUN __mulqihi3
+ clr r25
+ sbrc r24, 7
+ dec r25
+ clr r23
+ sbrc r22, 7
+	dec	r23
+ XJMP __mulhi3
+ENDF __mulqihi3
+#endif /* defined (L_mulqihi3) */
+
+#if defined (L_umulqihi3)
+DEFUN __umulqihi3
+ clr r25
+ clr r23
+ XJMP __mulhi3
+ENDF __umulqihi3
+#endif /* defined (L_umulqihi3) */
+
+/*******************************************************
+ Multiplication 16 x 16 without MUL
+*******************************************************/
+#if defined (L_mulhi3)
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL __tmp_reg__ /* result Low */
+#define r_resH r21 /* result High */
+
+DEFUN __mulhi3
+ clr r_resH ; clear result
+ clr r_resL ; clear result
+__mulhi3_loop:
+ sbrs r_arg1L,0
+ rjmp __mulhi3_skip1
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+__mulhi3_skip1:
+ add r_arg2L,r_arg2L ; shift multiplicand
+ adc r_arg2H,r_arg2H
+
+ cp r_arg2L,__zero_reg__
+ cpc r_arg2H,__zero_reg__
+ breq __mulhi3_exit ; while multiplicand != 0
+
+ lsr r_arg1H ; gets LSB of multiplier
+ ror r_arg1L
+ sbiw r_arg1L,0
+ brne __mulhi3_loop ; exit if multiplier = 0
+__mulhi3_exit:
+ mov r_arg1H,r_resH ; result to return register
+ mov r_arg1L,r_resL
+ ret
+ENDF __mulhi3
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+#endif /* defined (L_mulhi3) */
+
+/*******************************************************
+ Widening Multiplication 32 = 16 x 16 without MUL
+*******************************************************/
+
+#if defined (L_mulhisi3)
+DEFUN __mulhisi3
+;;; FIXME: This is dead code (no one calls it)
+ mov_l r18, r24
+ mov_h r19, r25
+ clr r24
+ sbrc r23, 7
+ dec r24
+ mov r25, r24
+ clr r20
+ sbrc r19, 7
+ dec r20
+ mov r21, r20
+ XJMP __mulsi3
+ENDF __mulhisi3
+#endif /* defined (L_mulhisi3) */
+
+#if defined (L_umulhisi3)
+DEFUN __umulhisi3
+;;; FIXME: This is dead code (no one calls it)
+ mov_l r18, r24
+ mov_h r19, r25
+ clr r24
+ clr r25
+ mov_l r20, r24
+ mov_h r21, r25
+ XJMP __mulsi3
+ENDF __umulhisi3
+#endif /* defined (L_umulhisi3) */
+
+#if defined (L_mulsi3)
+/*******************************************************
+ Multiplication 32 x 32 without MUL
+*******************************************************/
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23
+#define r_arg1HL r24
+#define r_arg1HH r25 /* multiplier High */
+
+#define r_arg2L r18 /* multiplicand Low */
+#define r_arg2H r19
+#define r_arg2HL r20
+#define r_arg2HH r21 /* multiplicand High */
+
+#define r_resL r26 /* result Low */
+#define r_resH r27
+#define r_resHL r30
+#define r_resHH r31 /* result High */
+
+DEFUN __mulsi3
+ clr r_resHH ; clear result
+ clr r_resHL ; clear result
+ clr r_resH ; clear result
+ clr r_resL ; clear result
+__mulsi3_loop:
+ sbrs r_arg1L,0
+ rjmp __mulsi3_skip1
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+ adc r_resHL,r_arg2HL
+ adc r_resHH,r_arg2HH
+__mulsi3_skip1:
+ add r_arg2L,r_arg2L ; shift multiplicand
+ adc r_arg2H,r_arg2H
+ adc r_arg2HL,r_arg2HL
+ adc r_arg2HH,r_arg2HH
+
+ lsr r_arg1HH ; gets LSB of multiplier
+ ror r_arg1HL
+ ror r_arg1H
+ ror r_arg1L
+ brne __mulsi3_loop
+ sbiw r_arg1HL,0
+ cpc r_arg1H,r_arg1L
+ brne __mulsi3_loop ; exit if multiplier = 0
+__mulsi3_exit:
+ mov_h r_arg1HH,r_resHH ; result to return register
+ mov_l r_arg1HL,r_resHL
+ mov_h r_arg1H,r_resH
+ mov_l r_arg1L,r_resL
+ ret
+ENDF __mulsi3
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg1HL
+#undef r_arg1HH
+
+#undef r_arg2L
+#undef r_arg2H
+#undef r_arg2HL
+#undef r_arg2HH
+
+#undef r_resL
+#undef r_resH
+#undef r_resHL
+#undef r_resHH
+
+#endif /* defined (L_mulsi3) */
+
+#endif /* !defined (__AVR_HAVE_MUL__) */
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+#if defined (__AVR_HAVE_MUL__)
+#define A0 26
+#define B0 18
+#define C0 22
+
+#define A1 A0+1
+
+#define B1 B0+1
+#define B2 B0+2
+#define B3 B0+3
+
+#define C1 C0+1
+#define C2 C0+2
+#define C3 C0+3
+
+/*******************************************************
+ Widening Multiplication 32 = 16 x 16
+*******************************************************/
+
+#if defined (L_mulhisi3)
+;;; R25:R22 = (signed long) R27:R26 * (signed long) R19:R18
+;;; C3:C0 = (signed long) A1:A0 * (signed long) B1:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __mulhisi3
+ XCALL __umulhisi3
+ ;; Sign-extend B
+ tst B1
+ brpl 1f
+ sub C2, A0
+ sbc C3, A1
+1: ;; Sign-extend A
+ XJMP __usmulhisi3_tail
+ENDF __mulhisi3
+#endif /* L_mulhisi3 */
+
+#if defined (L_usmulhisi3)
+;;; R25:R22 = (signed long) R27:R26 * (unsigned long) R19:R18
+;;; C3:C0 = (signed long) A1:A0 * (unsigned long) B1:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __usmulhisi3
+ XCALL __umulhisi3
+ ;; FALLTHRU
+ENDF __usmulhisi3
+
+DEFUN __usmulhisi3_tail
+ ;; Sign-extend A
+ sbrs A1, 7
+ ret
+ sub C2, B0
+ sbc C3, B1
+ ret
+ENDF __usmulhisi3_tail
+#endif /* L_usmulhisi3 */
+
+#if defined (L_umulhisi3)
+;;; R25:R22 = (unsigned long) R27:R26 * (unsigned long) R19:R18
+;;; C3:C0 = (unsigned long) A1:A0 * (unsigned long) B1:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __umulhisi3
+ mul A0, B0
+ movw C0, r0
+ mul A1, B1
+ movw C2, r0
+ mul A0, B1
+ rcall 1f
+ mul A1, B0
+1: add C1, r0
+ adc C2, r1
+ clr __zero_reg__
+ adc C3, __zero_reg__
+ ret
+ENDF __umulhisi3
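+
+;;; The three MULs above implement the schoolbook identity, with
+;;; A = A1:A0 and B = B1:B0 taken as 16-bit values:
+;;;
+;;;   A * B = (A1*B1 << 16) + ((A0*B1 + A1*B0) << 8) + A0*B0
+;;;
+;;; Each 8x8 MUL leaves its 16-bit product in r1:r0; the two middle
+;;; products are accumulated into C3:C1 with carry via the shared
+;;; tail at label 1.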
+#endif /* L_umulhisi3 */
+
+/*******************************************************
+ Widening Multiplication 32 = 16 x 32
+*******************************************************/
+
+#if defined (L_mulshisi3)
+;;; R25:R22 = (signed long) R27:R26 * R21:R18
+;;; (C3:C0) = (signed long) A1:A0 * B3:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __mulshisi3
+#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
+	;; Some cores have a problem skipping a 2-word instruction
+ tst A1
+ brmi __mulohisi3
+#else
+ sbrs A1, 7
+#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
+ XJMP __muluhisi3
+ ;; FALLTHRU
+ENDF __mulshisi3
+
+;;; R25:R22 = (one-extended long) R27:R26 * R21:R18
+;;; (C3:C0) = (one-extended long) A1:A0 * B3:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __mulohisi3
+ XCALL __muluhisi3
+ ;; One-extend R27:R26 (A1:A0)
+ sub C2, B0
+ sbc C3, B1
+ ret
+ENDF __mulohisi3
+#endif /* L_mulshisi3 */
+
+#if defined (L_muluhisi3)
+;;; R25:R22 = (unsigned long) R27:R26 * R21:R18
+;;; (C3:C0) = (unsigned long) A1:A0 * B3:B0
+;;; Clobbers: __tmp_reg__
+DEFUN __muluhisi3
+ XCALL __umulhisi3
+ mul A0, B3
+ add C3, r0
+ mul A1, B2
+ add C3, r0
+ mul A0, B2
+ add C2, r0
+ adc C3, r1
+ clr __zero_reg__
+ ret
+ENDF __muluhisi3
+#endif /* L_muluhisi3 */
+
+/*******************************************************
+ Multiplication 32 x 32
+*******************************************************/
+
+#if defined (L_mulsi3)
+;;; R25:R22 = R25:R22 * R21:R18
+;;; (C3:C0) = C3:C0 * B3:B0
+;;; Clobbers: R26, R27, __tmp_reg__
+DEFUN __mulsi3
+ movw A0, C0
+ push C2
+ push C3
+ XCALL __muluhisi3
+ pop A1
+ pop A0
+ ;; A1:A0 now contains the high word of A
+ mul A0, B0
+ add C2, r0
+ adc C3, r1
+ mul A0, B1
+ add C3, r0
+ mul A1, B0
+ add C3, r0
+ clr __zero_reg__
+ ret
+ENDF __mulsi3
+#endif /* L_mulsi3 */
+
+#undef A0
+#undef A1
+
+#undef B0
+#undef B1
+#undef B2
+#undef B3
+
+#undef C0
+#undef C1
+#undef C2
+#undef C3
+
+#endif /* __AVR_HAVE_MUL__ */
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+.section .text.libgcc.div, "ax", @progbits
+
+/*******************************************************
+ Division 8 / 8 => (result + remainder)
+*******************************************************/
+#define r_rem r25 /* remainder */
+#define r_arg1 r24 /* dividend, quotient */
+#define r_arg2 r22 /* divisor */
+#define r_cnt r23 /* loop count */
+
+#if defined (L_udivmodqi4)
+DEFUN __udivmodqi4
+ sub r_rem,r_rem ; clear remainder and carry
+ ldi r_cnt,9 ; init loop counter
+ rjmp __udivmodqi4_ep ; jump to entry point
+__udivmodqi4_loop:
+ rol r_rem ; shift dividend into remainder
+ cp r_rem,r_arg2 ; compare remainder & divisor
+ brcs __udivmodqi4_ep ; remainder < divisor
+ sub r_rem,r_arg2 ; subtract divisor from remainder
+__udivmodqi4_ep:
+ rol r_arg1 ; shift dividend (with CARRY)
+ dec r_cnt ; decrement loop counter
+ brne __udivmodqi4_loop
+ com r_arg1 ; complement result
+ ; because C flag was complemented in loop
+ ret
+ENDF __udivmodqi4
+#endif /* defined (L_udivmodqi4) */
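+
+;; Hedged C model of the loop above: textbook shift-and-subtract
+;; division.  Note the asm rotates the *complement* of each quotient
+;; bit into r_arg1 (the AVR carry is set exactly when the compare
+;; borrows), which is why the routine ends with "com r_arg1".
+;; Illustrative only.
+#if 0
+#include <stdint.h>
+static void
+udivmodqi4_model (uint8_t num, uint8_t den, uint8_t *quo, uint8_t *rem)
+{
+  uint8_t q = 0, r = 0;
+  for (int i = 7; i >= 0; i--)
+    {
+      r = (r << 1) | ((num >> i) & 1); /* shift dividend into remainder */
+      q <<= 1;
+      if (r >= den)
+        {
+          r -= den; /* quotient bit is 1 */
+          q |= 1;
+        }
+    }
+  *quo = q;
+  *rem = r;
+}
+#endif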
+
+#if defined (L_divmodqi4)
+DEFUN __divmodqi4
+ bst r_arg1,7 ; store sign of dividend
+ mov __tmp_reg__,r_arg1
+ eor __tmp_reg__,r_arg2; r0.7 is sign of result
+ sbrc r_arg1,7
+ neg r_arg1 ; dividend negative : negate
+ sbrc r_arg2,7
+ neg r_arg2 ; divisor negative : negate
+ XCALL __udivmodqi4 ; do the unsigned div/mod
+ brtc __divmodqi4_1
+ neg r_rem ; correct remainder sign
+__divmodqi4_1:
+ sbrc __tmp_reg__,7
+ neg r_arg1 ; correct result sign
+__divmodqi4_exit:
+ ret
+ENDF __divmodqi4
+#endif /* defined (L_divmodqi4) */
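+
+;; Hedged C model of the sign handling above: divide the magnitudes
+;; with the unsigned routine, give the remainder the dividend's sign
+;; (the T flag), and negate the quotient when the operand signs differ
+;; (the XOR kept in r0.7).  Reuses the illustrative model above.
+#if 0
+static void
+divmodqi4_model (int8_t a, int8_t b, int8_t *quo, int8_t *rem)
+{
+  uint8_t ua = a < 0 ? (uint8_t)-a : (uint8_t)a;
+  uint8_t ub = b < 0 ? (uint8_t)-b : (uint8_t)b;
+  uint8_t q, r;
+  udivmodqi4_model (ua, ub, &q, &r);
+  *rem = a < 0 ? (int8_t)-r : (int8_t)r; /* remainder follows dividend */
+  *quo = (a < 0) != (b < 0) ? (int8_t)-q : (int8_t)q;
+}
+#endif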
+
+#undef r_rem
+#undef r_arg1
+#undef r_arg2
+#undef r_cnt
+
+
+/*******************************************************
+ Division 16 / 16 => (result + remainder)
+*******************************************************/
+#define r_remL r26 /* remainder Low */
+#define r_remH r27 /* remainder High */
+
+/* return: remainder */
+#define r_arg1L r24 /* dividend Low */
+#define r_arg1H r25 /* dividend High */
+
+/* return: quotient */
+#define r_arg2L r22 /* divisor Low */
+#define r_arg2H r23 /* divisor High */
+
+#define r_cnt r21 /* loop count */
+
+#if defined (L_udivmodhi4)
+DEFUN __udivmodhi4
+ sub r_remL,r_remL
+ sub r_remH,r_remH ; clear remainder and carry
+ ldi r_cnt,17 ; init loop counter
+ rjmp __udivmodhi4_ep ; jump to entry point
+__udivmodhi4_loop:
+ rol r_remL ; shift dividend into remainder
+ rol r_remH
+ cp r_remL,r_arg2L ; compare remainder & divisor
+ cpc r_remH,r_arg2H
+ brcs __udivmodhi4_ep ; remainder < divisor
+ sub r_remL,r_arg2L ; subtract divisor from remainder
+ sbc r_remH,r_arg2H
+__udivmodhi4_ep:
+ rol r_arg1L ; shift dividend (with CARRY)
+ rol r_arg1H
+ dec r_cnt ; decrement loop counter
+ brne __udivmodhi4_loop
+ com r_arg1L
+ com r_arg1H
+; div/mod results to return registers, as for the div() function
+ mov_l r_arg2L, r_arg1L ; quotient
+ mov_h r_arg2H, r_arg1H
+ mov_l r_arg1L, r_remL ; remainder
+ mov_h r_arg1H, r_remH
+ ret
+ENDF __udivmodhi4
+#endif /* defined (L_udivmodhi4) */
+
+#if defined (L_divmodhi4)
+DEFUN __divmodhi4
+ .global _div
+_div:
+ bst r_arg1H,7 ; store sign of dividend
+ mov __tmp_reg__,r_arg2H
+ brtc 0f
+ com __tmp_reg__ ; r0.7 is sign of result
+ rcall __divmodhi4_neg1 ; dividend negative: negate
+0:
+ sbrc r_arg2H,7
+ rcall __divmodhi4_neg2 ; divisor negative: negate
+ XCALL __udivmodhi4 ; do the unsigned div/mod
+ sbrc __tmp_reg__,7
+ rcall __divmodhi4_neg2 ; correct remainder sign
+ brtc __divmodhi4_exit
+__divmodhi4_neg1:
+ ;; correct dividend/remainder sign
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,0xff
+ ret
+__divmodhi4_neg2:
+ ;; correct divisor/result sign
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,0xff
+__divmodhi4_exit:
+ ret
+ENDF __divmodhi4
+#endif /* defined (L_divmodhi4) */
+
+#undef r_remH
+#undef r_remL
+
+#undef r_arg1H
+#undef r_arg1L
+
+#undef r_arg2H
+#undef r_arg2L
+
+#undef r_cnt
+
+/*******************************************************
+ Division 24 / 24 => (result + remainder)
+*******************************************************/
+
+;; A[0..2]: In: Dividend; Out: Quotient
+#define A0 22
+#define A1 A0+1
+#define A2 A0+2
+
+;; B[0..2]: In: Divisor; Out: Remainder
+#define B0 18
+#define B1 B0+1
+#define B2 B0+2
+
+;; C[0..2]: Expand remainder
+#define C0 __zero_reg__
+#define C1 26
+#define C2 25
+
+;; Loop counter
+#define r_cnt 21
+
+#if defined (L_udivmodpsi4)
+;; R24:R22 = R24:R22 udiv R20:R18
+;; R20:R18 = R24:R22 umod R20:R18
+;; Clobbers: R21, R25, R26
+
+DEFUN __udivmodpsi4
+ ; init loop counter
+ ldi r_cnt, 24+1
+ ; Clear remainder and carry. C0 is already 0
+ clr C1
+ sub C2, C2
+ ; jump to entry point
+ rjmp __udivmodpsi4_start
+__udivmodpsi4_loop:
+ ; shift dividend into remainder
+ rol C0
+ rol C1
+ rol C2
+ ; compare remainder & divisor
+ cp C0, B0
+ cpc C1, B1
+ cpc C2, B2
+ brcs __udivmodpsi4_start ; remainder < divisor
+ sub C0, B0 ; subtract divisor from remainder
+ sbc C1, B1
+ sbc C2, B2
+__udivmodpsi4_start:
+ ; shift dividend (with CARRY)
+ rol A0
+ rol A1
+ rol A2
+ ; decrement loop counter
+ dec r_cnt
+ brne __udivmodpsi4_loop
+ com A0
+ com A1
+ com A2
+ ; div/mod results to return registers
+ ; remainder
+ mov B0, C0
+ mov B1, C1
+ mov B2, C2
+ clr __zero_reg__ ; C0
+ ret
+ENDF __udivmodpsi4
+#endif /* defined (L_udivmodpsi4) */
+
+#if defined (L_divmodpsi4)
+;; R24:R22 = R24:R22 div R20:R18
+;; R20:R18 = R24:R22 mod R20:R18
+;; Clobbers: T, __tmp_reg__, R21, R25, R26
+
+DEFUN __divmodpsi4
+ ; R0.7 will contain the sign of the result:
+ ; R0.7 = A.sign ^ B.sign
+ mov __tmp_reg__, B2
+ ; T-flag = sign of dividend
+ bst A2, 7
+ brtc 0f
+ com __tmp_reg__
+ ; Adjust dividend's sign
+ rcall __divmodpsi4_negA
+0:
+ ; Adjust divisor's sign
+ sbrc B2, 7
+ rcall __divmodpsi4_negB
+
+ ; Do the unsigned div/mod
+ XCALL __udivmodpsi4
+
+ ; Adjust quotient's sign
+ sbrc __tmp_reg__, 7
+ rcall __divmodpsi4_negA
+
+ ; Adjust remainder's sign
+ brtc __divmodpsi4_end
+
+__divmodpsi4_negB:
+ ; Correct divisor/remainder sign
+ com B2
+ com B1
+ neg B0
+ sbci B1, -1
+ sbci B2, -1
+ ret
+
+ ; Correct dividend/quotient sign
+__divmodpsi4_negA:
+ com A2
+ com A1
+ neg A0
+ sbci A1, -1
+ sbci A2, -1
+__divmodpsi4_end:
+ ret
+
+ENDF __divmodpsi4
+#endif /* defined (L_divmodpsi4) */
+
+#undef A0
+#undef A1
+#undef A2
+
+#undef B0
+#undef B1
+#undef B2
+
+#undef C0
+#undef C1
+#undef C2
+
+#undef r_cnt
+
+/*******************************************************
+ Division 32 / 32 => (result + remainder)
+*******************************************************/
+#define r_remHH r31 /* remainder High */
+#define r_remHL r30
+#define r_remH r27
+#define r_remL r26 /* remainder Low */
+
+/* return: remainder */
+#define r_arg1HH r25 /* dividend High */
+#define r_arg1HL r24
+#define r_arg1H r23
+#define r_arg1L r22 /* dividend Low */
+
+/* return: quotient */
+#define r_arg2HH r21 /* divisor High */
+#define r_arg2HL r20
+#define r_arg2H r19
+#define r_arg2L r18 /* divisor Low */
+
+#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
+
+#if defined (L_udivmodsi4)
+DEFUN __udivmodsi4
+ ldi r_remL, 33 ; init loop counter
+ mov r_cnt, r_remL
+ sub r_remL,r_remL
+ sub r_remH,r_remH ; clear remainder and carry
+ mov_l r_remHL, r_remL
+ mov_h r_remHH, r_remH
+ rjmp __udivmodsi4_ep ; jump to entry point
+__udivmodsi4_loop:
+ rol r_remL ; shift dividend into remainder
+ rol r_remH
+ rol r_remHL
+ rol r_remHH
+ cp r_remL,r_arg2L ; compare remainder & divisor
+ cpc r_remH,r_arg2H
+ cpc r_remHL,r_arg2HL
+ cpc r_remHH,r_arg2HH
+ brcs __udivmodsi4_ep ; remainder < divisor
+ sub r_remL,r_arg2L ; subtract divisor from remainder
+ sbc r_remH,r_arg2H
+ sbc r_remHL,r_arg2HL
+ sbc r_remHH,r_arg2HH
+__udivmodsi4_ep:
+ rol r_arg1L ; shift dividend (with CARRY)
+ rol r_arg1H
+ rol r_arg1HL
+ rol r_arg1HH
+ dec r_cnt ; decrement loop counter
+ brne __udivmodsi4_loop
+ ; __zero_reg__ now restored (r_cnt == 0)
+ com r_arg1L
+ com r_arg1H
+ com r_arg1HL
+ com r_arg1HH
+; div/mod results to return registers, as for the ldiv() function
+ mov_l r_arg2L, r_arg1L ; quotient
+ mov_h r_arg2H, r_arg1H
+ mov_l r_arg2HL, r_arg1HL
+ mov_h r_arg2HH, r_arg1HH
+ mov_l r_arg1L, r_remL ; remainder
+ mov_h r_arg1H, r_remH
+ mov_l r_arg1HL, r_remHL
+ mov_h r_arg1HH, r_remHH
+ ret
+ENDF __udivmodsi4
+#endif /* defined (L_udivmodsi4) */
+
+#if defined (L_divmodsi4)
+DEFUN __divmodsi4
+ mov __tmp_reg__,r_arg2HH
+ bst r_arg1HH,7 ; store sign of dividend
+ brtc 0f
+ com __tmp_reg__ ; r0.7 is sign of result
+ rcall __divmodsi4_neg1 ; dividend negative: negate
+0:
+ sbrc r_arg2HH,7
+ rcall __divmodsi4_neg2 ; divisor negative: negate
+ XCALL __udivmodsi4 ; do the unsigned div/mod
+ sbrc __tmp_reg__, 7 ; correct quotient sign
+ rcall __divmodsi4_neg2
+ brtc __divmodsi4_exit ; correct remainder sign
+__divmodsi4_neg1:
+ ;; correct dividend/remainder sign
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H, 0xff
+ sbci r_arg1HL,0xff
+ sbci r_arg1HH,0xff
+ ret
+__divmodsi4_neg2:
+ ;; correct divisor/quotient sign
+ com r_arg2HH
+ com r_arg2HL
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,0xff
+ sbci r_arg2HL,0xff
+ sbci r_arg2HH,0xff
+__divmodsi4_exit:
+ ret
+ENDF __divmodsi4
+#endif /* defined (L_divmodsi4) */
+
+
+.section .text.libgcc.prologue, "ax", @progbits
+
+/**********************************
+ * Prologue subroutine: pushes the call-saved registers
+ * r2..r17 and r28/r29, sets Y = SP - r27:r26 (the new
+ * frame), installs it as SP with interrupts disabled
+ * around the update, and jumps back through Z
+ * (ijmp/eijmp below).
+ **********************************/
+#if defined (L_prologue)
+
+DEFUN __prologue_saves__
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r28
+ push r29
+ in r28,__SP_L__
+ in r29,__SP_H__
+ sub r28,r26
+ sbc r29,r27
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+ENDF __prologue_saves__
+#endif /* defined (L_prologue) */
+
+/*
+ * Epilogue subroutine: reloads r2..r17 and the saved frame
+ * pointer through Y, adjusts Y by r30 to recover the stack
+ * pointer, restores SP with interrupts disabled around the
+ * update, and returns.
+ */
+#if defined (L_epilogue)
+
+DEFUN __epilogue_restores__
+ ldd r2,Y+18
+ ldd r3,Y+17
+ ldd r4,Y+16
+ ldd r5,Y+15
+ ldd r6,Y+14
+ ldd r7,Y+13
+ ldd r8,Y+12
+ ldd r9,Y+11
+ ldd r10,Y+10
+ ldd r11,Y+9
+ ldd r12,Y+8
+ ldd r13,Y+7
+ ldd r14,Y+6
+ ldd r15,Y+5
+ ldd r16,Y+4
+ ldd r17,Y+3
+ ldd r26,Y+2
+ ldd r27,Y+1
+ add r28,r30
+ adc r29,__zero_reg__
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
+ mov_l r28, r26
+ mov_h r29, r27
+ ret
+ENDF __epilogue_restores__
+#endif /* defined (L_epilogue) */
+
+#ifdef L_exit
+ .section .fini9,"ax",@progbits
+DEFUN _exit
+ .weak exit
+exit:
+ENDF _exit
+
+ /* Code from .fini8 ... .fini1 sections inserted by ld script. */
+
+ .section .fini0,"ax",@progbits
+ cli
+__stop_program:
+ rjmp __stop_program
+#endif /* defined (L_exit) */
+
+#ifdef L_cleanup
+ .weak _cleanup
+ .func _cleanup
+_cleanup:
+ ret
+.endfunc
+#endif /* defined (L_cleanup) */
+
+
+.section .text.libgcc, "ax", @progbits
+
+#ifdef L_tablejump
+DEFUN __tablejump2__
+ lsl r30
+ rol r31
+ ;; FALLTHRU
+ENDF __tablejump2__
+
+DEFUN __tablejump__
+#if defined (__AVR_HAVE_LPMX__)
+ lpm __tmp_reg__, Z+
+ lpm r31, Z
+ mov r30, __tmp_reg__
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+#else /* !HAVE_LPMX */
+ lpm
+ adiw r30, 1
+ push r0
+ lpm
+ push r0
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ in __tmp_reg__, __EIND__
+ push __tmp_reg__
+#endif
+ ret
+#endif /* !HAVE_LPMX */
+ENDF __tablejump__
+#endif /* defined (L_tablejump) */
+
+#ifdef L_copy_data
+ .section .init4,"ax",@progbits
+DEFUN __do_copy_data
+#if defined(__AVR_HAVE_ELPMX__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ ldi r16, hh8(__data_load_start)
+ out __RAMPZ__, r16
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+ elpm r0, Z+
+ st X+, r0
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ ldi r16, hh8(__data_load_start - 0x10000)
+.L__do_copy_data_carry:
+ inc r16
+ out __RAMPZ__, r16
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+ elpm
+ st X+, r0
+ adiw r30, 1
+ brcs .L__do_copy_data_carry
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__)
+ ldi r17, hi8(__data_end)
+ ldi r26, lo8(__data_start)
+ ldi r27, hi8(__data_start)
+ ldi r30, lo8(__data_load_start)
+ ldi r31, hi8(__data_load_start)
+ rjmp .L__do_copy_data_start
+.L__do_copy_data_loop:
+#if defined (__AVR_HAVE_LPMX__)
+ lpm r0, Z+
+#else
+ lpm
+ adiw r30, 1
+#endif
+ st X+, r0
+.L__do_copy_data_start:
+ cpi r26, lo8(__data_end)
+ cpc r27, r17
+ brne .L__do_copy_data_loop
+#endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
+ENDF __do_copy_data
+#endif /* L_copy_data */
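+
+/* Hedged C model of __do_copy_data: at startup, copy the initialized
+   .data image from its load address in flash to its run address in
+   RAM, using the same linker-provided symbols.  The three asm variants
+   above differ only in how far lpm/elpm can reach.  Illustrative only;
+   a real flash read needs lpm/elpm, not a plain load. */
+#if 0
+extern char __data_start[], __data_end[], __data_load_start[];
+static void
+do_copy_data_model (void)
+{
+  const char *src = __data_load_start; /* flash address (Z, RAMPZ) */
+  char *dst = __data_start;            /* RAM address (X) */
+  while (dst != __data_end)
+    *dst++ = *src++;                   /* elpm/lpm + "st X+" */
+}
+#endif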
+
+/* __do_clear_bss is only necessary if there is anything in .bss section. */
+
+#ifdef L_clear_bss
+ .section .init4,"ax",@progbits
+DEFUN __do_clear_bss
+ ldi r17, hi8(__bss_end)
+ ldi r26, lo8(__bss_start)
+ ldi r27, hi8(__bss_start)
+ rjmp .do_clear_bss_start
+.do_clear_bss_loop:
+ st X+, __zero_reg__
+.do_clear_bss_start:
+ cpi r26, lo8(__bss_end)
+ cpc r27, r17
+ brne .do_clear_bss_loop
+ENDF __do_clear_bss
+#endif /* L_clear_bss */
+
+/* __do_global_ctors and __do_global_dtors are only necessary
+ if there are any constructors/destructors. */
+
+#ifdef L_ctors
+ .section .init6,"ax",@progbits
+DEFUN __do_global_ctors
+#if defined(__AVR_HAVE_RAMPZ__)
+ ldi r17, hi8(__ctors_start)
+ ldi r28, lo8(__ctors_end)
+ ldi r29, hi8(__ctors_end)
+ ldi r16, hh8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+.L__do_global_ctors_loop:
+ sbiw r28, 2
+ sbc r16, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+ out __RAMPZ__, r16
+ XCALL __tablejump_elpm__
+.L__do_global_ctors_start:
+ cpi r28, lo8(__ctors_start)
+ cpc r29, r17
+ ldi r24, hh8(__ctors_start)
+ cpc r16, r24
+ brne .L__do_global_ctors_loop
+#else
+ ldi r17, hi8(__ctors_start)
+ ldi r28, lo8(__ctors_end)
+ ldi r29, hi8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+.L__do_global_ctors_loop:
+ sbiw r28, 2
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
+.L__do_global_ctors_start:
+ cpi r28, lo8(__ctors_start)
+ cpc r29, r17
+ brne .L__do_global_ctors_loop
+#endif /* defined(__AVR_HAVE_RAMPZ__) */
+ENDF __do_global_ctors
+#endif /* L_ctors */
+
+#ifdef L_dtors
+ .section .fini6,"ax",@progbits
+DEFUN __do_global_dtors
+#if defined(__AVR_HAVE_RAMPZ__)
+ ldi r17, hi8(__dtors_end)
+ ldi r28, lo8(__dtors_start)
+ ldi r29, hi8(__dtors_start)
+ ldi r16, hh8(__dtors_start)
+ rjmp .L__do_global_dtors_start
+.L__do_global_dtors_loop:
+ sbiw r28, 2
+ sbc r16, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+ out __RAMPZ__, r16
+ XCALL __tablejump_elpm__
+.L__do_global_dtors_start:
+ cpi r28, lo8(__dtors_end)
+ cpc r29, r17
+ ldi r24, hh8(__dtors_end)
+ cpc r16, r24
+ brne .L__do_global_dtors_loop
+#else
+ ldi r17, hi8(__dtors_end)
+ ldi r28, lo8(__dtors_start)
+ ldi r29, hi8(__dtors_start)
+ rjmp .L__do_global_dtors_start
+.L__do_global_dtors_loop:
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
+ adiw r28, 2
+.L__do_global_dtors_start:
+ cpi r28, lo8(__dtors_end)
+ cpc r29, r17
+ brne .L__do_global_dtors_loop
+#endif /* defined(__AVR_HAVE_RAMPZ__) */
+ENDF __do_global_dtors
+#endif /* L_dtors */
+
+.section .text.libgcc, "ax", @progbits
+
+#ifdef L_tablejump_elpm
+DEFUN __tablejump_elpm__
+#if defined (__AVR_HAVE_ELPM__)
+#if defined (__AVR_HAVE_LPMX__)
+ elpm __tmp_reg__, Z+
+ elpm r31, Z
+ mov r30, __tmp_reg__
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+#else
+ ijmp
+#endif
+
+#else
+ elpm
+ adiw r30, 1
+ push r0
+ elpm
+ push r0
+#if defined (__AVR_HAVE_EIJMP_EICALL__)
+ in __tmp_reg__, __EIND__
+ push __tmp_reg__
+#endif
+ ret
+#endif
+#endif /* defined (__AVR_HAVE_ELPM__) */
+ENDF __tablejump_elpm__
+#endif /* defined (L_tablejump_elpm) */
+
+
+.section .text.libgcc.builtins, "ax", @progbits
+
+/**********************************
+ * Find first set Bit (ffs)
+ **********************************/
+
+#if defined (L_ffssi2)
+;; find first set bit
+;; r25:r24 = ffs32 (r25:r22)
+;; clobbers: r22, r26
+DEFUN __ffssi2
+ clr r26
+ tst r22
+ brne 1f
+ subi r26, -8
+ or r22, r23
+ brne 1f
+ subi r26, -8
+ or r22, r24
+ brne 1f
+ subi r26, -8
+ or r22, r25
+ brne 1f
+ ret
+1: mov r24, r22
+ XJMP __loop_ffsqi2
+ENDF __ffssi2
+#endif /* defined (L_ffssi2) */
+
+#if defined (L_ffshi2)
+;; find first set bit
+;; r25:r24 = ffs16 (r25:r24)
+;; clobbers: r26
+DEFUN __ffshi2
+ clr r26
+#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
+ ;; Some cores have a problem skipping a 2-word instruction
+ tst r24
+ breq 2f
+#else
+ cpse r24, __zero_reg__
+#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
+1: XJMP __loop_ffsqi2
+2: ldi r26, 8
+ or r24, r25
+ brne 1b
+ ret
+ENDF __ffshi2
+#endif /* defined (L_ffshi2) */
+
+#if defined (L_loop_ffsqi2)
+;; Helper for ffshi2, ffssi2
+;; r25:r24 = r26 + zero_extend16 (ffs8(r24))
+;; r24 must be != 0
+;; clobbers: r26
+DEFUN __loop_ffsqi2
+ inc r26
+ lsr r24
+ brcc __loop_ffsqi2
+ mov r24, r26
+ clr r25
+ ret
+ENDF __loop_ffsqi2
+#endif /* defined (L_loop_ffsqi2) */
+
+
+/**********************************
+ * Count trailing Zeros (ctz)
+ **********************************/
+
+#if defined (L_ctzsi2)
+;; count trailing zeros
+;; r25:r24 = ctz32 (r25:r22)
+;; clobbers: r26, r22
+;; ctz(0) = 255
+;; Note that ctz(0) is undefined for GCC
+DEFUN __ctzsi2
+ XCALL __ffssi2
+ dec r24
+ ret
+ENDF __ctzsi2
+#endif /* defined (L_ctzsi2) */
+
+#if defined (L_ctzhi2)
+;; count trailing zeros
+;; r25:r24 = ctz16 (r25:r24)
+;; clobbers: r26
+;; ctz(0) = 255
+;; Note that ctz(0) is undefined for GCC
+DEFUN __ctzhi2
+ XCALL __ffshi2
+ dec r24
+ ret
+ENDF __ctzhi2
+#endif /* defined (L_ctzhi2) */
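+
+;; Hedged C model of the ffs/ctz relation used above: ffs returns
+;; 1 + the index of the lowest set bit (and 0 for a zero input), so
+;; ctz is simply ffs - 1; for 0 the byte-sized result wraps to 255,
+;; as the comments note.  Illustrative only.
+#if 0
+#include <stdint.h>
+static uint8_t
+ffs32_model (uint32_t x)
+{
+  uint8_t n = 1;
+  if (x == 0)
+    return 0;
+  while (!(x & 1))
+    {
+      x >>= 1;
+      n++;
+    }
+  return n;
+}
+
+static uint8_t
+ctz32_model (uint32_t x)
+{
+  return ffs32_model (x) - 1; /* XCALL __ffssi2; dec r24 */
+}
+#endif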
+
+
+/**********************************
+ * Count leading Zeros (clz)
+ **********************************/
+
+#if defined (L_clzdi2)
+;; count leading zeros
+;; r25:r24 = clz64 (r25:r18)
+;; clobbers: r22, r23, r26
+DEFUN __clzdi2
+ XCALL __clzsi2
+ sbrs r24, 5
+ ret
+ mov_l r22, r18
+ mov_h r23, r19
+ mov_l r24, r20
+ mov_h r25, r21
+ XCALL __clzsi2
+ subi r24, -32
+ ret
+ENDF __clzdi2
+#endif /* defined (L_clzdi2) */
+
+#if defined (L_clzsi2)
+;; count leading zeros
+;; r25:r24 = clz32 (r25:r22)
+;; clobbers: r26
+DEFUN __clzsi2
+ XCALL __clzhi2
+ sbrs r24, 4
+ ret
+ mov_l r24, r22
+ mov_h r25, r23
+ XCALL __clzhi2
+ subi r24, -16
+ ret
+ENDF __clzsi2
+#endif /* defined (L_clzsi2) */
+
+#if defined (L_clzhi2)
+;; count leading zeros
+;; r25:r24 = clz16 (r25:r24)
+;; clobbers: r26
+DEFUN __clzhi2
+ clr r26
+ tst r25
+ brne 1f
+ subi r26, -8
+ or r25, r24
+ brne 1f
+ ldi r24, 16
+ ret
+1: cpi r25, 16
+ brsh 3f
+ subi r26, -3
+ swap r25
+2: inc r26
+3: lsl r25
+ brcc 2b
+ mov r24, r26
+ clr r25
+ ret
+ENDF __clzhi2
+#endif /* defined (L_clzhi2) */
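+
+;; Hedged C model of the semantics above; the asm narrows byte-wise
+;; and nibble-wise (or/subi/swap) instead of looping over every bit,
+;; and returns 16 for a zero input.  Illustrative only.
+#if 0
+#include <stdint.h>
+static uint8_t
+clz16_model (uint16_t x)
+{
+  uint8_t n = 0;
+  if (x == 0)
+    return 16;
+  while (!(x & 0x8000))
+    {
+      x <<= 1; /* the "lsl r25 / brcc" counting loop */
+      n++;
+    }
+  return n;
+}
+#endif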
+
+
+/**********************************
+ * Parity
+ **********************************/
+
+#if defined (L_paritydi2)
+;; r25:r24 = parity64 (r25:r18)
+;; clobbers: __tmp_reg__
+DEFUN __paritydi2
+ eor r24, r18
+ eor r24, r19
+ eor r24, r20
+ eor r24, r21
+ XJMP __paritysi2
+ENDF __paritydi2
+#endif /* defined (L_paritydi2) */
+
+#if defined (L_paritysi2)
+;; r25:r24 = parity32 (r25:r22)
+;; clobbers: __tmp_reg__
+DEFUN __paritysi2
+ eor r24, r22
+ eor r24, r23
+ XJMP __parityhi2
+ENDF __paritysi2
+#endif /* defined (L_paritysi2) */
+
+#if defined (L_parityhi2)
+;; r25:r24 = parity16 (r25:r24)
+;; clobbers: __tmp_reg__
+DEFUN __parityhi2
+ eor r24, r25
+;; FALLTHRU
+ENDF __parityhi2
+
+;; r25:r24 = parity8 (r24)
+;; clobbers: __tmp_reg__
+DEFUN __parityqi2
+ ;; parity is in r24[0..7]
+ mov __tmp_reg__, r24
+ swap __tmp_reg__
+ eor r24, __tmp_reg__
+ ;; parity is in r24[0..3]
+ subi r24, -4
+ andi r24, -5
+ subi r24, -6
+ ;; parity is in r24[0,3]
+ sbrc r24, 3
+ inc r24
+ ;; parity is in r24[0]
+ andi r24, 1
+ clr r25
+ ret
+ENDF __parityqi2
+#endif /* defined (L_parityhi2) */
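+
+;; Hedged C model of the XOR folding above: each fold halves the span
+;; whose parity is still unknown (64 -> 32 -> 16 -> 8 bits across the
+;; routines, then nibbles inside __parityqi2).  The asm condenses the
+;; last folds into the subi/andi/subi trick; plain shifts shown here.
+#if 0
+#include <stdint.h>
+static uint8_t
+parity8_model (uint8_t x)
+{
+  x ^= x >> 4; /* swap + eor */
+  x ^= x >> 2;
+  x ^= x >> 1;
+  return x & 1;
+}
+#endif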
+
+
+/**********************************
+ * Population Count
+ **********************************/
+
+#if defined (L_popcounthi2)
+;; population count
+;; r25:r24 = popcount16 (r25:r24)
+;; clobbers: __tmp_reg__
+DEFUN __popcounthi2
+ XCALL __popcountqi2
+ push r24
+ mov r24, r25
+ XCALL __popcountqi2
+ clr r25
+ ;; FALLTHRU
+ENDF __popcounthi2
+
+DEFUN __popcounthi2_tail
+ pop __tmp_reg__
+ add r24, __tmp_reg__
+ ret
+ENDF __popcounthi2_tail
+#endif /* defined (L_popcounthi2) */
+
+#if defined (L_popcountsi2)
+;; population count
+;; r25:r24 = popcount32 (r25:r22)
+;; clobbers: __tmp_reg__
+DEFUN __popcountsi2
+ XCALL __popcounthi2
+ push r24
+ mov_l r24, r22
+ mov_h r25, r23
+ XCALL __popcounthi2
+ XJMP __popcounthi2_tail
+ENDF __popcountsi2
+#endif /* defined (L_popcountsi2) */
+
+#if defined (L_popcountdi2)
+;; population count
+;; r25:r24 = popcount64 (r25:r18)
+;; clobbers: r22, r23, __tmp_reg__
+DEFUN __popcountdi2
+ XCALL __popcountsi2
+ push r24
+ mov_l r22, r18
+ mov_h r23, r19
+ mov_l r24, r20
+ mov_h r25, r21
+ XCALL __popcountsi2
+ XJMP __popcounthi2_tail
+ENDF __popcountdi2
+#endif /* defined (L_popcountdi2) */
+
+#if defined (L_popcountqi2)
+;; population count
+;; r24 = popcount8 (r24)
+;; clobbers: __tmp_reg__
+DEFUN __popcountqi2
+ mov __tmp_reg__, r24
+ andi r24, 1
+ lsr __tmp_reg__
+ lsr __tmp_reg__
+ adc r24, __zero_reg__
+ lsr __tmp_reg__
+ adc r24, __zero_reg__
+ lsr __tmp_reg__
+ adc r24, __zero_reg__
+ lsr __tmp_reg__
+ adc r24, __zero_reg__
+ lsr __tmp_reg__
+ adc r24, __zero_reg__
+ lsr __tmp_reg__
+ adc r24, __tmp_reg__
+ ret
+ENDF __popcountqi2
+#endif /* defined (L_popcountqi2) */
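+
+;; Hedged C model of __popcountqi2: the asm keeps bit 0 with "andi",
+;; then drains the remaining bits through the carry with lsr/adc; the
+;; plain equivalent is the bit loop below.  Illustrative only.
+#if 0
+#include <stdint.h>
+static uint8_t
+popcount8_model (uint8_t x)
+{
+  uint8_t n = 0;
+  for (uint8_t i = 0; i < 8; i++)
+    n += (x >> i) & 1; /* one lsr + adc per bit in the asm */
+  return n;
+}
+#endif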
+
+
+/**********************************
+ * Swap bytes
+ **********************************/
+
+;; swap two registers with different register numbers
+.macro bswap a, b
+ eor \a, \b
+ eor \b, \a
+ eor \a, \b
+.endm
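+
+;; Hedged C equivalent of the macro above: the classic three-XOR swap.
+;; It needs no scratch register but requires two distinct registers;
+;; "swapping" a register with itself would zero it.
+#if 0
+#define BSWAP_MODEL(a, b) \
+  do { (a) ^= (b); (b) ^= (a); (a) ^= (b); } while (0)
+#endif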
+
+#if defined (L_bswapsi2)
+;; swap bytes
+;; r25:r22 = bswap32 (r25:r22)
+DEFUN __bswapsi2
+ bswap r22, r25
+ bswap r23, r24
+ ret
+ENDF __bswapsi2
+#endif /* defined (L_bswapsi2) */
+
+#if defined (L_bswapdi2)
+;; swap bytes
+;; r25:r18 = bswap64 (r25:r18)
+DEFUN __bswapdi2
+ bswap r18, r25
+ bswap r19, r24
+ bswap r20, r23
+ bswap r21, r22
+ ret
+ENDF __bswapdi2
+#endif /* defined (L_bswapdi2) */
+
+
+/**********************************
+ * 64-bit shifts
+ **********************************/
+
+#if defined (L_ashrdi3)
+;; Arithmetic shift right
+;; r25:r18 = ashr64 (r25:r18, r17:r16)
+DEFUN __ashrdi3
+ push r16
+ andi r16, 63
+ breq 2f
+1: asr r25
+ ror r24
+ ror r23
+ ror r22
+ ror r21
+ ror r20
+ ror r19
+ ror r18
+ dec r16
+ brne 1b
+2: pop r16
+ ret
+ENDF __ashrdi3
+#endif /* defined (L_ashrdi3) */
+
+#if defined (L_lshrdi3)
+;; Logical shift right
+;; r25:r18 = lshr64 (r25:r18, r17:r16)
+DEFUN __lshrdi3
+ push r16
+ andi r16, 63
+ breq 2f
+1: lsr r25
+ ror r24
+ ror r23
+ ror r22
+ ror r21
+ ror r20
+ ror r19
+ ror r18
+ dec r16
+ brne 1b
+2: pop r16
+ ret
+ENDF __lshrdi3
+#endif /* defined (L_lshrdi3) */
+
+#if defined (L_ashldi3)
+;; Shift left
+;; r25:r18 = ashl64 (r25:r18, r17:r16)
+DEFUN __ashldi3
+ push r16
+ andi r16, 63
+ breq 2f
+1: lsl r18
+ rol r19
+ rol r20
+ rol r21
+ rol r22
+ rol r23
+ rol r24
+ rol r25
+ dec r16
+ brne 1b
+2: pop r16
+ ret
+ENDF __ashldi3
+#endif /* defined (L_ashldi3) */
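+
+;; Hedged C model of the three 64-bit shifts above: the count is
+;; masked to 0..63 ("andi r16, 63") and one eight-register shift
+;; chain runs per remaining count.  Illustrative only.
+#if 0
+#include <stdint.h>
+static uint64_t
+ashl64_model (uint64_t x, uint8_t cnt)
+{
+  for (cnt &= 63; cnt; cnt--) /* one lsl/rol chain per iteration */
+    x <<= 1;
+  return x;
+}
+#endif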
+
+
+.section .text.libgcc.fmul, "ax", @progbits
+
+/***********************************************************/
+;;; Softmul versions of FMUL, FMULS and FMULSU to implement
+;;; __builtin_avr_fmul* if !AVR_HAVE_MUL
+/***********************************************************/
+
+#define A1 24
+#define B1 25
+#define C0 22
+#define C1 23
+#define A0 __tmp_reg__
+
+#ifdef L_fmuls
+;;; r23:r22 = fmuls (r24, r25) like in FMULS instruction
+;;; Clobbers: r24, r25, __tmp_reg__
+DEFUN __fmuls
+ ;; A0.7 = negate result?
+ mov A0, A1
+ eor A0, B1
+ ;; B1 = |B1|
+ sbrc B1, 7
+ neg B1
+ XJMP __fmulsu_exit
+ENDF __fmuls
+#endif /* L_fmuls */
+
+#ifdef L_fmulsu
+;;; r23:r22 = fmulsu (r24, r25) like in FMULSU instruction
+;;; Clobbers: r24, r25, __tmp_reg__
+DEFUN __fmulsu
+ ;; A0.7 = negate result?
+ mov A0, A1
+;; FALLTHRU
+ENDF __fmulsu
+
+;; Helper for __fmuls and __fmulsu
+DEFUN __fmulsu_exit
+ ;; A1 = |A1|
+ sbrc A1, 7
+ neg A1
+#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
+ ;; Some cores have a problem skipping a 2-word instruction
+ tst A0
+ brmi 1f
+#else
+ sbrs A0, 7
+#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
+ XJMP __fmul
+1: XCALL __fmul
+ ;; C = -C iff A0.7 = 1
+ com C1
+ neg C0
+ sbci C1, -1
+ ret
+ENDF __fmulsu_exit
+#endif /* L_fmulsu */
+
+
+#ifdef L_fmul
+;;; r22:r23 = fmul (r24, r25) like in FMUL instruction
+;;; Clobbers: r24, r25, __tmp_reg__
+DEFUN __fmul
+ ; clear result
+ clr C0
+ clr C1
+ clr A0
+1: tst B1
+ ;; 1.0 = 0x80, so test bit 7 of B to see whether A must be added to C.
+2: brpl 3f
+ ;; C += A
+ add C0, A0
+ adc C1, A1
+3: ;; A >>= 1
+ lsr A1
+ ror A0
+ ;; B <<= 1
+ lsl B1
+ brne 2b
+ ret
+ENDF __fmul
+#endif /* L_fmul */
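+
+;; Hedged C model of __fmul: an unsigned 1.7 fixed-point multiply,
+;; i.e. the low 16 bits of (a * b) << 1, which is how the hardware
+;; FMUL instruction is defined.  The loop mirrors the shift-and-add
+;; above: A starts in the high byte (A0 = 0) and walks right while B
+;; walks left.  Illustrative only.
+#if 0
+#include <stdint.h>
+static uint16_t
+fmul_model (uint8_t a, uint8_t b)
+{
+  uint16_t acc = 0;
+  uint16_t aa = (uint16_t)a << 8; /* A1:A0 with A0 cleared */
+  while (b)
+    {
+      if (b & 0x80) /* 1.0 is 0x80: test bit 7 of B */
+        acc += aa;  /* C += A */
+      aa >>= 1;     /* A >>= 1 */
+      b <<= 1;      /* B <<= 1 */
+    }
+  return acc;       /* low 16 bits of (a * b) << 1 */
+}
+#endif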
+
+#undef A0
+#undef A1
+#undef B1
+#undef C0
+#undef C1
diff --git a/libgcc/config/avr/t-avr b/libgcc/config/avr/t-avr
index 78829c76af4..cd529ae8606 100644
--- a/libgcc/config/avr/t-avr
+++ b/libgcc/config/avr/t-avr
@@ -1,3 +1,60 @@
+LIB1ASMSRC = avr/lib1funcs.S
+LIB1ASMFUNCS = \
+ _mulqi3 \
+ _mulhi3 \
+ _mulhisi3 \
+ _umulhisi3 \
+ _usmulhisi3 \
+ _muluhisi3 \
+ _mulshisi3 \
+ _mulsi3 \
+ _udivmodqi4 \
+ _divmodqi4 \
+ _udivmodhi4 \
+ _divmodhi4 \
+ _divmodpsi4 _udivmodpsi4 \
+ _udivmodsi4 \
+ _divmodsi4 \
+ _prologue \
+ _epilogue \
+ _exit \
+ _cleanup \
+ _tablejump \
+ _tablejump_elpm \
+ _copy_data \
+ _clear_bss \
+ _ctors \
+ _dtors \
+ _ffssi2 \
+ _ffshi2 \
+ _loop_ffsqi2 \
+ _ctzsi2 \
+ _ctzhi2 \
+ _clzdi2 \
+ _clzsi2 \
+ _clzhi2 \
+ _paritydi2 \
+ _paritysi2 \
+ _parityhi2 \
+ _popcounthi2 \
+ _popcountsi2 \
+ _popcountdi2 \
+ _popcountqi2 \
+ _bswapsi2 \
+ _bswapdi2 \
+ _ashldi3 \
+ _ashrdi3 \
+ _lshrdi3 \
+ _fmul _fmuls _fmulsu
+
+LIB2FUNCS_EXCLUDE = \
+ _clz
+
+# We do not have the DF type.
+# Most of the C functions in libgcc2 use almost all registers,
+# so use -mcall-prologues for smaller code size.
+HOST_LIBGCC2_CFLAGS += -DDF=SF -Dinhibit_libc -mcall-prologues -Os
+
# Extra 16-bit integer functions.
intfuncs16 = _absvXX2 _addvXX3 _subvXX3 _mulvXX3 _negvXX2 _clrsbXX2
diff --git a/libgcc/config/avr/t-rtems b/libgcc/config/avr/t-rtems
new file mode 100644
index 00000000000..43b57ee323d
--- /dev/null
+++ b/libgcc/config/avr/t-rtems
@@ -0,0 +1,2 @@
+# RTEMS uses _exit from newlib
+LIB1ASMFUNCS := $(filter-out _exit,$(LIB1ASMFUNCS))
diff --git a/libgcc/config/bfin/crti.S b/libgcc/config/bfin/crti.S
new file mode 100644
index 00000000000..b6f20fc9e6b
--- /dev/null
+++ b/libgcc/config/bfin/crti.S
@@ -0,0 +1,59 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
+
+ .ident "GNU C crti.o"
+
+ .section .init
+ .globl __init
+ .type __init,@function
+__init:
+#if defined __ID_SHARED_LIB__
+ [--SP] = P5;
+#elif defined __BFIN_FDPIC__
+ [--SP] = P3;
+#endif
+ LINK 12;
+#if defined __ID_SHARED_LIB__
+ P5 = [P5 + _current_shared_library_p5_offset_]
+#endif
+ .section .fini
+ .globl __fini
+ .type __fini,@function
+__fini:
+#if defined __ID_SHARED_LIB__
+ [--SP] = P5;
+#elif defined __BFIN_FDPIC__
+ [--SP] = P3;
+#endif
+ LINK 12;
+#if defined __ID_SHARED_LIB__
+ P5 = [P5 + _current_shared_library_p5_offset_]
+#endif
diff --git a/libgcc/config/bfin/crtlibid.S b/libgcc/config/bfin/crtlibid.S
new file mode 100644
index 00000000000..beab8093810
--- /dev/null
+++ b/libgcc/config/bfin/crtlibid.S
@@ -0,0 +1,29 @@
+/* Provide a weak definition of the library ID, for the benefit of certain
+ configure scripts.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .ident "GNU C crtlibid.o"
+
+.weak _current_shared_library_p5_offset_
+.set _current_shared_library_p5_offset_, 0
diff --git a/libgcc/config/bfin/crtn.S b/libgcc/config/bfin/crtn.S
new file mode 100644
index 00000000000..7fcd27bfade
--- /dev/null
+++ b/libgcc/config/bfin/crtn.S
@@ -0,0 +1,50 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .ident "GNU C crtn.o"
+
+ .section .init
+ unlink;
+#if defined __ID_SHARED_LIB__
+ P5 = [SP++];
+#elif defined __BFIN_FDPIC__
+ P3 = [SP++];
+#endif
+ rts;
+
+ .section .fini
+ unlink;
+#if defined __ID_SHARED_LIB__
+ P5 = [SP++];
+#elif defined __BFIN_FDPIC__
+ P3 = [SP++];
+#endif
+ rts;
diff --git a/libgcc/config/bfin/lib1funcs.S b/libgcc/config/bfin/lib1funcs.S
new file mode 100644
index 00000000000..c7bf4f3f05c
--- /dev/null
+++ b/libgcc/config/bfin/lib1funcs.S
@@ -0,0 +1,211 @@
+/* libgcc functions for Blackfin.
+ Copyright (C) 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Analog Devices.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef L_divsi3
+.text
+.align 2
+.global ___divsi3;
+.type ___divsi3, STT_FUNC;
+
+___divsi3:
+ [--SP]= RETS;
+ [--SP] = R7;
+
+ R2 = -R0;
+ CC = R0 < 0;
+ IF CC R0 = R2;
+ R7 = CC;
+
+ R2 = -R1;
+ CC = R1 < 0;
+ IF CC R1 = R2;
+ R2 = CC;
+ R7 = R7 ^ R2;
+
+ CALL ___udivsi3;
+
+ CC = R7;
+ R1 = -R0;
+ IF CC R0 = R1;
+
+ R7 = [SP++];
+ RETS = [SP++];
+ RTS;
+#endif
+
+#ifdef L_modsi3
+.align 2
+.global ___modsi3;
+.type ___modsi3, STT_FUNC;
+
+___modsi3:
+ [--SP] = RETS;
+ [--SP] = R0;
+ [--SP] = R1;
+ CALL ___divsi3;
+ R2 = [SP++];
+ R1 = [SP++];
+ R2 *= R0;
+ R0 = R1 - R2;
+ RETS = [SP++];
+ RTS;
+#endif
+
+#ifdef L_udivsi3
+.align 2
+.global ___udivsi3;
+.type ___udivsi3, STT_FUNC;
+
+___udivsi3:
+ P0 = 32;
+ LSETUP (0f, 1f) LC0 = P0;
+ /* upper half of dividend */
+ R3 = 0;
+0:
+ /* The first time round in the loop we shift in garbage, but since we
+ perform 33 shifts, it doesn't matter. */
+ R0 = ROT R0 BY 1;
+ R3 = ROT R3 BY 1;
+ R2 = R3 - R1;
+ CC = R3 < R1 (IU);
+1:
+ /* Last instruction of the loop. */
+ IF ! CC R3 = R2;
+
+ /* Shift in the last bit. */
+ R0 = ROT R0 BY 1;
+ /* R0 is the result, R3 contains the remainder. */
+ R0 = ~ R0;
+ RTS;
+#endif
+
+#ifdef L_umodsi3
+.align 2
+.global ___umodsi3;
+.type ___umodsi3, STT_FUNC;
+
+___umodsi3:
+ [--SP] = RETS;
+ CALL ___udivsi3;
+ R0 = R3;
+ RETS = [SP++];
+ RTS;
+#endif
+
+#ifdef L_umulsi3_highpart
+.align 2
+.global ___umulsi3_highpart;
+.type ___umulsi3_highpart, STT_FUNC;
+
+___umulsi3_highpart:
+ A1 = R1.L * R0.L (FU);
+ A1 = A1 >> 16;
+ A0 = R1.H * R0.H, A1 += R1.L * R0.H (FU);
+ A1 += R0.L * R1.H (FU);
+ A1 = A1 >> 16;
+ A0 += A1;
+ R0 = A0 (FU);
+ RTS;
+#endif
+
+#ifdef L_smulsi3_highpart
+.align 2
+.global ___smulsi3_highpart;
+.type ___smulsi3_highpart, STT_FUNC;
+
+___smulsi3_highpart:
+ A1 = R1.L * R0.L (FU);
+ A1 = A1 >> 16;
+ A0 = R0.H * R1.H, A1 += R0.H * R1.L (IS,M);
+ A1 += R1.H * R0.L (IS,M);
+ A1 = A1 >>> 16;
+ R0 = (A0 += A1);
+ RTS;
+#endif
+
+#ifdef L_muldi3
+.align 2
+.global ___muldi3;
+.type ___muldi3, STT_FUNC;
+
+/*
+ R1:R0 * R3:R2
+ = R1.h:R1.l:R0.h:R0.l * R3.h:R3.l:R2.h:R2.l
+[X] = (R1.h * R3.h) * 2^96
+[X] + (R1.h * R3.l + R1.l * R3.h) * 2^80
+[X] + (R1.h * R2.h + R1.l * R3.l + R3.h * R0.h) * 2^64
+[T1] + (R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h) * 2^48
+[T2] + (R1.l * R2.l + R3.l * R0.l + R0.h * R2.h) * 2^32
+[T3] + (R0.l * R2.h + R2.l * R0.h) * 2^16
+[T4] + (R0.l * R2.l)
+
+ We can discard the first three lines marked "X" since we produce
+ only a 64-bit result. So, we need ten 16-bit multiplies.
+
+ Individual mul-acc results:
+[E1] = R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h
+[E2] = R1.l * R2.l + R3.l * R0.l + R0.h * R2.h
+[E3] = R0.l * R2.h + R2.l * R0.h
+[E4] = R0.l * R2.l
+
+ We also need to add high parts from lower-level results to higher ones:
+ E[n]c = E[n] + (E[n+1]c >> 16), where E4c := E4
+
+ One interesting property is that all parts of the result that depend
+ on the sign of the multiplication are discarded. Those would be the
+ multiplications involving R1.h and R3.h, but only the top 16 bits of
+ the 32-bit result depend on the sign, and since R1.h and R3.h only
+ occur in E1, the top half of these results is cut off.
+ So, we can just use FU mode for all of the 16-bit multiplies, and
+ ignore questions of when to use mixed mode. */
+
+___muldi3:
+ /* [SP] technically is part of the caller's frame, but we can
+ use it as scratch space. */
+ A0 = R2.H * R1.L, A1 = R2.L * R1.H (FU) || R3 = [SP + 12]; /* E1 */
+ A0 += R3.H * R0.L, A1 += R3.L * R0.H (FU) || [SP] = R4; /* E1 */
+ A0 += A1; /* E1 */
+ R4 = A0.w;
+ A0 = R0.l * R3.l (FU); /* E2 */
+ A0 += R2.l * R1.l (FU); /* E2 */
+
+ A1 = R2.L * R0.L (FU); /* E4 */
+ R3 = A1.w;
+ A1 = A1 >> 16; /* E3c */
+ A0 += R2.H * R0.H, A1 += R2.L * R0.H (FU); /* E2, E3c */
+ A1 += R0.L * R2.H (FU); /* E3c */
+ R0 = A1.w;
+ A1 = A1 >> 16; /* E2c */
+ A0 += A1; /* E2c */
+ R1 = A0.w;
+
+ /* low(result) = low(E3c):low(E4) */
+ R0 = PACK (R0.l, R3.l);
+ /* high(result) = E2c + (E1 << 16) */
+ R1.h = R1.h + R4.l (NS) || R4 = [SP];
+ RTS;
+
+.size ___muldi3, .-___muldi3
+#endif
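+
+/* Hedged C model of the E1..E4 decomposition described above: split
+   both operands into 16-bit halves, form only the partial sums that
+   land in the low 64 bits, and fold carries upward according to
+   E[n]c = E[n] + (E[n+1]c >> 16).  Illustrative only. */
+#if 0
+#include <stdint.h>
+static uint64_t
+muldi3_model (uint64_t a, uint64_t b)
+{
+  uint16_t a0 = a, a1 = a >> 16, a2 = a >> 32, a3 = a >> 48;
+  uint16_t b0 = b, b1 = b >> 16, b2 = b >> 32, b3 = b >> 48;
+  uint64_t e4 = (uint64_t)a0 * b0;
+  uint64_t e3 = (uint64_t)a0 * b1 + (uint64_t)a1 * b0;
+  uint64_t e2 = (uint64_t)a0 * b2 + (uint64_t)a2 * b0
+              + (uint64_t)a1 * b1;
+  uint64_t e1 = (uint64_t)a0 * b3 + (uint64_t)a3 * b0
+              + (uint64_t)a1 * b2 + (uint64_t)a2 * b1;
+  uint64_t e3c = e3 + (e4 >> 16); /* E[n]c = E[n] + (E[n+1]c >> 16) */
+  uint64_t e2c = e2 + (e3c >> 16);
+  uint64_t e1c = e1 + (e2c >> 16);
+  return (e4 & 0xffff) | ((e3c & 0xffff) << 16)
+       | ((e2c & 0xffff) << 32) | (e1c << 48);
+}
+#endif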
diff --git a/libgcc/config/bfin/libgcc-glibc.ver b/libgcc/config/bfin/libgcc-glibc.ver
new file mode 100644
index 00000000000..516d91f6584
--- /dev/null
+++ b/libgcc/config/bfin/libgcc-glibc.ver
@@ -0,0 +1,1914 @@
+# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+# 2008, 2009, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_3.0 {
+ # libgcc1 integer symbols
+ ___absvsi2
+ ___addvsi3
+ ___ashlsi3
+ ___ashrsi3
+ ___divsi3
+ ___lshrsi3
+ ___modsi3
+ ___mulsi3
+ ___mulvsi3
+ ___negvsi2
+ ___subvsi3
+ ___udivsi3
+ ___umodsi3
+
+ # libgcc1 floating point symbols
+ ___addsf3
+ ___adddf3
+ ___addxf3
+ ___addtf3
+ ___divsf3
+ ___divdf3
+ ___divxf3
+ ___divtf3
+ ___eqsf2
+ ___eqdf2
+ ___eqxf2
+ ___eqtf2
+ ___extenddfxf2
+ ___extenddftf2
+ ___extendsfdf2
+ ___extendsfxf2
+ ___extendsftf2
+ ___fixsfsi
+ ___fixdfsi
+ ___fixxfsi
+ ___fixtfsi
+ ___floatsisf
+ ___floatsidf
+ ___floatsixf
+ ___floatsitf
+ ___gesf2
+ ___gedf2
+ ___gexf2
+ ___getf2
+ ___gtsf2
+ ___gtdf2
+ ___gtxf2
+ ___gttf2
+ ___lesf2
+ ___ledf2
+ ___lexf2
+ ___letf2
+ ___ltsf2
+ ___ltdf2
+ ___ltxf2
+ ___lttf2
+ ___mulsf3
+ ___muldf3
+ ___mulxf3
+ ___multf3
+ ___negsf2
+ ___negdf2
+ ___negxf2
+ ___negtf2
+ ___nesf2
+ ___nedf2
+ ___nexf2
+ ___netf2
+ ___subsf3
+ ___subdf3
+ ___subxf3
+ ___subtf3
+ ___truncdfsf2
+ ___truncxfsf2
+ ___trunctfsf2
+ ___truncxfdf2
+ ___trunctfdf2
+
+ # libgcc2 DImode arithmetic (for 32-bit targets).
+ ___absvdi2
+ ___addvdi3
+ ___ashldi3
+ ___ashrdi3
+ ___cmpdi2
+ ___divdi3
+ ___ffsdi2
+ ___fixdfdi
+ ___fixsfdi
+ ___fixtfdi
+ ___fixxfdi
+ ___fixunsdfdi
+ ___fixunsdfsi
+ ___fixunssfsi
+ ___fixunssfdi
+ ___fixunstfdi
+ ___fixunstfsi
+ ___fixunsxfdi
+ ___fixunsxfsi
+ ___floatdidf
+ ___floatdisf
+ ___floatdixf
+ ___floatditf
+ ___lshrdi3
+ ___moddi3
+ ___muldi3
+ ___mulvdi3
+ ___negdi2
+ ___negvdi2
+ ___subvdi3
+ ___ucmpdi2
+ ___udivdi3
+ ___udivmoddi4
+ ___umoddi3
+
+ # libgcc2 TImode arithmetic (for 64-bit targets).
+ ___ashlti3
+ ___ashrti3
+ ___cmpti2
+ ___divti3
+ ___ffsti2
+ ___fixdfti
+ ___fixsfti
+ ___fixtfti
+ ___fixxfti
+ ___lshrti3
+ ___modti3
+ ___multi3
+ ___negti2
+ ___ucmpti2
+ ___udivmodti4
+ ___udivti3
+ ___umodti3
+ ___fixunsdfti
+ ___fixunssfti
+ ___fixunstfti
+ ___fixunsxfti
+ ___floattidf
+ ___floattisf
+ ___floattixf
+ ___floattitf
+
+ # Used to deal with trampoline initialization on some platforms
+ ___clear_cache
+
+ # EH symbols
+ __Unwind_DeleteException
+ __Unwind_Find_FDE
+ __Unwind_ForcedUnwind
+ __Unwind_GetGR
+ __Unwind_GetIP
+ __Unwind_GetLanguageSpecificData
+ __Unwind_GetRegionStart
+ __Unwind_GetTextRelBase
+ __Unwind_GetDataRelBase
+ __Unwind_RaiseException
+ __Unwind_Resume
+ __Unwind_SetGR
+ __Unwind_SetIP
+ ___deregister_frame
+ ___deregister_frame_info
+ ___deregister_frame_info_bases
+ ___register_frame
+ ___register_frame_info
+ ___register_frame_info_bases
+ ___register_frame_info_table
+ ___register_frame_info_table_bases
+ ___register_frame_table
+
+ # SjLj EH symbols
+ __Unwind_SjLj_Register
+ __Unwind_SjLj_Unregister
+ __Unwind_SjLj_RaiseException
+ __Unwind_SjLj_ForcedUnwind
+ __Unwind_SjLj_Resume
+}
+
+%inherit GCC_3.3 GCC_3.0
+GCC_3.3 {
+ __Unwind_FindEnclosingFunction
+ __Unwind_GetCFA
+ __Unwind_Backtrace
+ __Unwind_Resume_or_Rethrow
+ __Unwind_SjLj_Resume_or_Rethrow
+}
+
+%inherit GCC_3.3.1 GCC_3.3
+GCC_3.3.1 {
+ ___gcc_personality_sj0
+ ___gcc_personality_v0
+}
+
+%inherit GCC_3.3.2 GCC_3.3.1
+GCC_3.3.2 {
+}
+%inherit GCC_3.3.4 GCC_3.3.2
+GCC_3.3.4 {
+ ___unorddf2
+ ___unordsf2
+}
+
+%inherit GCC_3.4 GCC_3.3.4
+GCC_3.4 {
+ # bit scanning and counting built-ins
+ ___clzsi2
+ ___clzdi2
+ ___clzti2
+ ___ctzsi2
+ ___ctzdi2
+ ___ctzti2
+ ___popcountsi2
+ ___popcountdi2
+ ___popcountti2
+ ___paritysi2
+ ___paritydi2
+ ___parityti2
+}
+
+%inherit GCC_3.4.2 GCC_3.4
+GCC_3.4.2 {
+ # Used to deal with trampoline initialization on some platforms
+ ___enable_execute_stack
+ ___trampoline_setup
+}
+
+%inherit GCC_3.4.4 GCC_3.4.2
+GCC_3.4.4 {
+ # libgcc2 TImode arithmetic (for 64-bit targets).
+ ___absvti2
+ ___addvti3
+ ___mulvti3
+ ___negvti2
+ ___subvti3
+}
+
+%inherit GCC_4.0.0 GCC_3.4.4
+GCC_4.0.0 {
+ # libgcc2 __builtin_powi helpers.
+ ___powisf2
+ ___powidf2
+ ___powixf2
+ ___powitf2
+
+ # c99 compliant complex arithmetic
+ ___divsc3
+ ___divdc3
+ ___divxc3
+ ___divtc3
+ ___mulsc3
+ ___muldc3
+ ___mulxc3
+ ___multc3
+}
+
+%inherit GCC_4.1.0 GCC_4.0.0
+GCC_4.1.0 {
+ ___smulsi3_highpart
+ ___umulsi3_highpart
+}
+
+%inherit GCC_4.2.0 GCC_4.1.0
+GCC_4.2.0 {
+ # unsigned-to-floating conversions
+ ___floatunsisf
+ ___floatunsidf
+ ___floatunsixf
+ ___floatunsitf
+ ___floatundidf
+ ___floatundisf
+ ___floatundixf
+ ___floatunditf
+ ___floatuntidf
+ ___floatuntisf
+ ___floatuntixf
+ ___floatuntitf
+ __Unwind_GetIPInfo
+}
+
+%inherit GCC_4.3.0 GCC_4.2.0
+GCC_4.3.0 {
+ # byte swapping routines
+ ___bswapsi2
+ ___bswapdi2
+ ___emutls_get_address
+ ___emutls_register_common
+ ___ffssi2
+ ___extendxftf2
+ ___trunctfxf2
+
+ # fixed-point routines
+ ___addqq3
+ ___addhq3
+ ___addsq3
+ ___adddq3
+ ___addtq3
+ ___adduqq3
+ ___adduhq3
+ ___addusq3
+ ___addudq3
+ ___addutq3
+ ___addha3
+ ___addsa3
+ ___addda3
+ ___addta3
+ ___adduha3
+ ___addusa3
+ ___adduda3
+ ___adduta3
+ ___ssaddqq3
+ ___ssaddhq3
+ ___ssaddsq3
+ ___ssadddq3
+ ___ssaddtq3
+ ___ssaddha3
+ ___ssaddsa3
+ ___ssaddda3
+ ___ssaddta3
+ ___usadduqq3
+ ___usadduhq3
+ ___usaddusq3
+ ___usaddudq3
+ ___usaddutq3
+ ___usadduha3
+ ___usaddusa3
+ ___usadduda3
+ ___usadduta3
+ ___subqq3
+ ___subhq3
+ ___subsq3
+ ___subdq3
+ ___subtq3
+ ___subuqq3
+ ___subuhq3
+ ___subusq3
+ ___subudq3
+ ___subutq3
+ ___subha3
+ ___subsa3
+ ___subda3
+ ___subta3
+ ___subuha3
+ ___subusa3
+ ___subuda3
+ ___subuta3
+ ___sssubqq3
+ ___sssubhq3
+ ___sssubsq3
+ ___sssubdq3
+ ___sssubtq3
+ ___sssubha3
+ ___sssubsa3
+ ___sssubda3
+ ___sssubta3
+ ___ussubuqq3
+ ___ussubuhq3
+ ___ussubusq3
+ ___ussubudq3
+ ___ussubutq3
+ ___ussubuha3
+ ___ussubusa3
+ ___ussubuda3
+ ___ussubuta3
+ ___mulqq3
+ ___mulhq3
+ ___mulsq3
+ ___muldq3
+ ___multq3
+ ___muluqq3
+ ___muluhq3
+ ___mulusq3
+ ___muludq3
+ ___mulutq3
+ ___mulha3
+ ___mulsa3
+ ___mulda3
+ ___multa3
+ ___muluha3
+ ___mulusa3
+ ___muluda3
+ ___muluta3
+ ___ssmulqq3
+ ___ssmulhq3
+ ___ssmulsq3
+ ___ssmuldq3
+ ___ssmultq3
+ ___ssmulha3
+ ___ssmulsa3
+ ___ssmulda3
+ ___ssmulta3
+ ___usmuluqq3
+ ___usmuluhq3
+ ___usmulusq3
+ ___usmuludq3
+ ___usmulutq3
+ ___usmuluha3
+ ___usmulusa3
+ ___usmuluda3
+ ___usmuluta3
+ ___divqq3
+ ___divhq3
+ ___divsq3
+ ___divdq3
+ ___divtq3
+ ___divha3
+ ___divsa3
+ ___divda3
+ ___divta3
+ ___udivuqq3
+ ___udivuhq3
+ ___udivusq3
+ ___udivudq3
+ ___udivutq3
+ ___udivuha3
+ ___udivusa3
+ ___udivuda3
+ ___udivuta3
+ ___ssdivqq3
+ ___ssdivhq3
+ ___ssdivsq3
+ ___ssdivdq3
+ ___ssdivtq3
+ ___ssdivha3
+ ___ssdivsa3
+ ___ssdivda3
+ ___ssdivta3
+ ___usdivuqq3
+ ___usdivuhq3
+ ___usdivusq3
+ ___usdivudq3
+ ___usdivutq3
+ ___usdivuha3
+ ___usdivusa3
+ ___usdivuda3
+ ___usdivuta3
+ ___negqq2
+ ___neghq2
+ ___negsq2
+ ___negdq2
+ ___negtq2
+ ___neguqq2
+ ___neguhq2
+ ___negusq2
+ ___negudq2
+ ___negutq2
+ ___negha2
+ ___negsa2
+ ___negda2
+ ___negta2
+ ___neguha2
+ ___negusa2
+ ___neguda2
+ ___neguta2
+ ___ssnegqq2
+ ___ssneghq2
+ ___ssnegsq2
+ ___ssnegdq2
+ ___ssnegtq2
+ ___ssnegha2
+ ___ssnegsa2
+ ___ssnegda2
+ ___ssnegta2
+ ___usneguqq2
+ ___usneguhq2
+ ___usnegusq2
+ ___usnegudq2
+ ___usnegutq2
+ ___usneguha2
+ ___usnegusa2
+ ___usneguda2
+ ___usneguta2
+ ___ashlqq3
+ ___ashlhq3
+ ___ashlsq3
+ ___ashldq3
+ ___ashltq3
+ ___ashluqq3
+ ___ashluhq3
+ ___ashlusq3
+ ___ashludq3
+ ___ashlutq3
+ ___ashlha3
+ ___ashlsa3
+ ___ashlda3
+ ___ashlta3
+ ___ashluha3
+ ___ashlusa3
+ ___ashluda3
+ ___ashluta3
+ ___ashrqq3
+ ___ashrhq3
+ ___ashrsq3
+ ___ashrdq3
+ ___ashrtq3
+ ___ashrha3
+ ___ashrsa3
+ ___ashrda3
+ ___ashrta3
+ ___lshruqq3
+ ___lshruhq3
+ ___lshrusq3
+ ___lshrudq3
+ ___lshrutq3
+ ___lshruha3
+ ___lshrusa3
+ ___lshruda3
+ ___lshruta3
+ ___ssashlqq3
+ ___ssashlhq3
+ ___ssashlsq3
+ ___ssashldq3
+ ___ssashltq3
+ ___ssashlha3
+ ___ssashlsa3
+ ___ssashlda3
+ ___ssashlta3
+ ___usashluqq3
+ ___usashluhq3
+ ___usashlusq3
+ ___usashludq3
+ ___usashlutq3
+ ___usashluha3
+ ___usashlusa3
+ ___usashluda3
+ ___usashluta3
+ ___cmpqq2
+ ___cmphq2
+ ___cmpsq2
+ ___cmpdq2
+ ___cmptq2
+ ___cmpuqq2
+ ___cmpuhq2
+ ___cmpusq2
+ ___cmpudq2
+ ___cmputq2
+ ___cmpha2
+ ___cmpsa2
+ ___cmpda2
+ ___cmpta2
+ ___cmpuha2
+ ___cmpusa2
+ ___cmpuda2
+ ___cmputa2
+ ___fractqqhq2
+ ___fractqqsq2
+ ___fractqqdq2
+ ___fractqqtq2
+ ___fractqqha
+ ___fractqqsa
+ ___fractqqda
+ ___fractqqta
+ ___fractqquqq
+ ___fractqquhq
+ ___fractqqusq
+ ___fractqqudq
+ ___fractqqutq
+ ___fractqquha
+ ___fractqqusa
+ ___fractqquda
+ ___fractqquta
+ ___fractqqqi
+ ___fractqqhi
+ ___fractqqsi
+ ___fractqqdi
+ ___fractqqti
+ ___fractqqsf
+ ___fractqqdf
+ ___fracthqqq2
+ ___fracthqsq2
+ ___fracthqdq2
+ ___fracthqtq2
+ ___fracthqha
+ ___fracthqsa
+ ___fracthqda
+ ___fracthqta
+ ___fracthquqq
+ ___fracthquhq
+ ___fracthqusq
+ ___fracthqudq
+ ___fracthqutq
+ ___fracthquha
+ ___fracthqusa
+ ___fracthquda
+ ___fracthquta
+ ___fracthqqi
+ ___fracthqhi
+ ___fracthqsi
+ ___fracthqdi
+ ___fracthqti
+ ___fracthqsf
+ ___fracthqdf
+ ___fractsqqq2
+ ___fractsqhq2
+ ___fractsqdq2
+ ___fractsqtq2
+ ___fractsqha
+ ___fractsqsa
+ ___fractsqda
+ ___fractsqta
+ ___fractsquqq
+ ___fractsquhq
+ ___fractsqusq
+ ___fractsqudq
+ ___fractsqutq
+ ___fractsquha
+ ___fractsqusa
+ ___fractsquda
+ ___fractsquta
+ ___fractsqqi
+ ___fractsqhi
+ ___fractsqsi
+ ___fractsqdi
+ ___fractsqti
+ ___fractsqsf
+ ___fractsqdf
+ ___fractdqqq2
+ ___fractdqhq2
+ ___fractdqsq2
+ ___fractdqtq2
+ ___fractdqha
+ ___fractdqsa
+ ___fractdqda
+ ___fractdqta
+ ___fractdquqq
+ ___fractdquhq
+ ___fractdqusq
+ ___fractdqudq
+ ___fractdqutq
+ ___fractdquha
+ ___fractdqusa
+ ___fractdquda
+ ___fractdquta
+ ___fractdqqi
+ ___fractdqhi
+ ___fractdqsi
+ ___fractdqdi
+ ___fractdqti
+ ___fractdqsf
+ ___fractdqdf
+ ___fracttqqq2
+ ___fracttqhq2
+ ___fracttqsq2
+ ___fracttqdq2
+ ___fracttqha
+ ___fracttqsa
+ ___fracttqda
+ ___fracttqta
+ ___fracttquqq
+ ___fracttquhq
+ ___fracttqusq
+ ___fracttqudq
+ ___fracttqutq
+ ___fracttquha
+ ___fracttqusa
+ ___fracttquda
+ ___fracttquta
+ ___fracttqqi
+ ___fracttqhi
+ ___fracttqsi
+ ___fracttqdi
+ ___fracttqti
+ ___fracttqsf
+ ___fracttqdf
+ ___fracthaqq
+ ___fracthahq
+ ___fracthasq
+ ___fracthadq
+ ___fracthatq
+ ___fracthasa2
+ ___fracthada2
+ ___fracthata2
+ ___fracthauqq
+ ___fracthauhq
+ ___fracthausq
+ ___fracthaudq
+ ___fracthautq
+ ___fracthauha
+ ___fracthausa
+ ___fracthauda
+ ___fracthauta
+ ___fracthaqi
+ ___fracthahi
+ ___fracthasi
+ ___fracthadi
+ ___fracthati
+ ___fracthasf
+ ___fracthadf
+ ___fractsaqq
+ ___fractsahq
+ ___fractsasq
+ ___fractsadq
+ ___fractsatq
+ ___fractsaha2
+ ___fractsada2
+ ___fractsata2
+ ___fractsauqq
+ ___fractsauhq
+ ___fractsausq
+ ___fractsaudq
+ ___fractsautq
+ ___fractsauha
+ ___fractsausa
+ ___fractsauda
+ ___fractsauta
+ ___fractsaqi
+ ___fractsahi
+ ___fractsasi
+ ___fractsadi
+ ___fractsati
+ ___fractsasf
+ ___fractsadf
+ ___fractdaqq
+ ___fractdahq
+ ___fractdasq
+ ___fractdadq
+ ___fractdatq
+ ___fractdaha2
+ ___fractdasa2
+ ___fractdata2
+ ___fractdauqq
+ ___fractdauhq
+ ___fractdausq
+ ___fractdaudq
+ ___fractdautq
+ ___fractdauha
+ ___fractdausa
+ ___fractdauda
+ ___fractdauta
+ ___fractdaqi
+ ___fractdahi
+ ___fractdasi
+ ___fractdadi
+ ___fractdati
+ ___fractdasf
+ ___fractdadf
+ ___fracttaqq
+ ___fracttahq
+ ___fracttasq
+ ___fracttadq
+ ___fracttatq
+ ___fracttaha2
+ ___fracttasa2
+ ___fracttada2
+ ___fracttauqq
+ ___fracttauhq
+ ___fracttausq
+ ___fracttaudq
+ ___fracttautq
+ ___fracttauha
+ ___fracttausa
+ ___fracttauda
+ ___fracttauta
+ ___fracttaqi
+ ___fracttahi
+ ___fracttasi
+ ___fracttadi
+ ___fracttati
+ ___fracttasf
+ ___fracttadf
+ ___fractuqqqq
+ ___fractuqqhq
+ ___fractuqqsq
+ ___fractuqqdq
+ ___fractuqqtq
+ ___fractuqqha
+ ___fractuqqsa
+ ___fractuqqda
+ ___fractuqqta
+ ___fractuqquhq2
+ ___fractuqqusq2
+ ___fractuqqudq2
+ ___fractuqqutq2
+ ___fractuqquha
+ ___fractuqqusa
+ ___fractuqquda
+ ___fractuqquta
+ ___fractuqqqi
+ ___fractuqqhi
+ ___fractuqqsi
+ ___fractuqqdi
+ ___fractuqqti
+ ___fractuqqsf
+ ___fractuqqdf
+ ___fractuhqqq
+ ___fractuhqhq
+ ___fractuhqsq
+ ___fractuhqdq
+ ___fractuhqtq
+ ___fractuhqha
+ ___fractuhqsa
+ ___fractuhqda
+ ___fractuhqta
+ ___fractuhquqq2
+ ___fractuhqusq2
+ ___fractuhqudq2
+ ___fractuhqutq2
+ ___fractuhquha
+ ___fractuhqusa
+ ___fractuhquda
+ ___fractuhquta
+ ___fractuhqqi
+ ___fractuhqhi
+ ___fractuhqsi
+ ___fractuhqdi
+ ___fractuhqti
+ ___fractuhqsf
+ ___fractuhqdf
+ ___fractusqqq
+ ___fractusqhq
+ ___fractusqsq
+ ___fractusqdq
+ ___fractusqtq
+ ___fractusqha
+ ___fractusqsa
+ ___fractusqda
+ ___fractusqta
+ ___fractusquqq2
+ ___fractusquhq2
+ ___fractusqudq2
+ ___fractusqutq2
+ ___fractusquha
+ ___fractusqusa
+ ___fractusquda
+ ___fractusquta
+ ___fractusqqi
+ ___fractusqhi
+ ___fractusqsi
+ ___fractusqdi
+ ___fractusqti
+ ___fractusqsf
+ ___fractusqdf
+ ___fractudqqq
+ ___fractudqhq
+ ___fractudqsq
+ ___fractudqdq
+ ___fractudqtq
+ ___fractudqha
+ ___fractudqsa
+ ___fractudqda
+ ___fractudqta
+ ___fractudquqq2
+ ___fractudquhq2
+ ___fractudqusq2
+ ___fractudqutq2
+ ___fractudquha
+ ___fractudqusa
+ ___fractudquda
+ ___fractudquta
+ ___fractudqqi
+ ___fractudqhi
+ ___fractudqsi
+ ___fractudqdi
+ ___fractudqti
+ ___fractudqsf
+ ___fractudqdf
+ ___fractutqqq
+ ___fractutqhq
+ ___fractutqsq
+ ___fractutqdq
+ ___fractutqtq
+ ___fractutqha
+ ___fractutqsa
+ ___fractutqda
+ ___fractutqta
+ ___fractutquqq2
+ ___fractutquhq2
+ ___fractutqusq2
+ ___fractutqudq2
+ ___fractutquha
+ ___fractutqusa
+ ___fractutquda
+ ___fractutquta
+ ___fractutqqi
+ ___fractutqhi
+ ___fractutqsi
+ ___fractutqdi
+ ___fractutqti
+ ___fractutqsf
+ ___fractutqdf
+ ___fractuhaqq
+ ___fractuhahq
+ ___fractuhasq
+ ___fractuhadq
+ ___fractuhatq
+ ___fractuhaha
+ ___fractuhasa
+ ___fractuhada
+ ___fractuhata
+ ___fractuhauqq
+ ___fractuhauhq
+ ___fractuhausq
+ ___fractuhaudq
+ ___fractuhautq
+ ___fractuhausa2
+ ___fractuhauda2
+ ___fractuhauta2
+ ___fractuhaqi
+ ___fractuhahi
+ ___fractuhasi
+ ___fractuhadi
+ ___fractuhati
+ ___fractuhasf
+ ___fractuhadf
+ ___fractusaqq
+ ___fractusahq
+ ___fractusasq
+ ___fractusadq
+ ___fractusatq
+ ___fractusaha
+ ___fractusasa
+ ___fractusada
+ ___fractusata
+ ___fractusauqq
+ ___fractusauhq
+ ___fractusausq
+ ___fractusaudq
+ ___fractusautq
+ ___fractusauha2
+ ___fractusauda2
+ ___fractusauta2
+ ___fractusaqi
+ ___fractusahi
+ ___fractusasi
+ ___fractusadi
+ ___fractusati
+ ___fractusasf
+ ___fractusadf
+ ___fractudaqq
+ ___fractudahq
+ ___fractudasq
+ ___fractudadq
+ ___fractudatq
+ ___fractudaha
+ ___fractudasa
+ ___fractudada
+ ___fractudata
+ ___fractudauqq
+ ___fractudauhq
+ ___fractudausq
+ ___fractudaudq
+ ___fractudautq
+ ___fractudauha2
+ ___fractudausa2
+ ___fractudauta2
+ ___fractudaqi
+ ___fractudahi
+ ___fractudasi
+ ___fractudadi
+ ___fractudati
+ ___fractudasf
+ ___fractudadf
+ ___fractutaqq
+ ___fractutahq
+ ___fractutasq
+ ___fractutadq
+ ___fractutatq
+ ___fractutaha
+ ___fractutasa
+ ___fractutada
+ ___fractutata
+ ___fractutauqq
+ ___fractutauhq
+ ___fractutausq
+ ___fractutaudq
+ ___fractutautq
+ ___fractutauha2
+ ___fractutausa2
+ ___fractutauda2
+ ___fractutaqi
+ ___fractutahi
+ ___fractutasi
+ ___fractutadi
+ ___fractutati
+ ___fractutasf
+ ___fractutadf
+ ___fractqiqq
+ ___fractqihq
+ ___fractqisq
+ ___fractqidq
+ ___fractqitq
+ ___fractqiha
+ ___fractqisa
+ ___fractqida
+ ___fractqita
+ ___fractqiuqq
+ ___fractqiuhq
+ ___fractqiusq
+ ___fractqiudq
+ ___fractqiutq
+ ___fractqiuha
+ ___fractqiusa
+ ___fractqiuda
+ ___fractqiuta
+ ___fracthiqq
+ ___fracthihq
+ ___fracthisq
+ ___fracthidq
+ ___fracthitq
+ ___fracthiha
+ ___fracthisa
+ ___fracthida
+ ___fracthita
+ ___fracthiuqq
+ ___fracthiuhq
+ ___fracthiusq
+ ___fracthiudq
+ ___fracthiutq
+ ___fracthiuha
+ ___fracthiusa
+ ___fracthiuda
+ ___fracthiuta
+ ___fractsiqq
+ ___fractsihq
+ ___fractsisq
+ ___fractsidq
+ ___fractsitq
+ ___fractsiha
+ ___fractsisa
+ ___fractsida
+ ___fractsita
+ ___fractsiuqq
+ ___fractsiuhq
+ ___fractsiusq
+ ___fractsiudq
+ ___fractsiutq
+ ___fractsiuha
+ ___fractsiusa
+ ___fractsiuda
+ ___fractsiuta
+ ___fractdiqq
+ ___fractdihq
+ ___fractdisq
+ ___fractdidq
+ ___fractditq
+ ___fractdiha
+ ___fractdisa
+ ___fractdida
+ ___fractdita
+ ___fractdiuqq
+ ___fractdiuhq
+ ___fractdiusq
+ ___fractdiudq
+ ___fractdiutq
+ ___fractdiuha
+ ___fractdiusa
+ ___fractdiuda
+ ___fractdiuta
+ ___fracttiqq
+ ___fracttihq
+ ___fracttisq
+ ___fracttidq
+ ___fracttitq
+ ___fracttiha
+ ___fracttisa
+ ___fracttida
+ ___fracttita
+ ___fracttiuqq
+ ___fracttiuhq
+ ___fracttiusq
+ ___fracttiudq
+ ___fracttiutq
+ ___fracttiuha
+ ___fracttiusa
+ ___fracttiuda
+ ___fracttiuta
+ ___fractsfqq
+ ___fractsfhq
+ ___fractsfsq
+ ___fractsfdq
+ ___fractsftq
+ ___fractsfha
+ ___fractsfsa
+ ___fractsfda
+ ___fractsfta
+ ___fractsfuqq
+ ___fractsfuhq
+ ___fractsfusq
+ ___fractsfudq
+ ___fractsfutq
+ ___fractsfuha
+ ___fractsfusa
+ ___fractsfuda
+ ___fractsfuta
+ ___fractdfqq
+ ___fractdfhq
+ ___fractdfsq
+ ___fractdfdq
+ ___fractdftq
+ ___fractdfha
+ ___fractdfsa
+ ___fractdfda
+ ___fractdfta
+ ___fractdfuqq
+ ___fractdfuhq
+ ___fractdfusq
+ ___fractdfudq
+ ___fractdfutq
+ ___fractdfuha
+ ___fractdfusa
+ ___fractdfuda
+ ___fractdfuta
+ ___satfractqqhq2
+ ___satfractqqsq2
+ ___satfractqqdq2
+ ___satfractqqtq2
+ ___satfractqqha
+ ___satfractqqsa
+ ___satfractqqda
+ ___satfractqqta
+ ___satfractqquqq
+ ___satfractqquhq
+ ___satfractqqusq
+ ___satfractqqudq
+ ___satfractqqutq
+ ___satfractqquha
+ ___satfractqqusa
+ ___satfractqquda
+ ___satfractqquta
+ ___satfracthqqq2
+ ___satfracthqsq2
+ ___satfracthqdq2
+ ___satfracthqtq2
+ ___satfracthqha
+ ___satfracthqsa
+ ___satfracthqda
+ ___satfracthqta
+ ___satfracthquqq
+ ___satfracthquhq
+ ___satfracthqusq
+ ___satfracthqudq
+ ___satfracthqutq
+ ___satfracthquha
+ ___satfracthqusa
+ ___satfracthquda
+ ___satfracthquta
+ ___satfractsqqq2
+ ___satfractsqhq2
+ ___satfractsqdq2
+ ___satfractsqtq2
+ ___satfractsqha
+ ___satfractsqsa
+ ___satfractsqda
+ ___satfractsqta
+ ___satfractsquqq
+ ___satfractsquhq
+ ___satfractsqusq
+ ___satfractsqudq
+ ___satfractsqutq
+ ___satfractsquha
+ ___satfractsqusa
+ ___satfractsquda
+ ___satfractsquta
+ ___satfractdqqq2
+ ___satfractdqhq2
+ ___satfractdqsq2
+ ___satfractdqtq2
+ ___satfractdqha
+ ___satfractdqsa
+ ___satfractdqda
+ ___satfractdqta
+ ___satfractdquqq
+ ___satfractdquhq
+ ___satfractdqusq
+ ___satfractdqudq
+ ___satfractdqutq
+ ___satfractdquha
+ ___satfractdqusa
+ ___satfractdquda
+ ___satfractdquta
+ ___satfracttqqq2
+ ___satfracttqhq2
+ ___satfracttqsq2
+ ___satfracttqdq2
+ ___satfracttqha
+ ___satfracttqsa
+ ___satfracttqda
+ ___satfracttqta
+ ___satfracttquqq
+ ___satfracttquhq
+ ___satfracttqusq
+ ___satfracttqudq
+ ___satfracttqutq
+ ___satfracttquha
+ ___satfracttqusa
+ ___satfracttquda
+ ___satfracttquta
+ ___satfracthaqq
+ ___satfracthahq
+ ___satfracthasq
+ ___satfracthadq
+ ___satfracthatq
+ ___satfracthasa2
+ ___satfracthada2
+ ___satfracthata2
+ ___satfracthauqq
+ ___satfracthauhq
+ ___satfracthausq
+ ___satfracthaudq
+ ___satfracthautq
+ ___satfracthauha
+ ___satfracthausa
+ ___satfracthauda
+ ___satfracthauta
+ ___satfractsaqq
+ ___satfractsahq
+ ___satfractsasq
+ ___satfractsadq
+ ___satfractsatq
+ ___satfractsaha2
+ ___satfractsada2
+ ___satfractsata2
+ ___satfractsauqq
+ ___satfractsauhq
+ ___satfractsausq
+ ___satfractsaudq
+ ___satfractsautq
+ ___satfractsauha
+ ___satfractsausa
+ ___satfractsauda
+ ___satfractsauta
+ ___satfractdaqq
+ ___satfractdahq
+ ___satfractdasq
+ ___satfractdadq
+ ___satfractdatq
+ ___satfractdaha2
+ ___satfractdasa2
+ ___satfractdata2
+ ___satfractdauqq
+ ___satfractdauhq
+ ___satfractdausq
+ ___satfractdaudq
+ ___satfractdautq
+ ___satfractdauha
+ ___satfractdausa
+ ___satfractdauda
+ ___satfractdauta
+ ___satfracttaqq
+ ___satfracttahq
+ ___satfracttasq
+ ___satfracttadq
+ ___satfracttatq
+ ___satfracttaha2
+ ___satfracttasa2
+ ___satfracttada2
+ ___satfracttauqq
+ ___satfracttauhq
+ ___satfracttausq
+ ___satfracttaudq
+ ___satfracttautq
+ ___satfracttauha
+ ___satfracttausa
+ ___satfracttauda
+ ___satfracttauta
+ ___satfractuqqqq
+ ___satfractuqqhq
+ ___satfractuqqsq
+ ___satfractuqqdq
+ ___satfractuqqtq
+ ___satfractuqqha
+ ___satfractuqqsa
+ ___satfractuqqda
+ ___satfractuqqta
+ ___satfractuqquhq2
+ ___satfractuqqusq2
+ ___satfractuqqudq2
+ ___satfractuqqutq2
+ ___satfractuqquha
+ ___satfractuqqusa
+ ___satfractuqquda
+ ___satfractuqquta
+ ___satfractuhqqq
+ ___satfractuhqhq
+ ___satfractuhqsq
+ ___satfractuhqdq
+ ___satfractuhqtq
+ ___satfractuhqha
+ ___satfractuhqsa
+ ___satfractuhqda
+ ___satfractuhqta
+ ___satfractuhquqq2
+ ___satfractuhqusq2
+ ___satfractuhqudq2
+ ___satfractuhqutq2
+ ___satfractuhquha
+ ___satfractuhqusa
+ ___satfractuhquda
+ ___satfractuhquta
+ ___satfractusqqq
+ ___satfractusqhq
+ ___satfractusqsq
+ ___satfractusqdq
+ ___satfractusqtq
+ ___satfractusqha
+ ___satfractusqsa
+ ___satfractusqda
+ ___satfractusqta
+ ___satfractusquqq2
+ ___satfractusquhq2
+ ___satfractusqudq2
+ ___satfractusqutq2
+ ___satfractusquha
+ ___satfractusqusa
+ ___satfractusquda
+ ___satfractusquta
+ ___satfractudqqq
+ ___satfractudqhq
+ ___satfractudqsq
+ ___satfractudqdq
+ ___satfractudqtq
+ ___satfractudqha
+ ___satfractudqsa
+ ___satfractudqda
+ ___satfractudqta
+ ___satfractudquqq2
+ ___satfractudquhq2
+ ___satfractudqusq2
+ ___satfractudqutq2
+ ___satfractudquha
+ ___satfractudqusa
+ ___satfractudquda
+ ___satfractudquta
+ ___satfractutqqq
+ ___satfractutqhq
+ ___satfractutqsq
+ ___satfractutqdq
+ ___satfractutqtq
+ ___satfractutqha
+ ___satfractutqsa
+ ___satfractutqda
+ ___satfractutqta
+ ___satfractutquqq2
+ ___satfractutquhq2
+ ___satfractutqusq2
+ ___satfractutqudq2
+ ___satfractutquha
+ ___satfractutqusa
+ ___satfractutquda
+ ___satfractutquta
+ ___satfractuhaqq
+ ___satfractuhahq
+ ___satfractuhasq
+ ___satfractuhadq
+ ___satfractuhatq
+ ___satfractuhaha
+ ___satfractuhasa
+ ___satfractuhada
+ ___satfractuhata
+ ___satfractuhauqq
+ ___satfractuhauhq
+ ___satfractuhausq
+ ___satfractuhaudq
+ ___satfractuhautq
+ ___satfractuhausa2
+ ___satfractuhauda2
+ ___satfractuhauta2
+ ___satfractusaqq
+ ___satfractusahq
+ ___satfractusasq
+ ___satfractusadq
+ ___satfractusatq
+ ___satfractusaha
+ ___satfractusasa
+ ___satfractusada
+ ___satfractusata
+ ___satfractusauqq
+ ___satfractusauhq
+ ___satfractusausq
+ ___satfractusaudq
+ ___satfractusautq
+ ___satfractusauha2
+ ___satfractusauda2
+ ___satfractusauta2
+ ___satfractudaqq
+ ___satfractudahq
+ ___satfractudasq
+ ___satfractudadq
+ ___satfractudatq
+ ___satfractudaha
+ ___satfractudasa
+ ___satfractudada
+ ___satfractudata
+ ___satfractudauqq
+ ___satfractudauhq
+ ___satfractudausq
+ ___satfractudaudq
+ ___satfractudautq
+ ___satfractudauha2
+ ___satfractudausa2
+ ___satfractudauta2
+ ___satfractutaqq
+ ___satfractutahq
+ ___satfractutasq
+ ___satfractutadq
+ ___satfractutatq
+ ___satfractutaha
+ ___satfractutasa
+ ___satfractutada
+ ___satfractutata
+ ___satfractutauqq
+ ___satfractutauhq
+ ___satfractutausq
+ ___satfractutaudq
+ ___satfractutautq
+ ___satfractutauha2
+ ___satfractutausa2
+ ___satfractutauda2
+ ___satfractqiqq
+ ___satfractqihq
+ ___satfractqisq
+ ___satfractqidq
+ ___satfractqitq
+ ___satfractqiha
+ ___satfractqisa
+ ___satfractqida
+ ___satfractqita
+ ___satfractqiuqq
+ ___satfractqiuhq
+ ___satfractqiusq
+ ___satfractqiudq
+ ___satfractqiutq
+ ___satfractqiuha
+ ___satfractqiusa
+ ___satfractqiuda
+ ___satfractqiuta
+ ___satfracthiqq
+ ___satfracthihq
+ ___satfracthisq
+ ___satfracthidq
+ ___satfracthitq
+ ___satfracthiha
+ ___satfracthisa
+ ___satfracthida
+ ___satfracthita
+ ___satfracthiuqq
+ ___satfracthiuhq
+ ___satfracthiusq
+ ___satfracthiudq
+ ___satfracthiutq
+ ___satfracthiuha
+ ___satfracthiusa
+ ___satfracthiuda
+ ___satfracthiuta
+ ___satfractsiqq
+ ___satfractsihq
+ ___satfractsisq
+ ___satfractsidq
+ ___satfractsitq
+ ___satfractsiha
+ ___satfractsisa
+ ___satfractsida
+ ___satfractsita
+ ___satfractsiuqq
+ ___satfractsiuhq
+ ___satfractsiusq
+ ___satfractsiudq
+ ___satfractsiutq
+ ___satfractsiuha
+ ___satfractsiusa
+ ___satfractsiuda
+ ___satfractsiuta
+ ___satfractdiqq
+ ___satfractdihq
+ ___satfractdisq
+ ___satfractdidq
+ ___satfractditq
+ ___satfractdiha
+ ___satfractdisa
+ ___satfractdida
+ ___satfractdita
+ ___satfractdiuqq
+ ___satfractdiuhq
+ ___satfractdiusq
+ ___satfractdiudq
+ ___satfractdiutq
+ ___satfractdiuha
+ ___satfractdiusa
+ ___satfractdiuda
+ ___satfractdiuta
+ ___satfracttiqq
+ ___satfracttihq
+ ___satfracttisq
+ ___satfracttidq
+ ___satfracttitq
+ ___satfracttiha
+ ___satfracttisa
+ ___satfracttida
+ ___satfracttita
+ ___satfracttiuqq
+ ___satfracttiuhq
+ ___satfracttiusq
+ ___satfracttiudq
+ ___satfracttiutq
+ ___satfracttiuha
+ ___satfracttiusa
+ ___satfracttiuda
+ ___satfracttiuta
+ ___satfractsfqq
+ ___satfractsfhq
+ ___satfractsfsq
+ ___satfractsfdq
+ ___satfractsftq
+ ___satfractsfha
+ ___satfractsfsa
+ ___satfractsfda
+ ___satfractsfta
+ ___satfractsfuqq
+ ___satfractsfuhq
+ ___satfractsfusq
+ ___satfractsfudq
+ ___satfractsfutq
+ ___satfractsfuha
+ ___satfractsfusa
+ ___satfractsfuda
+ ___satfractsfuta
+ ___satfractdfqq
+ ___satfractdfhq
+ ___satfractdfsq
+ ___satfractdfdq
+ ___satfractdftq
+ ___satfractdfha
+ ___satfractdfsa
+ ___satfractdfda
+ ___satfractdfta
+ ___satfractdfuqq
+ ___satfractdfuhq
+ ___satfractdfusq
+ ___satfractdfudq
+ ___satfractdfutq
+ ___satfractdfuha
+ ___satfractdfusa
+ ___satfractdfuda
+ ___satfractdfuta
+ ___fractunsqqqi
+ ___fractunsqqhi
+ ___fractunsqqsi
+ ___fractunsqqdi
+ ___fractunsqqti
+ ___fractunshqqi
+ ___fractunshqhi
+ ___fractunshqsi
+ ___fractunshqdi
+ ___fractunshqti
+ ___fractunssqqi
+ ___fractunssqhi
+ ___fractunssqsi
+ ___fractunssqdi
+ ___fractunssqti
+ ___fractunsdqqi
+ ___fractunsdqhi
+ ___fractunsdqsi
+ ___fractunsdqdi
+ ___fractunsdqti
+ ___fractunstqqi
+ ___fractunstqhi
+ ___fractunstqsi
+ ___fractunstqdi
+ ___fractunstqti
+ ___fractunshaqi
+ ___fractunshahi
+ ___fractunshasi
+ ___fractunshadi
+ ___fractunshati
+ ___fractunssaqi
+ ___fractunssahi
+ ___fractunssasi
+ ___fractunssadi
+ ___fractunssati
+ ___fractunsdaqi
+ ___fractunsdahi
+ ___fractunsdasi
+ ___fractunsdadi
+ ___fractunsdati
+ ___fractunstaqi
+ ___fractunstahi
+ ___fractunstasi
+ ___fractunstadi
+ ___fractunstati
+ ___fractunsuqqqi
+ ___fractunsuqqhi
+ ___fractunsuqqsi
+ ___fractunsuqqdi
+ ___fractunsuqqti
+ ___fractunsuhqqi
+ ___fractunsuhqhi
+ ___fractunsuhqsi
+ ___fractunsuhqdi
+ ___fractunsuhqti
+ ___fractunsusqqi
+ ___fractunsusqhi
+ ___fractunsusqsi
+ ___fractunsusqdi
+ ___fractunsusqti
+ ___fractunsudqqi
+ ___fractunsudqhi
+ ___fractunsudqsi
+ ___fractunsudqdi
+ ___fractunsudqti
+ ___fractunsutqqi
+ ___fractunsutqhi
+ ___fractunsutqsi
+ ___fractunsutqdi
+ ___fractunsutqti
+ ___fractunsuhaqi
+ ___fractunsuhahi
+ ___fractunsuhasi
+ ___fractunsuhadi
+ ___fractunsuhati
+ ___fractunsusaqi
+ ___fractunsusahi
+ ___fractunsusasi
+ ___fractunsusadi
+ ___fractunsusati
+ ___fractunsudaqi
+ ___fractunsudahi
+ ___fractunsudasi
+ ___fractunsudadi
+ ___fractunsudati
+ ___fractunsutaqi
+ ___fractunsutahi
+ ___fractunsutasi
+ ___fractunsutadi
+ ___fractunsutati
+ ___fractunsqiqq
+ ___fractunsqihq
+ ___fractunsqisq
+ ___fractunsqidq
+ ___fractunsqitq
+ ___fractunsqiha
+ ___fractunsqisa
+ ___fractunsqida
+ ___fractunsqita
+ ___fractunsqiuqq
+ ___fractunsqiuhq
+ ___fractunsqiusq
+ ___fractunsqiudq
+ ___fractunsqiutq
+ ___fractunsqiuha
+ ___fractunsqiusa
+ ___fractunsqiuda
+ ___fractunsqiuta
+ ___fractunshiqq
+ ___fractunshihq
+ ___fractunshisq
+ ___fractunshidq
+ ___fractunshitq
+ ___fractunshiha
+ ___fractunshisa
+ ___fractunshida
+ ___fractunshita
+ ___fractunshiuqq
+ ___fractunshiuhq
+ ___fractunshiusq
+ ___fractunshiudq
+ ___fractunshiutq
+ ___fractunshiuha
+ ___fractunshiusa
+ ___fractunshiuda
+ ___fractunshiuta
+ ___fractunssiqq
+ ___fractunssihq
+ ___fractunssisq
+ ___fractunssidq
+ ___fractunssitq
+ ___fractunssiha
+ ___fractunssisa
+ ___fractunssida
+ ___fractunssita
+ ___fractunssiuqq
+ ___fractunssiuhq
+ ___fractunssiusq
+ ___fractunssiudq
+ ___fractunssiutq
+ ___fractunssiuha
+ ___fractunssiusa
+ ___fractunssiuda
+ ___fractunssiuta
+ ___fractunsdiqq
+ ___fractunsdihq
+ ___fractunsdisq
+ ___fractunsdidq
+ ___fractunsditq
+ ___fractunsdiha
+ ___fractunsdisa
+ ___fractunsdida
+ ___fractunsdita
+ ___fractunsdiuqq
+ ___fractunsdiuhq
+ ___fractunsdiusq
+ ___fractunsdiudq
+ ___fractunsdiutq
+ ___fractunsdiuha
+ ___fractunsdiusa
+ ___fractunsdiuda
+ ___fractunsdiuta
+ ___fractunstiqq
+ ___fractunstihq
+ ___fractunstisq
+ ___fractunstidq
+ ___fractunstitq
+ ___fractunstiha
+ ___fractunstisa
+ ___fractunstida
+ ___fractunstita
+ ___fractunstiuqq
+ ___fractunstiuhq
+ ___fractunstiusq
+ ___fractunstiudq
+ ___fractunstiutq
+ ___fractunstiuha
+ ___fractunstiusa
+ ___fractunstiuda
+ ___fractunstiuta
+ ___satfractunsqiqq
+ ___satfractunsqihq
+ ___satfractunsqisq
+ ___satfractunsqidq
+ ___satfractunsqitq
+ ___satfractunsqiha
+ ___satfractunsqisa
+ ___satfractunsqida
+ ___satfractunsqita
+ ___satfractunsqiuqq
+ ___satfractunsqiuhq
+ ___satfractunsqiusq
+ ___satfractunsqiudq
+ ___satfractunsqiutq
+ ___satfractunsqiuha
+ ___satfractunsqiusa
+ ___satfractunsqiuda
+ ___satfractunsqiuta
+ ___satfractunshiqq
+ ___satfractunshihq
+ ___satfractunshisq
+ ___satfractunshidq
+ ___satfractunshitq
+ ___satfractunshiha
+ ___satfractunshisa
+ ___satfractunshida
+ ___satfractunshita
+ ___satfractunshiuqq
+ ___satfractunshiuhq
+ ___satfractunshiusq
+ ___satfractunshiudq
+ ___satfractunshiutq
+ ___satfractunshiuha
+ ___satfractunshiusa
+ ___satfractunshiuda
+ ___satfractunshiuta
+ ___satfractunssiqq
+ ___satfractunssihq
+ ___satfractunssisq
+ ___satfractunssidq
+ ___satfractunssitq
+ ___satfractunssiha
+ ___satfractunssisa
+ ___satfractunssida
+ ___satfractunssita
+ ___satfractunssiuqq
+ ___satfractunssiuhq
+ ___satfractunssiusq
+ ___satfractunssiudq
+ ___satfractunssiutq
+ ___satfractunssiuha
+ ___satfractunssiusa
+ ___satfractunssiuda
+ ___satfractunssiuta
+ ___satfractunsdiqq
+ ___satfractunsdihq
+ ___satfractunsdisq
+ ___satfractunsdidq
+ ___satfractunsditq
+ ___satfractunsdiha
+ ___satfractunsdisa
+ ___satfractunsdida
+ ___satfractunsdita
+ ___satfractunsdiuqq
+ ___satfractunsdiuhq
+ ___satfractunsdiusq
+ ___satfractunsdiudq
+ ___satfractunsdiutq
+ ___satfractunsdiuha
+ ___satfractunsdiusa
+ ___satfractunsdiuda
+ ___satfractunsdiuta
+ ___satfractunstiqq
+ ___satfractunstihq
+ ___satfractunstisq
+ ___satfractunstidq
+ ___satfractunstitq
+ ___satfractunstiha
+ ___satfractunstisa
+ ___satfractunstida
+ ___satfractunstita
+ ___satfractunstiuqq
+ ___satfractunstiuhq
+ ___satfractunstiusq
+ ___satfractunstiudq
+ ___satfractunstiutq
+ ___satfractunstiuha
+ ___satfractunstiusa
+ ___satfractunstiuda
+ ___satfractunstiuta
+}
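The names above are systematic: __fract (or __satfract for the saturating forms, and __fractuns/__satfractuns for the unsigned-integer forms) is followed by source and destination mode codes - qq/hq/sq/dq/tq for the signed fract modes, ha/sa/da/ta for the accum modes, a u prefix for their unsigned variants, qi/hi/si/di/ti for integer modes, and sf/df for float and double - with the third leading underscore being the target's extra symbol prefix. A minimal sketch of how user code reaches them, assuming a fixed-point-capable GCC target and the default type-to-mode mapping (long _Fract is SQ there); the lowerings named in the comments are the expected ones, not guaranteed:

/* Sketch only: fixed-point conversions lower to the __fract* family.  */
long _Fract
from_int (int x)
{
  return x;                     /* int -> SQ: may call __fractsisq */
}

_Sat long _Fract
from_int_sat (int x)
{
  return x;                     /* saturating: may call __satfractsisq */
}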
+
+%inherit GCC_4.4.0 GCC_4.3.0
+GCC_4.4.0 {
+ ___sync_fetch_and_add_1
+ ___sync_fetch_and_sub_1
+ ___sync_fetch_and_or_1
+ ___sync_fetch_and_and_1
+ ___sync_fetch_and_xor_1
+ ___sync_fetch_and_nand_1
+ ___sync_add_and_fetch_1
+ ___sync_sub_and_fetch_1
+ ___sync_or_and_fetch_1
+ ___sync_and_and_fetch_1
+ ___sync_xor_and_fetch_1
+ ___sync_nand_and_fetch_1
+ ___sync_bool_compare_and_swap_1
+ ___sync_val_compare_and_swap_1
+ ___sync_lock_test_and_set_1
+
+ ___sync_fetch_and_add_2
+ ___sync_fetch_and_sub_2
+ ___sync_fetch_and_or_2
+ ___sync_fetch_and_and_2
+ ___sync_fetch_and_xor_2
+ ___sync_fetch_and_nand_2
+ ___sync_add_and_fetch_2
+ ___sync_sub_and_fetch_2
+ ___sync_or_and_fetch_2
+ ___sync_and_and_fetch_2
+ ___sync_xor_and_fetch_2
+ ___sync_nand_and_fetch_2
+ ___sync_bool_compare_and_swap_2
+ ___sync_val_compare_and_swap_2
+ ___sync_lock_test_and_set_2
+
+ ___sync_fetch_and_add_4
+ ___sync_fetch_and_sub_4
+ ___sync_fetch_and_or_4
+ ___sync_fetch_and_and_4
+ ___sync_fetch_and_xor_4
+ ___sync_fetch_and_nand_4
+ ___sync_add_and_fetch_4
+ ___sync_sub_and_fetch_4
+ ___sync_or_and_fetch_4
+ ___sync_and_and_fetch_4
+ ___sync_xor_and_fetch_4
+ ___sync_nand_and_fetch_4
+ ___sync_bool_compare_and_swap_4
+ ___sync_val_compare_and_swap_4
+ ___sync_lock_test_and_set_4
+
+ ___sync_fetch_and_add_8
+ ___sync_fetch_and_sub_8
+ ___sync_fetch_and_or_8
+ ___sync_fetch_and_and_8
+ ___sync_fetch_and_xor_8
+ ___sync_fetch_and_nand_8
+ ___sync_add_and_fetch_8
+ ___sync_sub_and_fetch_8
+ ___sync_or_and_fetch_8
+ ___sync_and_and_fetch_8
+ ___sync_xor_and_fetch_8
+ ___sync_nand_and_fetch_8
+ ___sync_bool_compare_and_swap_8
+ ___sync_val_compare_and_swap_8
+ ___sync_lock_test_and_set_8
+
+ ___sync_fetch_and_add_16
+ ___sync_fetch_and_sub_16
+ ___sync_fetch_and_or_16
+ ___sync_fetch_and_and_16
+ ___sync_fetch_and_xor_16
+ ___sync_fetch_and_nand_16
+ ___sync_add_and_fetch_16
+ ___sync_sub_and_fetch_16
+ ___sync_or_and_fetch_16
+ ___sync_and_and_fetch_16
+ ___sync_xor_and_fetch_16
+ ___sync_nand_and_fetch_16
+ ___sync_bool_compare_and_swap_16
+ ___sync_val_compare_and_swap_16
+ ___sync_lock_test_and_set_16
+
+ ___sync_synchronize
+}
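These are the out-of-line implementations of GCC's __sync_* atomic builtins at operand widths of 1 to 16 bytes, plus the __sync_synchronize barrier. A minimal sketch of how they are reached, assuming a configuration where the compiler emits a library call instead of an inline atomic sequence (the lowering named in the comment is the expected one, not guaranteed):

#include <stdio.h>

static int counter;

int main (void)
{
  /* With a 4-byte operand this may lower to __sync_fetch_and_add_4.  */
  int old = __sync_fetch_and_add (&counter, 5);
  printf ("%d %d\n", old, counter);   /* prints: 0 5 */
  return 0;
}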
+
+%inherit GCC_4.5.0 GCC_4.4.0
+GCC_4.5.0 {
+ ___unordxf2
+ ___unordtf2
+}
diff --git a/libgcc/config/bfin/t-bfin b/libgcc/config/bfin/t-bfin
new file mode 100644
index 00000000000..bc2b088ffc1
--- /dev/null
+++ b/libgcc/config/bfin/t-bfin
@@ -0,0 +1,3 @@
+LIB1ASMSRC = bfin/lib1funcs.S
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _muldi3 _umulsi3_highpart
+LIB1ASMFUNCS += _smulsi3_highpart
diff --git a/libgcc/config/bfin/t-crtlibid b/libgcc/config/bfin/t-crtlibid
new file mode 100644
index 00000000000..b0c93e71eb9
--- /dev/null
+++ b/libgcc/config/bfin/t-crtlibid
@@ -0,0 +1,3 @@
+# Assemble startup files.
+crtlibid.o: $(srcdir)/config/bfin/crtlibid.S
+ $(gcc_compile) -c -x assembler-with-cpp $<
diff --git a/libgcc/config/bfin/t-crtstuff b/libgcc/config/bfin/t-crtstuff
new file mode 100644
index 00000000000..eee12eb697a
--- /dev/null
+++ b/libgcc/config/bfin/t-crtstuff
@@ -0,0 +1 @@
+CRTSTUFF_T_CFLAGS = $(PICFLAG)
diff --git a/libgcc/config/bfin/t-linux b/libgcc/config/bfin/t-linux
new file mode 100644
index 00000000000..1c42e482b8d
--- /dev/null
+++ b/libgcc/config/bfin/t-linux
@@ -0,0 +1 @@
+SHLIB_MAPFILES = $(srcdir)/config/bfin/libgcc-glibc.ver
diff --git a/libgcc/config/c6x/crti.S b/libgcc/config/c6x/crti.S
new file mode 100644
index 00000000000..8fe35c1f121
--- /dev/null
+++ b/libgcc/config/c6x/crti.S
@@ -0,0 +1,39 @@
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
+
+ .section .init
+ .globl _init
+ .type _init,@function
+_init:
+ add .l2 -8, B15, B15
+ stw .d2t2 B3,*+B15(4)
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+_fini:
+ add .l2 -8, B15, B15
+ stw .d2t2 B3,*+B15(4)
diff --git a/libgcc/config/c6x/crtn.S b/libgcc/config/c6x/crtn.S
new file mode 100644
index 00000000000..5900a4b14c4
--- /dev/null
+++ b/libgcc/config/c6x/crtn.S
@@ -0,0 +1,41 @@
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .section .init
+ ldw .d2t2 *+B15(4), B3
+ add .d2 B15, 8, B15
+ nop 3
+ ret .s2 B3
+ nop 5
+
+ .section .fini
+ ldw .d2t2 *+B15(4), B3
+ add .d2 B15, 8, B15
+ nop 3
+ ret .s2 B3
+ nop 5
+
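Taken together, the linker lays out .init as the crti.S prologue, then any .init fragments contributed by other objects, then the crtn.S epilogue, producing one complete _init function (and likewise _fini). A minimal sketch of the caller's side, assuming a conventional C runtime; start_sketch is hypothetical, only _init and _fini come from these files:

#include <stdlib.h>

extern void _init (void);   /* prologue from crti.S, epilogue from crtn.S */
extern void _fini (void);
extern int main (void);

void
start_sketch (void)
{
  _init ();                 /* run collected .init code before main */
  atexit (_fini);           /* arrange for .fini code to run at exit */
  exit (main ());
}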
diff --git a/libgcc/config/c6x/eqd.c b/libgcc/config/c6x/eqd.c
new file mode 100644
index 00000000000..d6b32013bcb
--- /dev/null
+++ b/libgcc/config/c6x/eqd.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a == b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/double.h>
+
+CMPtype __c6xabi_eqd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_EQ_D(r, A, B);
+ if (r && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return !r;
+}
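A note on the return value: soft-fp's FP_CMP_EQ_D yields 0 when the operands compare equal, so return !r implements the "1 iff a == b" contract, and NaNs (which compare unequal) correctly give 0. A host-side model for reference - ordinary C == has the same observable behavior:

#include <math.h>
#include <stdio.h>

static int model_eq (double a, double b) { return a == b; }

int main (void)
{
  printf ("%d %d %d\n",
          model_eq (1.0, 1.0),    /* 1 */
          model_eq (1.0, 2.0),    /* 0 */
          model_eq (NAN, NAN));   /* 0: NaN is unequal even to itself */
  return 0;
}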
diff --git a/libgcc/config/c6x/eqf.c b/libgcc/config/c6x/eqf.c
new file mode 100644
index 00000000000..ee6dafc98b7
--- /dev/null
+++ b/libgcc/config/c6x/eqf.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a == b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/single.h>
+
+CMPtype __c6xabi_eqf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_EQ_S(r, A, B);
+ if (r && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return !r;
+}
diff --git a/libgcc/config/c6x/ged.c b/libgcc/config/c6x/ged.c
new file mode 100644
index 00000000000..2089904f92a
--- /dev/null
+++ b/libgcc/config/c6x/ged.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a >= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/double.h>
+
+CMPtype __c6xabi_ged(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r >= 0;
+}
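The FP_CMP_D convention used here and in the gt/le/lt files below: r becomes -1, 0, or 1 for a < b, a == b, a > b, and takes the macro's last argument when the operands are unordered. Passing -2 for ge/gt (and 2 in led.c, lef.c, ltd.c, ltf.c) guarantees that an unordered pair fails the final test, as IEEE 754 requires. A small host-side model of the convention:

#include <math.h>
#include <stdio.h>

static int
model_cmp (double a, double b, int unordered)
{
  if (isunordered (a, b))
    return unordered;
  return (a < b) ? -1 : (a > b) ? 1 : 0;
}

int main (void)
{
  printf ("%d %d\n",
          model_cmp (2.0, 1.0, -2) >= 0,   /* 1: 2 >= 1 */
          model_cmp (NAN, 1.0, -2) >= 0);  /* 0: unordered is never >= */
  return 0;
}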
diff --git a/libgcc/config/c6x/gef.c b/libgcc/config/c6x/gef.c
new file mode 100644
index 00000000000..ce4c1c0af3d
--- /dev/null
+++ b/libgcc/config/c6x/gef.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a >= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/single.h>
+
+CMPtype __c6xabi_gef(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r >= 0;
+}
diff --git a/libgcc/config/c6x/gtd.c b/libgcc/config/c6x/gtd.c
new file mode 100644
index 00000000000..6d45aef9af8
--- /dev/null
+++ b/libgcc/config/c6x/gtd.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a > b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/double.h>
+
+CMPtype __c6xabi_gtd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r > 0;
+}
diff --git a/libgcc/config/c6x/gtf.c b/libgcc/config/c6x/gtf.c
new file mode 100644
index 00000000000..c6a108a2833
--- /dev/null
+++ b/libgcc/config/c6x/gtf.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a > b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/single.h>
+
+CMPtype __c6xabi_gtf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r > 0;
+}
diff --git a/libgcc/config/c6x/led.c b/libgcc/config/c6x/led.c
new file mode 100644
index 00000000000..c99e29e0ddf
--- /dev/null
+++ b/libgcc/config/c6x/led.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a <= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/double.h>
+
+CMPtype __c6xabi_led(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r <= 0;
+}
diff --git a/libgcc/config/c6x/lef.c b/libgcc/config/c6x/lef.c
new file mode 100644
index 00000000000..ce2c16f8ede
--- /dev/null
+++ b/libgcc/config/c6x/lef.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a <= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/single.h>
+
+CMPtype __c6xabi_lef(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r <= 0;
+}
diff --git a/libgcc/config/c6x/lib1funcs.S b/libgcc/config/c6x/lib1funcs.S
new file mode 100644
index 00000000000..5bf34474bbd
--- /dev/null
+++ b/libgcc/config/c6x/lib1funcs.S
@@ -0,0 +1,438 @@
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+ ;; call to divu. It stores B3 on the stack.
+
+#ifdef L_divsi3
+.text
+.align 2
+.global __c6xabi_divi
+.hidden __c6xabi_divi
+.type __c6xabi_divi, STT_FUNC
+
+__c6xabi_divi:
+ call .s2 __c6xabi_divu
+|| mv .d2 B3, B5
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B1
+
+ [A1] neg .l1 A4, A4
+|| [B1] neg .l2 B4, B4
+|| xor .s1x A1, B1, A1
+
+#ifdef _TMS320C6400
+ [A1] addkpc .s2 1f, B3, 4
+#else
+ [A1] mvkl .s2 1f, B3
+ [A1] mvkh .s2 1f, B3
+ nop 2
+#endif
+1:
+ neg .l1 A4, A4
+|| mv .l2 B3,B5
+|| ret .s2 B5
+ nop 5
+#endif
+
+#if defined L_modsi3 || defined L_divmodsi4
+.align 2
+#ifdef L_modsi3
+#define MOD_OUTPUT_REG A4
+.global __c6xabi_remi
+.hidden __c6xabi_remi
+.type __c6xabi_remi, STT_FUNC
+#else
+#define MOD_OUTPUT_REG A5
+.global __c6xabi_divremi
+.hidden __c6xabi_divremi
+.type __c6xabi_divremi, STT_FUNC
+__c6xabi_divremi:
+#endif
+
+__c6xabi_remi:
+ stw .d2t2 B3, *B15--[2]
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B2
+|| mv .s1 A4, A5
+|| call .s2 __c6xabi_divu
+
+ [A1] neg .l1 A4, A4
+|| [B2] neg .l2 B4, B4
+|| xor .s2x B2, A1, B0
+|| mv .d2 B4, B2
+
+#ifdef _TMS320C6400
+ [B0] addkpc .s2 1f, B3, 1
+ [!B0] addkpc .s2 2f, B3, 1
+ nop 2
+#else
+ [B0] mvkl .s2 1f,B3
+ [!B0] mvkl .s2 2f,B3
+
+ [B0] mvkh .s2 1f,B3
+ [!B0] mvkh .s2 2f,B3
+#endif
+1:
+ neg .l1 A4, A4
+2:
+ ldw .d2t2 *++B15[2], B3
+
+#ifdef _TMS320C6400_PLUS
+ mpy32 .m1x A4, B2, A6
+ nop 3
+ ret .s2 B3
+ sub .l1 A5, A6, MOD_OUTPUT_REG
+ nop 4
+#else
+ mpyu .m1x A4, B2, A1
+ nop 1
+ mpylhu .m1x A4, B2, A6
+|| mpylhu .m2x B2, A4, B2
+ nop 1
+ add .l1x A6, B2, A6
+|| ret .s2 B3
+ shl .s1 A6, 16, A6
+ add .d1 A6, A1, A6
+ sub .l1 A5, A6, MOD_OUTPUT_REG
+ nop 2
+#endif
+
+#endif
+
+#if defined L_udivsi3 || defined L_udivmodsi4
+.align 2
+#ifdef L_udivsi3
+.global __c6xabi_divu
+.hidden __c6xabi_divu
+.type __c6xabi_divu, STT_FUNC
+__c6xabi_divu:
+#else
+.global __c6xabi_divremu
+.hidden __c6xabi_divremu
+.type __c6xabi_divremu, STT_FUNC
+__c6xabi_divremu:
+#endif
+ ;; We use a series of up to 31 subc instructions. First, we find
+ ;; out how many leading zero bits there are in the divisor. This
+ ;; gives us both a shift count for aligning (shifting) the divisor
+ ;; to the dividend, and the number of times we have to execute subc.
+
+ ;; At the end, we have both the remainder and most of the quotient
+ ;; in A4. The top bit of the quotient is computed first and is
+ ;; placed in A2.
+
+ ;; Return immediately if the dividend is zero. Setting B4 to 1
+ ;; is a trick to allow us to leave the following insns in the jump
+ ;; delay slot without affecting the result.
+ mv .s2x A4, B1
+
+#ifndef _TMS320C6400
+[!b1] mvk .s2 1, B4
+#endif
+[b1] lmbd .l2 1, B4, B1
+||[!b1] b .s2 B3 ; RETURN A
+#ifdef _TMS320C6400
+||[!b1] mvk .d2 1, B4
+#endif
+#ifdef L_udivmodsi4
+||[!b1] zero .s1 A5
+#endif
+ mv .l1x B1, A6
+|| shl .s2 B4, B1, B4
+
+ ;; The loop performs a maximum of 28 steps, so we do the
+ ;; first 3 here.
+ cmpltu .l1x A4, B4, A2
+[!A2] sub .l1x A4, B4, A4
+|| shru .s2 B4, 1, B4
+|| xor .s1 1, A2, A2
+
+ shl .s1 A2, 31, A2
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+
+ ;; RETURN A may happen here (note: must happen before the next branch)
+0:
+ cmpgt .l2 B1, 7, B0
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+|| [b0] b .s1 0b
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+|| mvk .s1 32, A1
+ sub .l1 A1, A6, A6
+#ifdef L_udivmodsi4
+|| extu .s1 A4, A6, A5
+#endif
+ shl .s1 A4, A6, A4
+ shru .s1 A4, 1, A4
+|| sub .l1 A6, 1, A6
+ or .l1 A2, A4, A4
+ shru .s1 A4, A6, A4
+ nop
+
+#endif
+
+#ifdef L_umodsi3
+.align 2
+.global __c6xabi_remu
+.hidden __c6xabi_remu
+.type __c6xabi_remu, STT_FUNC
+__c6xabi_remu:
+ ;; The ABI seems designed to prevent these functions from calling
+ ;; each other, so we duplicate most of the divsi3 code here.
+ mv .s2x A4, B1
+#ifndef _TMS320C6400
+[!b1] mvk .s2 1, B4
+#endif
+ lmbd .l2 1, B4, B1
+||[!b1] b .s2 B3 ; RETURN A
+#ifdef _TMS320C6400
+||[!b1] mvk .d2 1, B4
+#endif
+
+ mv .l1x B1, A7
+|| shl .s2 B4, B1, B4
+
+ cmpltu .l1x A4, B4, A1
+[!a1] sub .l1x A4, B4, A4
+ shru .s2 B4, 1, B4
+
+0:
+ cmpgt .l2 B1, 7, B0
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; RETURN A may happen here (note: must happen before the next branch)
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+|| [b0] b .s1 0b
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+
+ extu .s1 A4, A7, A4
+ nop 2
+#endif
+
+#if defined L_strasgi_64plus && defined _TMS320C6400_PLUS
+
+.align 2
+.global __c6xabi_strasgi_64plus
+.hidden __c6xabi_strasgi_64plus
+.type __c6xabi_strasgi_64plus, STT_FUNC
+__c6xabi_strasgi_64plus:
+ shru .s2x a6, 2, b31
+|| mv .s1 a4, a30
+|| mv .d2 b4, b30
+
+ add .s2 -4, b31, b31
+
+ sploopd 1
+|| mvc .s2 b31, ilc
+ ldw .d2t2 *b30++, b31
+ nop 4
+ mv .s1x b31,a31
+ spkernel 6, 0
+|| stw .d1t1 a31, *a30++
+
+ ret .s2 b3
+ nop 5
+#endif
+
+#ifdef L_strasgi
+.global __c6xabi_strasgi
+.type __c6xabi_strasgi, STT_FUNC
+__c6xabi_strasgi:
+ ;; This is essentially memcpy, with alignment known to be at least
+ ;; 4, and the size a multiple of 4 greater than or equal to 28.
+ ldw .d2t1 *B4++, A0
+|| mvk .s2 16, B1
+ ldw .d2t1 *B4++, A1
+|| mvk .s2 20, B2
+|| sub .d1 A6, 24, A6
+ ldw .d2t1 *B4++, A5
+ ldw .d2t1 *B4++, A7
+|| mv .l2x A6, B7
+ ldw .d2t1 *B4++, A8
+ ldw .d2t1 *B4++, A9
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+0:
+ stw .d1t2 B5, *A4++
+||[b0] ldw .d2t1 *B4++, A0
+|| mv .s2x A1, B5
+|| mv .l2 B7, B6
+
+[b0] sub .d2 B6, 24, B7
+||[b0] b .s2 0b
+|| cmpltu .l2 B1, B6, B0
+
+[b0] ldw .d2t1 *B4++, A1
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A5, B5
+|| cmpltu .l2 12, B6, B0
+
+[b0] ldw .d2t1 *B4++, A5
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A7, B5
+|| cmpltu .l2 8, B6, B0
+
+[b0] ldw .d2t1 *B4++, A7
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A8, B5
+|| cmpltu .l2 4, B6, B0
+
+[b0] ldw .d2t1 *B4++, A8
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A9, B5
+|| cmpltu .l2 0, B6, B0
+
+[b0] ldw .d2t1 *B4++, A9
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+ ;; loop back branch happens here
+
+ cmpltu .l2 B1, B6, B0
+|| ret .s2 b3
+
+[b0] stw .d1t1 A1, *A4++
+|| cmpltu .l2 12, B6, B0
+[b0] stw .d1t1 A5, *A4++
+|| cmpltu .l2 8, B6, B0
+[b0] stw .d1t1 A7, *A4++
+|| cmpltu .l2 4, B6, B0
+[b0] stw .d1t1 A8, *A4++
+|| cmpltu .l2 0, B6, B0
+[b0] stw .d1t1 A9, *A4++
+
+ ;; return happens here
+
+#endif
+
+#ifdef _TMS320C6400_PLUS
+#ifdef L_push_rts
+.align 2
+.global __c6xabi_push_rts
+.hidden __c6xabi_push_rts
+.type __c6xabi_push_rts, STT_FUNC
+__c6xabi_push_rts:
+ stw .d2t2 B14, *B15--[2]
+ stdw .d2t1 A15:A14, *B15--
+|| b .s2x A3
+ stdw .d2t2 B13:B12, *B15--
+ stdw .d2t1 A13:A12, *B15--
+ stdw .d2t2 B11:B10, *B15--
+ stdw .d2t1 A11:A10, *B15--
+ stdw .d2t2 B3:B2, *B15--
+#endif
+
+#ifdef L_pop_rts
+.align 2
+.global __c6xabi_pop_rts
+.hidden __c6xabi_pop_rts
+.type __c6xabi_pop_rts, STT_FUNC
+__c6xabi_pop_rts:
+ lddw .d2t2 *++B15, B3:B2
+ lddw .d2t1 *++B15, A11:A10
+ lddw .d2t2 *++B15, B11:B10
+ lddw .d2t1 *++B15, A13:A12
+ lddw .d2t2 *++B15, B13:B12
+ lddw .d2t1 *++B15, A15:A14
+|| b .s2 B3
+ ldw .d2t2 *++B15[2], B14
+ nop 4
+#endif
+
+#ifdef L_call_stub
+.align 2
+.global __c6xabi_call_stub
+.type __c6xabi_call_stub, STT_FUNC
+__c6xabi_call_stub:
+ stw .d2t1 A2, *B15--[2]
+ stdw .d2t1 A7:A6, *B15--
+|| call .s2 B31
+ stdw .d2t1 A1:A0, *B15--
+ stdw .d2t2 B7:B6, *B15--
+ stdw .d2t2 B5:B4, *B15--
+ stdw .d2t2 B1:B0, *B15--
+ stdw .d2t2 B3:B2, *B15--
+|| addkpc .s2 1f, B3, 0
+1:
+ lddw .d2t2 *++B15, B3:B2
+ lddw .d2t2 *++B15, B1:B0
+ lddw .d2t2 *++B15, B5:B4
+ lddw .d2t2 *++B15, B7:B6
+ lddw .d2t1 *++B15, A1:A0
+ lddw .d2t1 *++B15, A7:A6
+|| b .s2 B3
+ ldw .d2t1 *++B15[2], A2
+ nop 4
+#endif
+
+#endif
+
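For readers unfamiliar with subc: each instruction performs one step of restoring division, which the C6x ISA defines as "if a >= b then ((a - b) << 1) | 1 else a << 1", shifting a quotient bit into the low end each time; lmbd merely trims how many steps are needed. A C model of the scheme the comments describe - a sketch only, omitting the routine's step trimming and first-bit special case, and assuming d != 0 with operands small enough that no shift overflows (note the __c6xabi_* division entry points above are .hidden, so user code reaches them only through libgcc):

#include <stdint.h>
#include <stdio.h>

/* One 'subc' step: conditional subtract, quotient bit shifted in low.  */
static uint32_t
subc_step (uint32_t a, uint32_t b)
{
  return (a >= b) ? (((a - b) << 1) | 1u) : (a << 1);
}

static uint32_t
model_divu (uint32_t n, uint32_t d)
{
  int steps = 1;
  while ((d << (steps - 1)) <= n)   /* align the divisor above n */
    steps++;
  uint32_t b = d << (steps - 1);
  uint32_t a = n;
  for (int i = 0; i < steps; i++)
    a = subc_step (a, b);
  return a & ((1u << steps) - 1);   /* the remainder is a >> steps */
}

int main (void)
{
  printf ("%u\n", model_divu (100, 7));   /* prints 14 */
  return 0;
}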
diff --git a/libgcc/config/c6x/libgcc-eabi.ver b/libgcc/config/c6x/libgcc-eabi.ver
new file mode 100644
index 00000000000..6bce556512e
--- /dev/null
+++ b/libgcc/config/c6x/libgcc-eabi.ver
@@ -0,0 +1,103 @@
+# Copyright (C) 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_4.7.0 {
+ __c6xabi_strasgi
+ __c6xabi_call_stub
+ __c6xabi_mpyll
+ __c6xabi_negll
+ __c6xabi_llshru
+ __c6xabi_llshl
+ __c6xabi_llshr
+ __c6xabi_fixfu
+ __c6xabi_fixdu
+ __c6xabi_fixflli
+ __c6xabi_fixdlli
+ __c6xabi_fixfull
+ __c6xabi_fixdull
+ __c6xabi_fltllif
+ __c6xabi_fltllid
+ __c6xabi_fltullf
+ __c6xabi_fltulld
+ __c6xabi_divlli
+ __c6xabi_remlli
+ __c6xabi_divull
+ __c6xabi_remull
+ __c6xabi_divremull
+ __c6xabi_gef
+ __c6xabi_gtf
+ __c6xabi_lef
+ __c6xabi_ltf
+ __c6xabi_eqf
+ __c6xabi_ged
+ __c6xabi_gtd
+ __c6xabi_led
+ __c6xabi_ltd
+ __c6xabi_eqd
+ __c6xabi_addf
+ __c6xabi_divf
+ __c6xabi_neqf
+ __c6xabi_cmpf
+ __c6xabi_mpyf
+ __c6xabi_negf
+ __c6xabi_subf
+ __c6xabi_unordf
+ __c6xabi_fixfi
+ __c6xabi_fltif
+ __c6xabi_fltuf
+ __c6xabi_addd
+ __c6xabi_divd
+ __c6xabi_neqd
+ __c6xabi_cmpd
+ __c6xabi_mpyd
+ __c6xabi_negd
+ __c6xabi_subd
+ __c6xabi_unordd
+ __c6xabi_fixdi
+ __c6xabi_fltid
+ __c6xabi_fltud
+ __c6xabi_cvtfd
+ __c6xabi_cvtdf
+ __c6xabi_mulcf
+ __c6xabi_mulcd
+ __c6xabi_divcf
+ __c6xabi_divcd
+
+ __gnu_ltsf2
+ __gnu_ltdf2
+ __gnu_gesf2
+ __gnu_gedf2
+ __gnu_gtsf2
+ __gnu_gtdf2
+ __gnu_eqsf2
+ __gnu_eqdf2
+
+ # Exception-Handling
+ _Unwind_Complete
+ _Unwind_VRS_Get
+ _Unwind_VRS_Set
+ _Unwind_VRS_Pop
+ __c6xabi_unwind_cpp_pr0
+ __c6xabi_unwind_cpp_pr1
+ __c6xabi_unwind_cpp_pr2
+ __c6xabi_unwind_cpp_pr3
+ __c6xabi_unwind_cpp_pr4
+ # The libstdc++ exception-handling personality routine uses this
+ # GNU-specific entry point.
+ __gnu_unwind_frame
+}
diff --git a/libgcc/config/c6x/ltd.c b/libgcc/config/c6x/ltd.c
new file mode 100644
index 00000000000..d4de25866b7
--- /dev/null
+++ b/libgcc/config/c6x/ltd.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a < b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/double.h>
+
+CMPtype __c6xabi_ltd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r < 0;
+}
diff --git a/libgcc/config/c6x/ltf.c b/libgcc/config/c6x/ltf.c
new file mode 100644
index 00000000000..2fe15b99fde
--- /dev/null
+++ b/libgcc/config/c6x/ltf.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Return 1 iff a < b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <soft-fp/soft-fp.h>
+#include <soft-fp/single.h>
+
+CMPtype __c6xabi_ltf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r < 0;
+}
diff --git a/libgcc/config/c6x/t-c6x-elf b/libgcc/config/c6x/t-c6x-elf
deleted file mode 100644
index 445de9b3224..00000000000
--- a/libgcc/config/c6x/t-c6x-elf
+++ /dev/null
@@ -1,4 +0,0 @@
-LIB2ADDEH = $(srcdir)/config/c6x/unwind-c6x.c \
- $(srcdir)/config/c6x/libunwind.S \
- $(srcdir)/config/c6x/pr-support.c $(srcdir)/unwind-c.c
-
diff --git a/libgcc/config/c6x/t-elf b/libgcc/config/c6x/t-elf
new file mode 100644
index 00000000000..c58c614849c
--- /dev/null
+++ b/libgcc/config/c6x/t-elf
@@ -0,0 +1,40 @@
+# Cannot use default rules due to $(CRTSTUFF_T_CFLAGS).
+CUSTOM_CRTIN = yes
+
+LIB1ASMSRC = c6x/lib1funcs.S
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _udivmodsi4 _divmodsi4
+LIB1ASMFUNCS += _strasgi _strasgi_64plus _clzsi2 _clzdi2 _clz
+LIB1ASMFUNCS += _push_rts _pop_rts _call_stub
+
+LIB2FUNCS_EXCLUDE = _cmpdi2 _ucmpdi2 _gcc_bcmp _eprintf _clzsi _clzdi
+
+LIB2ADD = $(srcdir)/config/c6x/gef.c \
+ $(srcdir)/config/c6x/gtf.c \
+ $(srcdir)/config/c6x/lef.c \
+ $(srcdir)/config/c6x/ltf.c \
+ $(srcdir)/config/c6x/eqf.c \
+ $(srcdir)/config/c6x/ged.c \
+ $(srcdir)/config/c6x/gtd.c \
+ $(srcdir)/config/c6x/led.c \
+ $(srcdir)/config/c6x/ltd.c \
+ $(srcdir)/config/c6x/eqd.c
+
+# Avoid failures when the user's GOT becomes too large.
+HOST_LIBGCC2_CFLAGS += -msdata=none
+
+LIB2ADDEH = $(srcdir)/config/c6x/unwind-c6x.c \
+ $(srcdir)/config/c6x/libunwind.S \
+ $(srcdir)/config/c6x/pr-support.c $(srcdir)/unwind-c.c
+
+# Assemble startup files.
+crti.o: $(srcdir)/config/c6x/crti.S
+ $(crt_compile) -c $(CRTSTUFF_T_CFLAGS) $<
+
+crtn.o: $(srcdir)/config/c6x/crtn.S
+ $(crt_compile) -c $(CRTSTUFF_T_CFLAGS) $<
+
+# Avoid failures when the user's GOT becomes too large.
+CRTSTUFF_T_CFLAGS = -msdata=none
+CRTSTUFF_T_CFLAGS_S = -msdata=none
+
+SHLIB_MAPFILES += $(srcdir)/config/c6x/libgcc-eabi.ver
diff --git a/libgcc/config/c6x/t-uclinux b/libgcc/config/c6x/t-uclinux
new file mode 100644
index 00000000000..72a170a575a
--- /dev/null
+++ b/libgcc/config/c6x/t-uclinux
@@ -0,0 +1,3 @@
+HOST_LIBGCC2_CFLAGS += -msdata=none
+
+CRTSTUFF_T_CFLAGS += $(PICFLAG)
diff --git a/libgcc/config/cris/arit.c b/libgcc/config/cris/arit.c
new file mode 100644
index 00000000000..32255f99d39
--- /dev/null
+++ b/libgcc/config/cris/arit.c
@@ -0,0 +1,304 @@
+/* Signed and unsigned multiplication and division and modulus for CRIS.
+ Contributed by Axis Communications.
+ Written by Hans-Peter Nilsson <hp@axis.se>, circa 1992.
+
+ Copyright (C) 1998, 1999, 2000, 2001, 2002,
+ 2005, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Note that we provide prototypes for all "const" functions, to attach
+   the const attribute.  This is necessary with GCC 2.7.2 - adding the
+   attribute to the function *definition* is a syntax error.
+   This did not work with e.g. GCC 2.1; back then, the return type had
+   to be "const".  */
+
+#include "config.h"
+
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 3
+#define LZ(v) __builtin_clz (v)
+#endif
+
+
+#if defined (L_udivsi3) || defined (L_divsi3) || defined (L_umodsi3) \
+ || defined (L_modsi3)
+/* Result type of divmod worker function. */
+struct quot_rem
+ {
+ long quot;
+ long rem;
+ };
+
+/* This is the worker function for div and mod. It is inlined into the
+ respective library function. Parameter A must have bit 31 == 0. */
+
+static __inline__ struct quot_rem
+do_31div (unsigned long a, unsigned long b)
+ __attribute__ ((__const__, __always_inline__));
+
+static __inline__ struct quot_rem
+do_31div (unsigned long a, unsigned long b)
+{
+ /* Adjust operands and result if a is 31 bits. */
+ long extra = 0;
+ int quot_digits = 0;
+
+ if (b == 0)
+ {
+ struct quot_rem ret;
+ ret.quot = 0xffffffff;
+ ret.rem = 0xffffffff;
+ return ret;
+ }
+
+ if (a < b)
+ return (struct quot_rem) { 0, a };
+
+#ifdef LZ
+ if (b <= a)
+ {
+ quot_digits = LZ (b) - LZ (a);
+ quot_digits += (a >= (b << quot_digits));
+ b <<= quot_digits;
+ }
+#else
+ while (b <= a)
+ {
+ b <<= 1;
+ quot_digits++;
+ }
+#endif
+
+ /* Is a 31 bits? Note that bit 31 is handled by the caller. */
+ if (a & 0x40000000)
+ {
+      /* Then make b's highest bit at most 0x40000000, because it must have
+	 been 0x80000000 to be 1 bit higher than a.  */
+ b >>= 1;
+
+ /* Adjust a to be maximum 0x3fffffff, i.e. two upper bits zero. */
+ if (a >= b)
+ {
+ a -= b;
+ extra = 1 << (quot_digits - 1);
+ }
+ else
+ {
+ a -= b >> 1;
+
+ /* Remember that we adjusted a by subtracting b * 2 ** Something. */
+ extra = 1 << quot_digits;
+ }
+
+ /* The number of quotient digits will be one less, because
+ we just adjusted b. */
+ quot_digits--;
+ }
+
+ /* Now do the division part. */
+
+ /* Subtract b and add ones to the right when a >= b
+ i.e. "a - (b - 1) == (a - b) + 1". */
+ b--;
+
+#define DS __asm__ ("dstep %2,%0" : "=r" (a) : "0" (a), "r" (b))
+
+ switch (quot_digits)
+ {
+ case 32: DS; case 31: DS; case 30: DS; case 29: DS;
+ case 28: DS; case 27: DS; case 26: DS; case 25: DS;
+ case 24: DS; case 23: DS; case 22: DS; case 21: DS;
+ case 20: DS; case 19: DS; case 18: DS; case 17: DS;
+ case 16: DS; case 15: DS; case 14: DS; case 13: DS;
+ case 12: DS; case 11: DS; case 10: DS; case 9: DS;
+ case 8: DS; case 7: DS; case 6: DS; case 5: DS;
+ case 4: DS; case 3: DS; case 2: DS; case 1: DS;
+ case 0:;
+ }
+
+ {
+ struct quot_rem ret;
+ ret.quot = (a & ((1 << quot_digits) - 1)) + extra;
+ ret.rem = a >> quot_digits;
+ return ret;
+ }
+}
+
+#ifdef L_udivsi3
+unsigned long
+__Udiv (unsigned long a, unsigned long b) __attribute__ ((__const__));
+
+unsigned long
+__Udiv (unsigned long a, unsigned long b)
+{
+ long extra = 0;
+
+ /* Adjust operands and result, if a and/or b is 32 bits. */
+ /* Effectively: b & 0x80000000. */
+ if ((long) b < 0)
+ return a >= b;
+
+ /* Effectively: a & 0x80000000. */
+ if ((long) a < 0)
+ {
+ int tmp = 0;
+
+ if (b == 0)
+ return 0xffffffff;
+#ifdef LZ
+ tmp = LZ (b);
+#else
+ for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
+ ;
+
+ tmp = 31 - tmp;
+#endif
+
+ if ((b << tmp) > a)
+ {
+ extra = 1 << (tmp-1);
+ a -= b << (tmp - 1);
+ }
+ else
+ {
+ extra = 1 << tmp;
+ a -= b << tmp;
+ }
+ }
+
+  return do_31div (a, b).quot + extra;
+}
+#endif /* L_udivsi3 */
+
+#ifdef L_divsi3
+long
+__Div (long a, long b) __attribute__ ((__const__));
+
+long
+__Div (long a, long b)
+{
+ long extra = 0;
+ long sign = (b < 0) ? -1 : 1;
+
+  /* We must handle a == -2147483648 correctly, and while doing so we
+     must avoid producing a sequence like "abs (a) < 0", which GCC may
+     optimize out; that sequence may not be obvious, as we call inline
+     functions.  Testing for a being negative, and handling that
+     (presumably much rarer) case separately, recovers a bit of the
+     (accumulated) penalty of the 0x80000000 special-case.  */
+ if (a < 0)
+ {
+ sign = -sign;
+
+ if ((a & 0x7fffffff) == 0)
+ {
+ /* We're at 0x80000000. Tread carefully. */
+ a -= b * sign;
+ extra = sign;
+ }
+ a = -a;
+ }
+
+ /* We knowingly penalize pre-v10 models by multiplication with the
+ sign. */
+ return sign * do_31div (a, __builtin_labs (b)).quot + extra;
+}
+#endif /* L_divsi3 */
+
+
+#ifdef L_umodsi3
+unsigned long
+__Umod (unsigned long a, unsigned long b) __attribute__ ((__const__));
+
+unsigned long
+__Umod (unsigned long a, unsigned long b)
+{
+ /* Adjust operands and result if a and/or b is 32 bits. */
+ if ((long) b < 0)
+ return a >= b ? a - b : a;
+
+ if ((long) a < 0)
+ {
+ int tmp = 0;
+
+ if (b == 0)
+ return a;
+#ifdef LZ
+ tmp = LZ (b);
+#else
+ for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
+ ;
+ tmp = 31 - tmp;
+#endif
+
+ if ((b << tmp) > a)
+ {
+ a -= b << (tmp - 1);
+ }
+ else
+ {
+ a -= b << tmp;
+ }
+ }
+
+ return do_31div (a, b).rem;
+}
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+long
+__Mod (long a, long b) __attribute__ ((__const__));
+
+long
+__Mod (long a, long b)
+{
+ long sign = 1;
+
+  /* We must handle a == -2147483648 correctly, and while doing so we
+     must avoid producing a sequence like "abs (a) < 0", which GCC may
+     optimize out; that sequence may not be obvious, as we call inline
+     functions.  Testing for a being negative, and handling that
+     (presumably much rarer) case separately, recovers a bit of the
+     (accumulated) penalty of the 0x80000000 special-case.  */
+ if (a < 0)
+ {
+ sign = -1;
+ if ((a & 0x7fffffff) == 0)
+ /* We're at 0x80000000. Tread carefully. */
+ a += __builtin_labs (b);
+ a = -a;
+ }
+
+ return sign * do_31div (a, __builtin_labs (b)).rem;
+}
+#endif /* L_modsi3 */
+#endif /* L_udivsi3 || L_divsi3 || L_umodsi3 || L_modsi3 */
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
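
The switch of DS macros in do_31div is a classic unrolled shift-and-subtract
divider: one CRIS dstep insn per quotient bit, with the quotient bits
accumulating at the low end of a while the remainder forms above them.
A minimal host-side model of that loop, assuming dstep performs
"a = a << 1; if (a >= b) a -= b" as the "a - (b - 1) == (a - b) + 1"
comment implies (hypothetical helper, not part of the library):

    /* B must already be shifted left QUOT_DIGITS bits, as in do_31div.  */
    static struct quot_rem
    dstep_model (unsigned long a, unsigned long b, int quot_digits)
    {
      struct quot_rem ret;
      int i;

      b--;                              /* one step now does "(a - b) + 1" */
      for (i = 0; i < quot_digits; i++)
        {
          a <<= 1;                      /* this pair is one dstep */
          if (a >= b)
            a -= b;                     /* subtract b and set quotient bit 0 */
        }
      ret.quot = a & ((1 << quot_digits) - 1);  /* low bits: quotient */
      ret.rem = a >> quot_digits;               /* high bits: remainder */
      return ret;
    }

Tracing a = 5, b = 2 (shifted to b = 8, quot_digits = 2): the steps give
a = 3 then a = 6, so quot = 6 & 3 = 2 and rem = 6 >> 2 = 1, as expected.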
diff --git a/libgcc/config/cris/libgcc-glibc.ver b/libgcc/config/cris/libgcc-glibc.ver
new file mode 100644
index 00000000000..e35de83100f
--- /dev/null
+++ b/libgcc/config/cris/libgcc-glibc.ver
@@ -0,0 +1,7 @@
+GCC_4.3 {
+ __Mul
+ __Div
+ __Udiv
+ __Mod
+ __Umod
+}
diff --git a/libgcc/config/cris/mulsi3.S b/libgcc/config/cris/mulsi3.S
new file mode 100644
index 00000000000..76dfb634680
--- /dev/null
+++ b/libgcc/config/cris/mulsi3.S
@@ -0,0 +1,255 @@
+;; Copyright (C) 2001, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; Under Section 7 of GPL version 3, you are granted additional
+;; permissions described in the GCC Runtime Library Exception, version
+;; 3.1, as published by the Free Software Foundation.
+;;
+;; You should have received a copy of the GNU General Public License and
+;; a copy of the GCC Runtime Library Exception along with this program;
+;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; This code used to be generated through rather involved expansions in
+;; the machine description, compiled from this C code:
+;;
+;; #ifdef L_mulsi3
+;; long __Mul (unsigned long a, unsigned long b) __attribute__ ((__const__));
+;;
+;; /* This must be compiled with the -mexpand-mul flag, to synthesize the
+;; multiplication from the mstep instructions. The check for
+;; smaller-size multiplication pays off in the order of .5-10%;
+;; estimated median 1%, depending on application.
+;; FIXME: It can be further optimized if we go to assembler code, as
+;; gcc 2.7.2 adds a few unnecessary instructions and does not put the
+;; basic blocks in optimal order. */
+;; long
+;; __Mul (unsigned long a, unsigned long b)
+;; {
+;; #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+;; /* In case other code is compiled without -march=v10, they will
+;; contain calls to __Mul, regardless of flags at link-time. The
+;; "else"-code below will work, but is unnecessarily slow. This
+;; sometimes cuts a few minutes off from simulation time by just
+;; returning a "mulu.d". */
+;; return a * b;
+;; #else
+;; unsigned long min;
+;;
+;; /* Get minimum via the bound insn. */
+;; min = a < b ? a : b;
+;;
+;; /* Can we omit computation of the high part? */
+;; if (min > 65535)
+;; /* No. Perform full multiplication. */
+;; return a * b;
+;; else
+;; {
+;; /* Check if both operands are within 16 bits. */
+;; unsigned long max;
+;;
+;; /* Get maximum, by knowing the minimum.
+;; This will partition a and b into max and min.
+;; This is not currently something GCC understands,
+;; so do this trick by asm. */
+;; __asm__ ("xor %1,%0\n\txor %2,%0"
+;; : "=r" (max)
+;; : "r" (b), "r" (a), "0" (min));
+;;
+;; if (max > 65535)
+;; /* Make GCC understand that only the low part of "min" will be
+;; used. */
+;; return max * (unsigned short) min;
+;; else
+;; /* Only the low parts of both operands are necessary. */
+;; return ((unsigned short) max) * (unsigned short) min;
+;; }
+;; #endif /* not __CRIS_arch_version >= 10 */
+;; }
+;; #endif /* L_mulsi3 */
+;;
+;; That approach was abandoned since the caveats outweighed the
+;; benefits.  The expand-multiplication machinery is also removed, so you
+;; can't do this anymore.
+;;
+;; For doubters that there were any benefits, here are some: insensitivity
+;; to:
+;; - ABI changes (mostly for experimentation)
+;; - assembler syntax differences (mostly debug format).
+;; - insn scheduling issues.
+;; Most ABI experiments will presumably happen with arches that have mul
+;; insns, so that argument doesn't really hold anymore, and it's unlikely
+;; that new arch variants will need insn scheduling yet lack mul insns.
+
+;; ELF and a.out have different syntax for local labels: labels of the
+;; "wrong" kind are not omitted from the object file.
+#undef L
+#ifdef __AOUT__
+# define L(x) x
+#else
+# define L(x) .x
+#endif
+
+ .global ___Mul
+ .type ___Mul,@function
+___Mul:
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+;; Can't have the mulu.d last on a cache-line (in the delay-slot of the
+;; "ret"), due to hardware bug. See documentation for -mmul-bug-workaround.
+;; Not worthwhile to conditionalize here.
+ .p2alignw 2,0x050f
+ mulu.d $r11,$r10
+ ret
+ nop
+#else
+ move.d $r10,$r12
+ move.d $r11,$r9
+ bound.d $r12,$r9
+ cmpu.w 65535,$r9
+ bls L(L3)
+ move.d $r12,$r13
+
+ movu.w $r11,$r9
+ lslq 16,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ clear.w $r10
+ test.d $r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ movu.w $r12,$r12
+ move.d $r11,$r9
+ clear.w $r9
+ test.d $r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ add.w $r9,$r10
+ lslq 16,$r10
+ ret
+ add.d $r13,$r10
+
+L(L3):
+ move.d $r9,$r10
+ xor $r11,$r10
+ xor $r12,$r10
+ cmpu.w 65535,$r10
+ bls L(L5)
+ movu.w $r9,$r13
+
+ movu.w $r13,$r13
+ move.d $r10,$r9
+ lslq 16,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ clear.w $r10
+ test.d $r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ lslq 16,$r10
+ ret
+ add.d $r9,$r10
+
+L(L5):
+ movu.w $r9,$r9
+ lslq 16,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ ret
+ mstep $r9,$r10
+#endif
+L(Lfe1):
+ .size ___Mul,L(Lfe1)-___Mul
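
The three blocks of 16 mstep insns above implement the schoolbook
decomposition that the commented-out C version alludes to: splitting each
operand into 16-bit halves, only three partial products can affect the
low 32 bits of the result.  A sketch of the arithmetic, assuming each
16-mstep run forms one such partial product:

    /* What the general (both operands > 16 bits) path of ___Mul
       computes, modulo 2^32.  */
    unsigned long
    mul_model (unsigned long a, unsigned long b)
    {
      unsigned long alo = a & 0xffff, ahi = a >> 16;
      unsigned long blo = b & 0xffff, bhi = b >> 16;

      /* ahi * bhi would only affect bits 32..63, so it is dropped.  */
      return alo * blo + ((alo * bhi + ahi * blo) << 16);
    }

The bound/cmpu checks merely skip the cross products when one or both
operands fit in 16 bits, which is the ".5-10%" payoff mentioned in the
old comment.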
diff --git a/libgcc/config/cris/t-cris b/libgcc/config/cris/t-cris
new file mode 100644
index 00000000000..b582974a42e
--- /dev/null
+++ b/libgcc/config/cris/t-cris
@@ -0,0 +1,10 @@
+LIB2ADD = _udivsi3.c _divsi3.c _umodsi3.c _modsi3.c
+
+# The fixed-point arithmetic code is in one file, arit.c,
+# similar to libgcc2.c (or the old libgcc1.c). We need to
+# "split it up" with one file per define.
+$(LIB2ADD): $(srcdir)/config/cris/arit.c
+ name=`echo $@ | sed -e 's,.*/,,' | sed -e 's,.c$$,,'`; \
+ echo "#define L$$name" > tmp-$@ \
+ && echo '#include "$<"' >> tmp-$@ \
+ && mv -f tmp-$@ $@
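
For example, when make builds _udivsi3.c this rule emits a two-line
wrapper (the #include path is shown schematically; $< expands to the
full srcdir path of arit.c):

    #define L_udivsi3
    #include ".../config/cris/arit.c"

Each wrapper thus selects exactly one of the L_udivsi3 / L_divsi3 /
L_umodsi3 / L_modsi3 sections of arit.c.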
diff --git a/libgcc/config/cris/t-elfmulti b/libgcc/config/cris/t-elfmulti
new file mode 100644
index 00000000000..b180521039e
--- /dev/null
+++ b/libgcc/config/cris/t-elfmulti
@@ -0,0 +1,3 @@
+LIB2ADD_ST = $(srcdir)/config/cris/mulsi3.S
+
+CRTSTUFF_T_CFLAGS = -moverride-best-lib-options
diff --git a/libgcc/config/cris/t-linux b/libgcc/config/cris/t-linux
new file mode 100644
index 00000000000..8c7f4d44249
--- /dev/null
+++ b/libgcc/config/cris/t-linux
@@ -0,0 +1,2 @@
+# Override t-linux default.
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/cris/libgcc-glibc.ver
diff --git a/libgcc/config/darwin-64.c b/libgcc/config/darwin-64.c
new file mode 100644
index 00000000000..a012e9dbc1e
--- /dev/null
+++ b/libgcc/config/darwin-64.c
@@ -0,0 +1,72 @@
+/* Functions shipped in the ppc64 and x86_64 version of libgcc_s.1.dylib
+ in older Mac OS X versions, preserved for backwards compatibility.
+ Copyright (C) 2006, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#if defined (__ppc64__) || defined (__x86_64__)
+/* Many of these functions have probably never been used by anyone
+ anywhere on these targets, but it's hard to prove this, so they're defined
+ here. None are actually necessary, as demonstrated below by defining
+ each function using the operation it implements. */
+
+typedef long DI;
+typedef unsigned long uDI;
+typedef int SI;
+typedef unsigned int uSI;
+typedef int word_type __attribute__ ((mode (__word__)));
+
+DI __ashldi3 (DI x, word_type c);
+DI __ashrdi3 (DI x, word_type c);
+int __clzsi2 (uSI x);
+word_type __cmpdi2 (DI x, DI y);
+int __ctzsi2 (uSI x);
+DI __divdi3 (DI x, DI y);
+uDI __lshrdi3 (uDI x, word_type c);
+DI __moddi3 (DI x, DI y);
+DI __muldi3 (DI x, DI y);
+DI __negdi2 (DI x);
+int __paritysi2 (uSI x);
+int __popcountsi2 (uSI x);
+word_type __ucmpdi2 (uDI x, uDI y);
+uDI __udivdi3 (uDI x, uDI y);
+uDI __udivmoddi4 (uDI x, uDI y, uDI *r);
+uDI __umoddi3 (uDI x, uDI y);
+
+DI __ashldi3 (DI x, word_type c) { return x << c; }
+DI __ashrdi3 (DI x, word_type c) { return x >> c; }
+int __clzsi2 (uSI x) { return __builtin_clz (x); }
+word_type __cmpdi2 (DI x, DI y) { return x < y ? 0 : x == y ? 1 : 2; }
+int __ctzsi2 (uSI x) { return __builtin_ctz (x); }
+DI __divdi3 (DI x, DI y) { return x / y; }
+uDI __lshrdi3 (uDI x, word_type c) { return x >> c; }
+DI __moddi3 (DI x, DI y) { return x % y; }
+DI __muldi3 (DI x, DI y) { return x * y; }
+DI __negdi2 (DI x) { return -x; }
+int __paritysi2 (uSI x) { return __builtin_parity (x); }
+int __popcountsi2 (uSI x) { return __builtin_popcount (x); }
+word_type __ucmpdi2 (uDI x, uDI y) { return x < y ? 0 : x == y ? 1 : 2; }
+uDI __udivdi3 (uDI x, uDI y) { return x / y; }
+uDI __udivmoddi4 (uDI x, uDI y, uDI *r) { *r = x % y; return x / y; }
+uDI __umoddi3 (uDI x, uDI y) { return x % y; }
+
+#endif /* __ppc64__ || __x86_64__ */
diff --git a/libgcc/config/darwin-crt3.c b/libgcc/config/darwin-crt3.c
index 9b64f2aa8c3..5ef00546260 100644
--- a/libgcc/config/darwin-crt3.c
+++ b/libgcc/config/darwin-crt3.c
@@ -1,5 +1,5 @@
/* __cxa_atexit backwards-compatibility support for Darwin.
- Copyright (C) 2006, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2009, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -25,10 +25,6 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* Don't do anything if we are compiling for a kext multilib. */
#ifdef __PIC__
-/* It is incorrect to include config.h here, because this file is being
- compiled for the target, and hence definitions concerning only the host
- do not apply. */
-
#include "tconfig.h"
#include "tsystem.h"
diff --git a/libgcc/config/epiphany/crti.S b/libgcc/config/epiphany/crti.S
new file mode 100644
index 00000000000..527d9264775
--- /dev/null
+++ b/libgcc/config/epiphany/crti.S
@@ -0,0 +1,34 @@
+# Start .init and .fini sections.
+# Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ .global init
+ .balign 2
+init:
+ str lr,[sp],-4
+
+ .section .fini
+ .global fini
+ .balign 2
+fini:
+ str lr,[sp],-4
diff --git a/libgcc/config/epiphany/crtint.S b/libgcc/config/epiphany/crtint.S
new file mode 100644
index 00000000000..e66b34e0652
--- /dev/null
+++ b/libgcc/config/epiphany/crtint.S
@@ -0,0 +1,27 @@
+# initialize config for -mfp-mode=int
+# Copyright (C) 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ mov r0, %low(#524288)
+ movt r0, %high(#524288)
+ movts config,r0
diff --git a/libgcc/config/epiphany/crtm1reg-r43.S b/libgcc/config/epiphany/crtm1reg-r43.S
new file mode 100644
index 00000000000..02ef9f2025c
--- /dev/null
+++ b/libgcc/config/epiphany/crtm1reg-r43.S
@@ -0,0 +1,26 @@
+# initialize config for -m1reg-r43
+# Copyright (C) 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ mov r0, 0
+ sub r43,r0,1
diff --git a/libgcc/config/epiphany/crtm1reg-r63.S b/libgcc/config/epiphany/crtm1reg-r63.S
new file mode 100644
index 00000000000..8bd9fb605cb
--- /dev/null
+++ b/libgcc/config/epiphany/crtm1reg-r63.S
@@ -0,0 +1,26 @@
+# initialize config for -m1reg-r63
+# Copyright (C) 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ mov r0, 0
+ sub r63,r0,1
diff --git a/libgcc/config/epiphany/crtn.S b/libgcc/config/epiphany/crtn.S
new file mode 100644
index 00000000000..2c326bf7b96
--- /dev/null
+++ b/libgcc/config/epiphany/crtn.S
@@ -0,0 +1,32 @@
+# End .init and .fini sections.
+# Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ ldr lr,[sp,4]
+ add sp,sp,16
+ jr lr
+
+ .section .fini
+ ldr lr,[sp,4]
+ add sp,sp,16
+ jr lr
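
crti.S and crtn.S bracket whatever the other startup objects contribute
to .init and .fini: the linker concatenates the section contents in link
order, so each section ends up forming one complete function.  A rough
C-shaped sketch of the linked result (illustration only, not a
disassembly):

    void
    init (void)
    {
      /* crti.S:  str lr,[sp],-4  -- save the link register.  */
      /* ...fragments from crtbegin.o, crtint.S and friends...  */
      /* crtn.S:  ldr lr,[sp,4]; add sp,sp,16; jr lr  -- return.  */
    }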
diff --git a/libgcc/config/epiphany/crtrunc.S b/libgcc/config/epiphany/crtrunc.S
new file mode 100644
index 00000000000..37b0507b491
--- /dev/null
+++ b/libgcc/config/epiphany/crtrunc.S
@@ -0,0 +1,26 @@
+# initialize config for -mfp-mode=truncate
+# Copyright (C) 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+ .section .init
+ mov r0, 1
+ movts config,r0
diff --git a/libgcc/config/epiphany/divsi3-float.S b/libgcc/config/epiphany/divsi3-float.S
new file mode 100644
index 00000000000..31a0506946e
--- /dev/null
+++ b/libgcc/config/epiphany/divsi3-float.S
@@ -0,0 +1,77 @@
+/* Signed 32 bit division optimized for Epiphany.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__divsi3,T_UINT)
+ .global SYM(__divsi3)
+ .balign 4
+ HIDDEN_FUNC(__divsi3)
+SYM(__divsi3):
+ float TMP2,r0
+ mov TMP4,0
+ float TMP1,r1
+ sub TMP0,TMP4,r0
+ beq .Lret_r0
+ movgt r0,TMP0
+ sub TMP0,TMP4,r1
+ movgt r1,TMP0
+ mov TMP0,1
+ sub TMP2,TMP2,TMP1
+ asr TMP3,TMP2,31 ; save sign
+ lsl TMP2,TMP2,1
+ blt .Lret0
+ sub TMP1,TMP2,1 ; rounding compensation, avoid overflow
+ movgte TMP2,TMP1
+ lsr TMP2,TMP2,24
+ lsl r1,r1,TMP2
+ lsl TMP0,TMP0,TMP2
+ sub TMP1,r0,r1
+ movgteu r0,TMP1
+ movgteu TMP4,TMP0
+ lsl TMP5,TMP0,1
+ sub TMP1,r0,r1
+ movgteu r0,TMP1
+ movgteu TMP4,TMP5
+ sub TMP1,r1,1
+ mov r1,%low(.L0step)
+ movt r1,%high(.L0step)
+ lsl TMP2,TMP2,3
+ sub r1,r1,TMP2
+ jr r1
+ .rep 30
+ lsl r0,r0,1
+ sub.l r1,r0,TMP1
+ movgteu r0,r1
+ .endr
+.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
+ and r0,r0,r1
+ orr r0,r0,TMP4 ; ... and combine with first bit.
+ eor r0,r0,TMP3 ; restore sign
+ sub r0,r0,TMP3
+.Lret_r0:rts
+.Lret0: mov r0,0
+ rts
+ ENDFUNC(__divsi3)
diff --git a/libgcc/config/epiphany/divsi3.S b/libgcc/config/epiphany/divsi3.S
new file mode 100644
index 00000000000..bdb2860dc0d
--- /dev/null
+++ b/libgcc/config/epiphany/divsi3.S
@@ -0,0 +1,92 @@
+/* Signed 32 bit division optimized for Epiphany.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__divsi3,T_INT)
+ .global SYM(__divsi3)
+ .balign 4
+ HIDDEN_FUNC(__divsi3)
+SYM(__divsi3):
+ mov r12,0
+ sub r2,r12,r0
+ movlt r2,r0
+ sub r3,r12,r1
+ movlt r3,r1
+ sub r19,r2,r3
+ bltu .Lret0
+ movt r12,0x4000
+ orr r16,r2,r12
+ orr r18,r3,r12
+ fsub r16,r16,r12
+ fsub r18,r18,r12
+ movt r12,0x4b80
+ lsr r19,r3,23
+ lsr r17,r2,23
+ movt r17,0x4b80
+ fsub r17,r17,r12
+ movt r19,0x4b80
+ fsub r19,r19,r12
+ mov r12,%low(.L0step)
+ movt r12,%high(.L0step)
+ mov r20,0
+ mov r21,1
+ movne r16,r17
+ lsr r17,r3,23
+ movne r18,r19
+ eor r1,r1,r0 ; save sign
+ asr r19,r1,31
+ lsr r1,r16,23
+ lsr r0,r18,23
+ sub r1,r1,r0 ; calculate bit number difference.
+ lsl r3,r3,r1
+ lsr r16,r3,1
+ lsl r0,r21,r1
+ lsl r1,r1,3
+ sub r12,r12,r1
+ sub r3,r2,r3
+ movgteu r2,r3
+ movgteu r20,r0
+ lsr r0,r0,1
+ add r17,r0,r20
+ sub r3,r2,r16
+ movgteu r2,r3
+ movgteu r20,r17
+ sub r16,r16,1
+ jr r12
+ .rep 30
+ lsl r2,r2,1
+ sub r3,r2,r16
+ movgteu r2,r3
+ .endr
+ sub r0,r0,1 ; mask result bits from steps ...
+ and r0,r0,r2
+ orr r20,r0,r20 ; ... and combine with first bit.
+.L0step:eor r0,r20,r19 ; restore sign
+ sub r0,r0,r19
+ rts
+.Lret0: mov r0,0
+ rts
+ ENDFUNC(__divsi3)
diff --git a/libgcc/config/epiphany/divsi3.c b/libgcc/config/epiphany/divsi3.c
new file mode 100644
index 00000000000..c15aaf3eef6
--- /dev/null
+++ b/libgcc/config/epiphany/divsi3.c
@@ -0,0 +1,120 @@
+/* Generic signed 32 bit division implementation.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef union { unsigned int i; float f; } fu;
+
+/* Although the semantics of the function call for signed inputs, the
+   actual implementation uses unsigned numbers.  */
+unsigned int __divsi3 (unsigned int a, unsigned int b);
+
+unsigned int
+__divsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int sign = (int) (a ^ b) >> 31;
+ unsigned int d, t, s0, s1, s2, r0, r1;
+ fu u0, u1, u2, u1b, u2b;
+
+ a = abs (a);
+ b = abs (b);
+
+ if (b > a)
+ return 0;
+
+ /* Compute difference in number of bits in S0. */
+ u0.i = 0x40000000;
+ u1b.i = u2b.i = u0.i;
+ u1.i = a;
+ u2.i = b;
+ u1.i = a | u0.i;
+ t = 0x4b800000 | ((a >> 23) & 0xffff);
+ if (a >> 23)
+ {
+ u1.i = t;
+ u1b.i = 0x4b800000;
+ }
+ u2.i = b | u0.i;
+ t = 0x4b800000 | ((b >> 23) & 0xffff);
+ if (b >> 23)
+ {
+ u2.i = t;
+ u2b.i = 0x4b800000;
+ }
+ u1.f = u1.f - u1b.f;
+ u2.f = u2.f - u2b.f;
+ s1 = u1.i >> 23;
+ s2 = u2.i >> 23;
+ s0 = s1 - s2;
+
+ b <<= s0;
+ d = b - 1;
+
+ r0 = 1 << s0;
+ r1 = 0;
+ t = a - b;
+ if (t <= a)
+ {
+ a = t;
+ r1 = r0;
+ }
+
+#define STEP(n) case n: a += a; t = a - d; if (t <= a) a = t;
+ switch (s0)
+ {
+ STEP (31)
+ STEP (30)
+ STEP (29)
+ STEP (28)
+ STEP (27)
+ STEP (26)
+ STEP (25)
+ STEP (24)
+ STEP (23)
+ STEP (22)
+ STEP (21)
+ STEP (20)
+ STEP (19)
+ STEP (18)
+ STEP (17)
+ STEP (16)
+ STEP (15)
+ STEP (14)
+ STEP (13)
+ STEP (12)
+ STEP (11)
+ STEP (10)
+ STEP (9)
+ STEP (8)
+ STEP (7)
+ STEP (6)
+ STEP (5)
+ STEP (4)
+ STEP (3)
+ STEP (2)
+ STEP (1)
+ case 0: ;
+ }
+  r0 = r1 | ((r0 - 1) & a);
+ return (r0 ^ sign) - sign;
+}
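
The union arithmetic above is a float-based stand-in for a
count-leading-zeros instruction: or-ing 0x40000000 into the operand and
subtracting 2.0 turns the operand's bit length into the exponent field
of the difference.  A standalone demonstration of the small-operand
case, as a host-side sketch assuming IEEE single precision:

    #include <stdio.h>

    typedef union { unsigned int i; float f; } fu;

    static int
    msb_via_float (unsigned int x)      /* assumes 0 < x < (1 << 23) */
    {
      fu u;

      u.i = x | 0x40000000;     /* the float 2.0 * (1 + x * 2**-23) */
      u.f -= 2.0f;              /* leaves x * 2**-22, exactly */
      return (int) (u.i >> 23) - 105;   /* remove bias: 127 - 22 = 105 */
    }

    int
    main (void)
    {
      /* Prints "0 6 15": e.g. bit 6 is the highest set bit of 100.  */
      printf ("%d %d %d\n", msb_via_float (1), msb_via_float (100),
              msb_via_float (65535));
      return 0;
    }

The a >> 23 branches in __divsi3 extend the same idea to operands wider
than 23 bits, and s0 then comes out as the difference of the two
operands' most-significant-bit positions.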
diff --git a/libgcc/config/epiphany/epiphany-asm.h b/libgcc/config/epiphany/epiphany-asm.h
new file mode 100644
index 00000000000..e86f7efd894
--- /dev/null
+++ b/libgcc/config/epiphany/epiphany-asm.h
@@ -0,0 +1,53 @@
+/* Copyright (C) 1995, 1997, 2007, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+#define STRINGIFY2(a, b) STRINGIFY(a##b)
+#define STRINGIFY(a) #a
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#define FSTAB(X,T) .stabs STRINGIFY2(X##:F,T),36,0,__LINE__,SYM(X)
+#define FUNC(X) .type SYM(X),@function
+#define HIDDEN_FUNC(X) FUNC(X)` .hidden SYM(X)
+#define ENDFUNC0(X) CONCAT1(.Lfe_,X): .size X,CONCAT1(.Lfe_,X)-X
+#define ENDFUNC(X) ENDFUNC0(SYM(X))
+
+#define TMP0 r12
+#define TMP1 r16
+#define TMP2 r17
+#define TMP3 r18
+#define TMP4 r19
+#define TMP5 r20
+
+#define T_INT (0,1)
+ .stabs "int:t(0,1)=r(0,1);-2147483648;2147483647;",128,0,1,0
+#define T_UINT (0,2)
+ .stabs "unsigned int:t(0,2)=r(0,2);0;037777777777;",128,0,1,0
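
With the default empty __USER_LABEL_PREFIX__, an invocation such as
FSTAB (__divsi3,T_INT) should expand to roughly

    .stabs "__divsi3:F(0,1)",36,0,__LINE__,__divsi3

i.e. an N_FUN (code 36) stab describing __divsi3 as a function returning
the int type (0,1) registered just above.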
diff --git a/libgcc/config/epiphany/ieee-754/eqsf2.S b/libgcc/config/epiphany/ieee-754/eqsf2.S
new file mode 100644
index 00000000000..3c04e2a94fc
--- /dev/null
+++ b/libgcc/config/epiphany/ieee-754/eqsf2.S
@@ -0,0 +1,50 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "../epiphany-asm.h"
+
+ /* Assumption: NaNs have all bits 10..30 and one of bit 0..9 set. */
+
+ FSTAB (__eqsf2,T_INT)
+ .global SYM(__eqsf2)
+ .balign 4
+ HIDDEN_FUNC(__eqsf2)
+SYM(__eqsf2):
+ sub TMP0,r0,r1
+ beq .Lno_bdiff
+ orr TMP0,r0,r1
+ add TMP0,TMP0,TMP0
+ rts
+.Lno_bdiff:
+#ifndef FLOAT_FORMAT_MOTOROLA
+ mov TMP0,0xffff
+ movt TMP0,0x7f
+ add TMP0,TMP0,r0
+#else
+ add TMP0,r0,0x3ff
+#endif
+ eor TMP0,TMP0,r0
+ lsr TMP0,TMP0,31
+ rts
+ ENDFUNC(__eqsf2)
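
The add-and-compare in the non-Motorola path is a branch-free NaN test:
adding the maximum mantissa 0x007fffff flips the sign bit of the pattern
exactly when the exponent field is all ones and the mantissa is nonzero.
A host-side sketch (hypothetical helper, not part of the library):

    static int
    is_nan_bits (unsigned int x)
    {
      return ((x + 0x007fffff) ^ x) >> 31;
    }

__eqsf2 therefore returns nonzero ("unequal") even when the operands are
identical NaN patterns, and the (r0|r1) << 1 test makes +0 and -0
compare equal.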
diff --git a/libgcc/config/epiphany/ieee-754/fast_div.S b/libgcc/config/epiphany/ieee-754/fast_div.S
new file mode 100644
index 00000000000..a6cf748c620
--- /dev/null
+++ b/libgcc/config/epiphany/ieee-754/fast_div.S
@@ -0,0 +1,124 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "../epiphany-asm.h"
+
+.section _fast_div_text,"a",@progbits;
+ .balign 8;
+_fast_div_table:
+.word 0x007fffff	// mantissa mask
+.word 0x40257ebb	// constant a = 2.58586
+
+.word 0x3f000000	// constant 126 shifted to bits [30:23]
+.word 0xc0ba2e88	// constant b = -5.81818
+
+.word 0x4087c1e8	// constant c = 4.24242
+.word 0x40000000	// constant 2 for the Newton-Raphson iterations
+
+ .global SYM(__fast_recipsf2)
+ FUNC(__fast_recipsf2)
+SYM(__fast_recipsf2):
+
+//###################
+//# input operands:
+//###################
+// R0 - divisor
+// R1 - function address (used with negative offsets to read _fast_div_table)
+/* Scratch registers: two single (TMP0/TMP5) and two pairs. */
+#define P0L TMP1
+#define P0H TMP2
+#define P1L TMP3
+#define P1H TMP4
+
+//#########################################
+//# Constants to be used in the algorithm
+//#########################################
+ldrd P0L , [ R1 , -3 ]
+
+ldrd P1L , [ R1 , -2 ]
+
+
+
+//#############################################################################
+//# The Algorithm
+//#
+//# Operation: C=A/B
+//# stage 1 - find the reciprocal 1/B according to the following scheme:
+//# B = (2^E)*m (1<m<2, E=e-127)
+//# 1/B = 1/((2^E)*m) = 1/((2^(E+1))*m1) (0.5<m1<1)
+//# = (2^-(E+1))*(1/m1) = (2^E1)*(1/m1)
+//#
+//# Now we can find the new exponent:
+//# e1 = E1+127 = -E-1+127 = -e+127-1+127 = 253-e **
+//# 1/m1 alreadt has the exponent 127, so we have to add 126-e.
+//# the exponent might underflow, which we can detect as a sign change.
+//# Since the architeture uses flush-to-zero for subnormals, we can
+//# give the result 0. then.
+//#
+//# The 1/m1 term with 0.5<m1<1 is approximated with the Chebyshev polynomial
+//# 1/m1 = 2.58586*(m1^2) - 5.81818*m1 + 4.24242
+//#
+//# Next step is to use two iterations of Newton-Raphson algorithm to complete
+//# the reciprocal calculation.
+//#
+//# Final result is achieved by multiplying A with 1/B
+//#############################################################################
+
+
+
+// R0 exponent and sign "replacement" into TMP0
+AND TMP0,R0,P0L ;
+ORR TMP0,TMP0,P1L
+SUB TMP5,R0,TMP0 // R0 sign/exponent extraction into TMP5
+// Calculate new mantissa
+FMADD P1H,TMP0,P0H ;
+ // Calculate new exponent offset 126 - "old exponent"
+ SUB P1L,P1L,TMP5
+ ldrd P0L , [ R1 , -1 ]
+FMADD P0L,TMP0,P1H ;
+ eor P1H,r0,P1L // check for overflow (N-BIT).
+ blt .Lret_0
+// P0L exponent and sign "replacement"
+sub P0L,P0L,TMP5
+
+// Newton-Raphson iteration #1
+MOV TMP0,P0H ;
+FMSUB P0H,R0,P0L ;
+FMUL P0L,P0H,P0L ;
+// Newton-Raphson iteration #2
+FMSUB TMP0,R0,P0L ;
+FMUL R0,TMP0,P0L ;
+jr lr
+.Lret_0:ldrd P0L , [ R1 , -3 ]
+ lsr TMP0,r0,31 ; extract sign
+ lsl TMP0,TMP0,31
+ add P0L,P0L,r0 ; check for NaN input
+ eor P0L,P0L,r0
+ movgte r0,TMP0
+ jr lr
+// Quotient calculation is expected by the caller: FMUL quotient,dividend,R0
+ ;
+ ENDFUNC(__fast_recipsf2)
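
A rough C model of the arithmetic described in the comment block above,
with the integer exponent surgery omitted and m standing for the
divisor's mantissa scaled into (0.5, 1):

    float
    fast_recip_model (float m)
    {
      /* Chebyshev approximation of 1/m on (0.5, 1).  */
      float r = (2.58586f * m - 5.81818f) * m + 4.24242f;

      r = r * (2.0f - m * r);   /* Newton-Raphson iteration #1 */
      r = r * (2.0f - m * r);   /* Newton-Raphson iteration #2 */
      return r;                 /* caller computes A/B as A * (1/B) */
    }

Each Newton-Raphson step roughly doubles the number of correct bits, so
two iterations on top of the polynomial's initial accuracy suffice for
single precision.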
diff --git a/libgcc/config/epiphany/ieee-754/gtesf2.S b/libgcc/config/epiphany/ieee-754/gtesf2.S
new file mode 100644
index 00000000000..615dde4fd8a
--- /dev/null
+++ b/libgcc/config/epiphany/ieee-754/gtesf2.S
@@ -0,0 +1,66 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "../epiphany-asm.h"
+
+ /* Assumptions: NaNs have all bits 10..30 and one of bit 0..9 set.
+ after sub: AC = ~Borrow.
+ clobber: TMP0
+ output: gt / gte indicates greater / greater or equal. */
+
+ FSTAB (__gtesf2,T_INT)
+ .global SYM(__gtesf2)
+ .balign 4
+ HIDDEN_FUNC(__gtesf2)
+SYM(__gtesf2):
+#ifndef FLOAT_FORMAT_MOTOROLA
+ mov TMP0,0xffff
+ movt TMP0,0x7f
+ add TMP0,TMP0,r0
+ eor TMP0,TMP0,r0
+ blt .Lret
+ mov TMP0,0xffff
+ movt TMP0,0x7f
+ add TMP0,TMP0,r1
+#else
+ add TMP0,r0,0x3ff; check for r0 NaN
+ eor TMP0,TMP0,r0
+ blt .Lret
+ add TMP0,r1,0x3ff; check for r1 NaN
+#endif
+ eor TMP0,TMP0,r1
+ blt .Lret
+ and TMP0,r0,r1
+ blt .Lneg
+ orr TMP0,r0,r1
+ lsl TMP0,TMP0,1
+ beq .Lret
+ sub TMP0,r0,r1
+.Lret:
+ rts
+ .balign 4
+.Lneg:
+ sub TMP0,r1,r0
+ rts
+ ENDFUNC(__gtesf2)
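
Once NaNs are filtered out (by the same add-max-mantissa trick as in
eqsf2.S), the comparison reduces to integer arithmetic on the bit
patterns: IEEE floats order like sign-magnitude integers.  A host-side
sketch of the reduction (hypothetical helper):

    static int
    ge_float_bits (unsigned int x, unsigned int y)      /* 1 iff x >= y */
    {
      if (((x | y) << 1) == 0)
        return 1;                       /* +0 and -0 compare equal */
      if ((x & y) >> 31)
        return (int) y >= (int) x;      /* both negative: order reversed */
      return (int) x >= (int) y;
    }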
diff --git a/libgcc/config/epiphany/ieee-754/ordsf2.S b/libgcc/config/epiphany/ieee-754/ordsf2.S
new file mode 100644
index 00000000000..8493660102c
--- /dev/null
+++ b/libgcc/config/epiphany/ieee-754/ordsf2.S
@@ -0,0 +1,50 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "../epiphany-asm.h"
+
+ FSTAB (__ordsf2,T_INT)
+ .global SYM(__ordsf2)
+ .balign 8,,2
+ HIDDEN_FUNC(__ordsf2)
+SYM(__ordsf2):
+#ifndef FLOAT_FORMAT_MOTOROLA
+ mov TMP0,0
+ movt TMP0,0xff00
+ lsl TMP1,r0,1
+ sub TMP1,TMP1,TMP0
+ bgtu .Lret
+ lsl TMP1,r1,1
+ sub TMP1,TMP1,TMP0
+.Lret: rts /* ordered: lteu */
+#else
+ /* Assumption: NaNs have all bits 9..30 and one of bit 0..8 set. */
+ lsl TMP0,r0,1
+ add TMP0,TMP0,0x3fe
+ bgteu .Lret
+ lsl TMP0,r1,1
+ add TMP0,TMP0,0x3fe
+.Lret: rts /* ordered: ltu */
+#endif
+ ENDFUNC(__ordsf2)
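
In C terms, the non-Motorola test above says: with the sign bit shifted
out, x is a NaN exactly when (x << 1) exceeds 0xff000000 as an unsigned
number, and a pair is ordered when neither operand is a NaN
(hypothetical helper):

    static int
    is_ordered (unsigned int x, unsigned int y)
    {
      return (x << 1) <= 0xff000000u && (y << 1) <= 0xff000000u;
    }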
diff --git a/libgcc/config/epiphany/ieee-754/uneqsf2.S b/libgcc/config/epiphany/ieee-754/uneqsf2.S
new file mode 100644
index 00000000000..cba04d3946c
--- /dev/null
+++ b/libgcc/config/epiphany/ieee-754/uneqsf2.S
@@ -0,0 +1,45 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "../epiphany-asm.h"
+
+ FSTAB (__uneqsf2,T_INT)
+ .global SYM(__uneqsf2)
+ .balign 8,,2
+ HIDDEN_FUNC(__uneqsf2)
+SYM(__uneqsf2):
+ sub TMP0,r0,r1
+ beq .Lret
+ orr TMP0,r0,r1
+ add TMP0,TMP0,TMP0
+ beq .Lret
+ mov TMP0,1
+ movt TMP0,0xff00
+ lsl TMP1,r0,1
+ sub TMP1,TMP0,TMP1
+ blteu .Lret
+ lsl TMP1,r1,1
+ sub TMP1,TMP0,TMP1
+.Lret: rts /* uneq: lteu */
+ ENDFUNC(__uneqsf2)
diff --git a/libgcc/config/epiphany/modsi3-float.S b/libgcc/config/epiphany/modsi3-float.S
new file mode 100644
index 00000000000..b789412ac1f
--- /dev/null
+++ b/libgcc/config/epiphany/modsi3-float.S
@@ -0,0 +1,65 @@
+/* Unsigned 32 bit division optimized for Epiphany.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__modsi3,T_UINT)
+ .global SYM(__modsi3)
+ .balign 4
+ HIDDEN_FUNC(__modsi3)
+SYM(__modsi3):
+ asr TMP3,r0,31 ; save sign
+ float TMP0,r0
+ float TMP1,r1
+ mov r2,0
+ sub TMP4,r2,r0
+ beq .Lret_r0
+ movgt r0,TMP4
+ sub TMP2,r2,r1
+ movlte TMP2,r1
+ sub r2,TMP0,TMP1
+ lsl r2,r2,1
+ blte .L0step
+ asr TMP4,r2,24
+ lsl r2,TMP4,3
+ mov TMP4,%low(.L0step)
+ movt TMP4,%high(.L0step)
+ sub r2,TMP4,r2
+ jr r2
+#define STEP(n) lsl.l r2,TMP2,n` sub r2,r0,r2` movgteu r0,r2
+ .balign 8,,2
+ STEP(31)` STEP(30)` STEP(29)` STEP(28)`
+ STEP(27)` STEP(26)` STEP(25)` STEP(24)`
+ STEP(23)` STEP(22)` STEP(21)` STEP(20)`
+ STEP(19)` STEP(18)` STEP(17)` STEP(16)`
+ STEP(15)` STEP(14)` STEP(13)` STEP(12)`
+ STEP(11)` STEP(10)` STEP(9)` STEP(8)`
+ STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
+.L0step:STEP(0)
+ eor r0,r0,TMP3 ; restore sign
+ sub r0,r0,TMP3
+.Lret_r0:
+ rts
+ ENDFUNC(__modsi3)
diff --git a/libgcc/config/epiphany/modsi3.S b/libgcc/config/epiphany/modsi3.S
new file mode 100644
index 00000000000..d969b79c933
--- /dev/null
+++ b/libgcc/config/epiphany/modsi3.S
@@ -0,0 +1,77 @@
+/* Signed 32 bit modulo optimized for Epiphany.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__modsi3,T_INT)
+ .global SYM(__modsi3)
+ .balign 4
+ HIDDEN_FUNC(__modsi3)
+SYM(__modsi3):
+ asr r17,r0,31 ; save sign
+ mov r2,0
+ sub r3,r2,r0
+ movgt r0,r3
+ sub r3,r2,r1
+ movgt r1,r3
+ movt r2,0xa000 ; 0xa0000000
+ orr r3,r2,r0
+ lsr r15,r0,16
+ movt r15,0xa800
+ movne r3,r15
+ lsr r16,r2,2 ; 0x28000000
+ and r15,r3,r16
+ fadd r12,r3,r15
+ orr r3,r2,r1
+ lsr r2,r1,16
+ movt r2,0xa800
+ movne r3,r2
+ and r2,r16,r3
+ fadd r3,r3,r2
+ sub r2,r0,r1
+ bltu .Lret_a
+ lsr r12,r12,23
+ mov r2,%low(.L0step)
+ movt r2,%high(.L0step)
+ lsr r3,r3,23
+ sub r3,r12,r3 ; calculate bit number difference.
+ lsl r3,r3,3
+ sub r2,r2,r3
+ jr r2
+/* lsl_l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
+#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
+ .balign 8,,2
+ STEP(31)` STEP(30)` STEP(29)` STEP(28)`
+ STEP(27)` STEP(26)` STEP(25)` STEP(24)`
+ STEP(23)` STEP(22)` STEP(21)` STEP(20)`
+ STEP(19)` STEP(18)` STEP(17)` STEP(16)`
+ STEP(15)` STEP(14)` STEP(13)` STEP(12)`
+ STEP(11)` STEP(10)` STEP(9)` STEP(8)`
+ STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
+.L0step:STEP(0)
+.Lret_a:eor r0,r0,r17 ; restore sign
+ sub r0,r0,r17
+ rts
+ ENDFUNC(__modsi3)
diff --git a/libgcc/config/epiphany/modsi3.c b/libgcc/config/epiphany/modsi3.c
new file mode 100644
index 00000000000..feee3d1ea25
--- /dev/null
+++ b/libgcc/config/epiphany/modsi3.c
@@ -0,0 +1,106 @@
+/* Generic signed 32 bit modulo implementation.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef union { unsigned int i; float f; } fu;
+
+unsigned int __modsi3 (unsigned int a, unsigned int b);
+
+unsigned int
+__modsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int sign = (int) a >> 31;
+ unsigned int d, t, s0, s1, s2, r0, r1;
+ fu u0, u1, u2, u1b, u2b;
+
+ a = abs (a);
+ b = abs (b);
+
+ if (b > a)
+ goto ret_a;
+
+ /* Compute difference in number of bits in S0. */
+ u0.i = 0x40000000;
+ u1b.i = u2b.i = u0.i;
+ u1.i = a;
+ u2.i = b;
+ u1.i = a | u0.i;
+ t = 0x4b800000 | ((a >> 23) & 0xffff);
+ if (a >> 23)
+ {
+ u1.i = t;
+ u1b.i = 0x4b800000;
+ }
+ u2.i = b | u0.i;
+ t = 0x4b800000 | ((b >> 23) & 0xffff);
+ if (b >> 23)
+ {
+ u2.i = t;
+ u2b.i = 0x4b800000;
+ }
+ u1.f = u1.f - u1b.f;
+ u2.f = u2.f - u2b.f;
+ s1 = u1.i >> 23;
+ s2 = u2.i >> 23;
+ s0 = s1 - s2;
+
+#define STEP(n) case n: d = b << n; t = a - d; if (t <= a) a = t;
+ switch (s0)
+ {
+ STEP (31)
+ STEP (30)
+ STEP (29)
+ STEP (28)
+ STEP (27)
+ STEP (26)
+ STEP (25)
+ STEP (24)
+ STEP (23)
+ STEP (22)
+ STEP (21)
+ STEP (20)
+ STEP (19)
+ STEP (18)
+ STEP (17)
+ STEP (16)
+ STEP (15)
+ STEP (14)
+ STEP (13)
+ STEP (12)
+ STEP (11)
+ STEP (10)
+ STEP (9)
+ STEP (8)
+ STEP (7)
+ STEP (6)
+ STEP (5)
+ STEP (4)
+ STEP (3)
+ STEP (2)
+ STEP (1)
+ STEP (0)
+ }
+ ret_a:
+ return (a ^ sign) - sign;
+}
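
A worked example of the fall-through switch (hypothetical input): for
a = 100, b = 7 the float prologue yields s0 = 6 - 2 = 4, so execution
enters at case 4; b << 4 = 112 does not fit, 56 leaves 44, 28 leaves 16,
14 leaves 2, and the final 7 no longer fits (the subtraction wraps above
a), so the function returns 2 = 100 % 7.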
diff --git a/libgcc/config/epiphany/mulsi3.c b/libgcc/config/epiphany/mulsi3.c
new file mode 100644
index 00000000000..148361d92be
--- /dev/null
+++ b/libgcc/config/epiphany/mulsi3.c
@@ -0,0 +1,39 @@
+/* Generic 32 bit multiply.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+unsigned int
+__mulsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int r = 0;
+
+ while (a)
+ {
+ if (a & 1)
+ r += b;
+ a >>= 1;
+ b <<= 1;
+ }
+ return r;
+}
diff --git a/libgcc/config/epiphany/t-custom-eqsf b/libgcc/config/epiphany/t-custom-eqsf
new file mode 100644
index 00000000000..3b5a54acef2
--- /dev/null
+++ b/libgcc/config/epiphany/t-custom-eqsf
@@ -0,0 +1 @@
+FPBIT_FUNCS := $(filter-out _eq_sf,$(FPBIT_FUNCS))
diff --git a/libgcc/config/epiphany/t-epiphany b/libgcc/config/epiphany/t-epiphany
new file mode 100644
index 00000000000..4b67f5d4690
--- /dev/null
+++ b/libgcc/config/epiphany/t-epiphany
@@ -0,0 +1,35 @@
+# Copyright (C) 1997, 1998, 1999, 2001, 2002, 2003,
+# 2004, 2009, 2010, 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB2ADD_ST = $(srcdir)/config/epiphany/modsi3-float.S \
+ $(srcdir)/config/epiphany/divsi3-float.S \
+ $(srcdir)/config/epiphany/udivsi3-float.S \
+ $(srcdir)/config/epiphany/umodsi3-float.S \
+ $(srcdir)/config/epiphany/ieee-754/eqsf2.S \
+ $(srcdir)/config/epiphany/ieee-754/gtesf2.S \
+ $(srcdir)/config/epiphany/ieee-754/ordsf2.S \
+ $(srcdir)/config/epiphany/ieee-754/uneqsf2.S \
+ $(srcdir)/config/epiphany/ieee-754/fast_div.S
+
+# .init/.fini section routines
+
+crtint.o crtrunc.o crtm1reg-r43.o crtm1reg-r63.o : \
+ %.o: $(srcdir)/config/epiphany/%.S $(GCC_PASSES) $(CONFIG_H)
+ $(crt_compile) -c -x assembler-with-cpp $<
diff --git a/libgcc/config/epiphany/udivsi3-float.S b/libgcc/config/epiphany/udivsi3-float.S
new file mode 100644
index 00000000000..5c960dce5f0
--- /dev/null
+++ b/libgcc/config/epiphany/udivsi3-float.S
@@ -0,0 +1,83 @@
+/* Unsigned 32 bit division optimized for Epiphany.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__udivsi3,T_UINT)
+ .global SYM(__udivsi3)
+ .balign 4
+ HIDDEN_FUNC(__udivsi3)
+SYM(__udivsi3):
+ sub TMP0,r0,r1
+ bltu .Lret0
+ float TMP2,r0
+ mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
+ float TMP3,r1
+ movt TMP1,%high(0xb0800000)
+ asr TMP0,r0,8
+ sub TMP0,TMP0,TMP1
+ movt TMP1,%high(0x00810000)
+ movgteu TMP2,TMP0
+ bblt .Lret1
+ sub TMP2,TMP2,TMP1
+ sub TMP2,TMP2,TMP3
+ mov TMP3,0
+ movltu TMP2,TMP3
+ lsr TMP2,TMP2,23
+ lsl r1,r1,TMP2
+ mov TMP0,1
+ lsl TMP0,TMP0,TMP2
+ sub r0,r0,r1
+ bltu .Ladd_back
+ add TMP3,TMP3,TMP0
+ sub r0,r0,r1
+ bltu .Ladd_back
+.Lsub_loop:; More than two iterations are rare, so it makes sense to leave
+ ; this label here to reduce average branch penalties.
+ add TMP3,TMP3,TMP0
+ sub r0,r0,r1
+ bgteu .Lsub_loop
+.Ladd_back:
+ add r0,r0,r1
+ sub TMP1,r1,1
+ mov r1,%low(.L0step)
+ movt r1,%high(.L0step)
+ lsl TMP2,TMP2,3
+ sub r1,r1,TMP2
+ jr r1
+ .rep 30
+ lsl r0,r0,1
+ sub.l r1,r0,TMP1
+ movgteu r0,r1
+ .endr
+.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
+ and r0,r0,r1
+ orr r0,r0,TMP3 ; ... and combine with first bits.
+ rts
+.Lret0: mov r0,0
+ rts
+.Lret1: mov r0,1
+ rts
+ ENDFUNC(__udivsi3)
diff --git a/libgcc/config/epiphany/udivsi3-float.c b/libgcc/config/epiphany/udivsi3-float.c
new file mode 100644
index 00000000000..c7f10590c49
--- /dev/null
+++ b/libgcc/config/epiphany/udivsi3-float.c
@@ -0,0 +1,125 @@
+/* Generic unsigned 32 bit division implementation.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef union { unsigned int i; float f; } fu;
+
+unsigned int __udivsi3 (unsigned int a, unsigned int b);
+
+unsigned int
+__udivsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int d, t, s0, s1, s2, r0, r1;
+ fu u0, u1, u2, u1b, u2b;
+
+ if (b > a)
+ return 0;
+ if ((int) b < 0)
+ return 1;
+
+ /* Assuming B is nonzero, compute S0 such that 0 <= S0,
+   (B << (S0 + 1)) does not overflow,
+   A < 4.01 * (B << S0), with S0 chosen as small as possible
+   without taking too much time to calculate.  */
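+/* As a concrete example, for A = 1000 (float bits 0x447a0000) and
+   B = 3 (float bits 0x40400000):
+   S0 = ((0x447a0000 + 1 - 0x00800000) - 0x40400000) >> 23 = 7,
+   and indeed 3 << 8 does not overflow and 1000 < 4.01 * (3 << 7).  */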
+#ifdef CONVERT_UNSIGNED
+ u0.f = a;
+ u1.f = b;
+#else /* !CONVERT_UNSIGNED */
+ u0.f = (int) a;
+ u1.f = (int) b;
+#ifdef CONCISE
+  if ((int) a < 0) /* A is unsigned, so test the sign bit explicitly.  */
+ u0.i = (a >> 8) - 0x00800000 + 0x3f800000 + (31 << 23);
+#else /* To use flag setting / cmove, this can be written as: */
+ {
+ unsigned c = 0xff800000 - 0x4f000000;
+ t = (int)a >> 8;
+ if (t >= c)
+ u0.i = (t - c);
+ }
+#endif
+#endif /* !CONVERT_UNSIGNED */
+ s0 = u0.i + 1 /* Compensate for rounding errors. */
+ - 0x00800000 /* adjust by one */ ;
+ s0 = s0 - u1.i;
+ s0 = (int)s0 >= 0 ? s0 : 0;
+ s0 >>= 23;
+
+ b <<= s0;
+ r1 = 0;
+
+ r0 = 1 << s0;
+ a = ((t=a) - b);
+ if (a <= t)
+ {
+ r1 += r0;
+ a = ((t=a) - b);
+ if (a <= t)
+ do {
+ r1 += r0;
+ a = ((t=a) - b);
+ } while (a <= t);
+ }
+ a += b;
+ d = b - 1;
+
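+/* Each STEP is one shift-and-subtract iteration.  Because D is the
+   shifted divisor minus one, a subtraction that fits both removes the
+   divisor from the remainder held in the high part of A and shifts a
+   one into the quotient bits collecting in A's low end; T <= A tests
+   that the unsigned subtraction did not wrap.  Entering the switch at
+   case S0 falls through all remaining steps; the final R0-1 mask below
+   separates the quotient bits from the remainder again.  */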
+#define STEP(n) case n: a += a; t = a - d; if (t <= a) a = t;
+ switch (s0)
+ {
+ STEP (31)
+ STEP (30)
+ STEP (29)
+ STEP (28)
+ STEP (27)
+ STEP (26)
+ STEP (25)
+ STEP (24)
+ STEP (23)
+ STEP (22)
+ STEP (21)
+ STEP (20)
+ STEP (19)
+ STEP (18)
+ STEP (17)
+ STEP (16)
+ STEP (15)
+ STEP (14)
+ STEP (13)
+ STEP (12)
+ STEP (11)
+ STEP (10)
+ STEP (9)
+ STEP (8)
+ STEP (7)
+ STEP (6)
+ STEP (5)
+ STEP (4)
+ STEP (3)
+ STEP (2)
+ STEP (1)
+ case 0: ;
+ }
+ r0 = r1 | (r0-1 & a);
+ return r0;
+}
diff --git a/libgcc/config/epiphany/udivsi3.S b/libgcc/config/epiphany/udivsi3.S
new file mode 100644
index 00000000000..1396281f73a
--- /dev/null
+++ b/libgcc/config/epiphany/udivsi3.S
@@ -0,0 +1,85 @@
+/* Unsigned 32 bit division optimized for Epiphany.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
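+/* Bit-length trick analogous to udivsi3.c: each operand is ORed into
+   the mantissa of a power-of-two float and renormalized with fsub, the
+   difference of the exponent fields gives the shift count, and a
+   computed jump enters the unrolled shift-and-subtract steps.  */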
+ FSTAB (__udivsi3,T_UINT)
+ .global SYM(__udivsi3)
+ .balign 4
+ HIDDEN_FUNC(__udivsi3)
+SYM(__udivsi3):
+ sub r3,r0,r1
+ bltu .Lret0
+ mov r3,0x95
+ lsl r12,r3,23 ; 0x4a800000
+ lsl r3,r3,30 ; 0x40000000
+ orr r16,r0,r3
+ orr r2,r1,r3
+ fsub r16,r16,r3
+ fsub r2,r2,r3
+ lsr r3,r1,21
+ lsr r17,r0,21
+ movt r17,0x4a80
+ fsub r17,r17,r12
+ movt r3,0x4a80
+ fsub r3,r3,r12
+ mov r12,%low(.L0step)
+ movt r12,%high(.L0step)
+ mov r21,1
+ movne r16,r17
+ lsr r17,r1,21
+ movne r2,r3
+ lsr r3,r16,23 ; must mask lower bits of r2 in case op0 was ..
+ lsr r2,r2,23 ; .. shifted and op1 was not.
+ sub r3,r3,r2 ; calculate bit number difference.
+ lsl r1,r1,r3
+ lsr r16,r1,1
+ lsl r2,r21,r3
+ lsl r3,r3,3
+ sub r12,r12,r3
+ sub r3,r0,r1
+ movltu r3,r0
+ mov r0,0
+ movgteu r0,r2
+ lsr r2,r2,1
+ add r17,r2,r0
+ sub r1,r3,r16
+ movgteu r3,r1
+ movgteu r0,r17
+ sub r16,r16,1
+ jr r12
+ .rep 30
+ lsl r3,r3,1
+ sub r1,r3,r16
+ movgteu r3,r1
+ .endr
+ sub r2,r2,1 ; mask result bits from steps ...
+ and r3,r3,r2
+ orr r0,r0,r3 ; ... and combine with first bits.
+ nop
+.L0step:rts
+.Lret0: mov r0,0
+ rts
+ ENDFUNC(__udivsi3)
diff --git a/libgcc/config/epiphany/udivsi3.c b/libgcc/config/epiphany/udivsi3.c
new file mode 100644
index 00000000000..cd34c6d761c
--- /dev/null
+++ b/libgcc/config/epiphany/udivsi3.c
@@ -0,0 +1,114 @@
+/* Generic unsigned 32 bit division implementation.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef union { unsigned int i; float f; } fu;
+
+unsigned int __udivsi3 (unsigned int a, unsigned int b);
+
+unsigned int
+__udivsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int d, t, s0, s1, s2, r0, r1;
+ fu u0, u1, u2, u1b, u2b;
+
+ if (b > a)
+ return 0;
+
+ /* Compute difference in number of bits in S0. */
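+  /* ORing an operand into the mantissa of 2.0 (0x40000000) and then
+     subtracting 2.0 renormalizes it, so the exponent field (u.i >> 23)
+     ends up offset by the operand's bit length; values with bits above
+     bit 22 instead place their top nine bits on a 2^24 base
+     (0x4b800000), which yields the same offset.  S0 is therefore
+     bitlen (A) - bitlen (B).  */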
+ u0.i = 0x40000000;
+ u1b.i = u2b.i = u0.i;
+ u1.i = a;
+ u2.i = b;
+ u1.i = a | u0.i;
+ t = 0x4b800000 | ((a >> 23) & 0xffff);
+ if (a >> 23)
+ {
+ u1.i = t;
+ u1b.i = 0x4b800000;
+ }
+ u2.i = b | u0.i;
+ t = 0x4b800000 | ((b >> 23) & 0xffff);
+ if (b >> 23)
+ {
+ u2.i = t;
+ u2b.i = 0x4b800000;
+ }
+ u1.f = u1.f - u1b.f;
+ u2.f = u2.f - u2b.f;
+ s1 = u1.i >> 23;
+ s2 = u2.i >> 23;
+ s0 = s1 - s2;
+
+ b <<= s0;
+ d = b - 1;
+
+ r0 = 1 << s0;
+ r1 = 0;
+ t = a - b;
+ if (t <= a)
+ {
+ a = t;
+ r1 = r0;
+ }
+
+#define STEP(n) case n: a += a; t = a - d; if (t <= a) a = t;
+ switch (s0)
+ {
+ STEP (31)
+ STEP (30)
+ STEP (29)
+ STEP (28)
+ STEP (27)
+ STEP (26)
+ STEP (25)
+ STEP (24)
+ STEP (23)
+ STEP (22)
+ STEP (21)
+ STEP (20)
+ STEP (19)
+ STEP (18)
+ STEP (17)
+ STEP (16)
+ STEP (15)
+ STEP (14)
+ STEP (13)
+ STEP (12)
+ STEP (11)
+ STEP (10)
+ STEP (9)
+ STEP (8)
+ STEP (7)
+ STEP (6)
+ STEP (5)
+ STEP (4)
+ STEP (3)
+ STEP (2)
+ STEP (1)
+ case 0: ;
+ }
+ r0 = r1 | (r0-1 & a);
+ return r0;
+}
diff --git a/libgcc/config/epiphany/umodsi3-float.S b/libgcc/config/epiphany/umodsi3-float.S
new file mode 100644
index 00000000000..ca5db48906c
--- /dev/null
+++ b/libgcc/config/epiphany/umodsi3-float.S
@@ -0,0 +1,63 @@
+/* Unsigned 32 bit modulo optimized for Epiphany.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+/* Because a dividend with bit 31 set is handled with truncating
+   integer arithmetic, there is no rounding-related overflow.  */
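+/* The float conversions estimate the operands' bit lengths; a computed
+   jump then enters the unrolled STEP sequence at the matching shift
+   count, leaving the remainder in r0.  */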
+ FSTAB (__umodsi3,T_UINT)
+ .global SYM(__umodsi3)
+ .balign 4
+ HIDDEN_FUNC(__umodsi3)
+SYM(__umodsi3):
+ float r2,r0
+ mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
+ float TMP2,r1
+ movt TMP1,%high(0xb0800000)
+ asr TMP0,r0,8
+ sub TMP0,TMP0,TMP1
+ mov TMP1,%low(.L0step)
+ movgteu r2,TMP0
+ sub r2,r2,TMP2
+ blteu .L0step
+ asr r2,r2,23
+ movt TMP1,%high(.L0step)
+ lsl TMP2,r2,3
+ lsl r2,r1,r2` sub r2,r0,r2` movgteu r0,r2 ; STEP(r2)
+ sub r2,TMP1,TMP2
+ jr r2
+#define STEP(n) lsl.l r2,r1,n` sub r2,r0,r2` movgteu r0,r2
+ .balign 8,,2
+ STEP(31)` STEP(30)` STEP(29)` STEP(28)`
+ STEP(27)` STEP(26)` STEP(25)` STEP(24)`
+ STEP(23)` STEP(22)` STEP(21)` STEP(20)`
+ STEP(19)` STEP(18)` STEP(17)` STEP(16)`
+ STEP(15)` STEP(14)` STEP(13)` STEP(12)`
+ STEP(11)` STEP(10)` STEP(9)` STEP(8)`
+ STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
+.L0step:STEP(0)
+.Lret_r0:
+ rts
+ ENDFUNC(__umodsi3)
diff --git a/libgcc/config/epiphany/umodsi3.S b/libgcc/config/epiphany/umodsi3.S
new file mode 100644
index 00000000000..6f808fdc7aa
--- /dev/null
+++ b/libgcc/config/epiphany/umodsi3.S
@@ -0,0 +1,70 @@
+/* Unsigned 32 bit modulo optimized for Epiphany.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "epiphany-asm.h"
+
+ FSTAB (__umodsi3,T_UINT)
+ .global SYM(__umodsi3)
+ .balign 4
+ HIDDEN_FUNC(__umodsi3)
+SYM(__umodsi3):
+ mov r2,5
+ lsl r2,r2,29 ; 0xa0000000
+ orr r3,r2,r0
+ lsr r15,r0,16
+ movt r15,0xa800
+ movne r3,r15
+ lsr r16,r2,2 ; 0x28000000
+ and r15,r3,r16
+ fadd r12,r3,r15
+ orr r3,r2,r1
+ lsr r2,r1,16
+ movt r2,0xa800
+ movne r3,r2
+ and r2,r16,r3
+ fadd r3,r3,r2
+ sub r2,r0,r1
+ bltu .Lret_a
+ lsr r12,r12,23
+ mov r2,%low(.L0step)
+ movt r2,%high(.L0step)
+ lsr r3,r3,23
+ sub r3,r12,r3 ; calculate bit number difference.
+ lsl r3,r3,3
+ sub r2,r2,r3
+ jr r2
+/* lsl.l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
+#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
+ .balign 8,,2
+ STEP(31)` STEP(30)` STEP(29)` STEP(28)`
+ STEP(27)` STEP(26)` STEP(25)` STEP(24)`
+ STEP(23)` STEP(22)` STEP(21)` STEP(20)`
+ STEP(19)` STEP(18)` STEP(17)` STEP(16)`
+ STEP(15)` STEP(14)` STEP(13)` STEP(12)`
+ STEP(11)` STEP(10)` STEP(9)` STEP(8)`
+ STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
+.L0step:STEP(0)
+.Lret_a:rts
+ ENDFUNC(__umodsi3)
diff --git a/libgcc/config/epiphany/umodsi3.c b/libgcc/config/epiphany/umodsi3.c
new file mode 100644
index 00000000000..609a250f14a
--- /dev/null
+++ b/libgcc/config/epiphany/umodsi3.c
@@ -0,0 +1,101 @@
+/* Generic unsigned 32 bit modulo implementation.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef union { unsigned int i; float f; } fu;
+
+unsigned int __umodsi3 (unsigned int a, unsigned int b);
+
+unsigned int
+__umodsi3 (unsigned int a, unsigned int b)
+{
+ unsigned int d, t, s0, s1, s2, r0, r1;
+ fu u0, u1, u2, u1b, u2b;
+
+ if (b > a)
+ return a;
+
+ /* Compute difference in number of bits in S0. */
+ u0.i = 0x40000000;
+ u1b.i = u2b.i = u0.i;
+ u1.i = a;
+ u2.i = b;
+ u1.i = a | u0.i;
+ t = 0x4b800000 | ((a >> 23) & 0xffff);
+ if (a >> 23)
+ {
+ u1.i = t;
+ u1b.i = 0x4b800000;
+ }
+ u2.i = b | u0.i;
+ t = 0x4b800000 | ((b >> 23) & 0xffff);
+ if (b >> 23)
+ {
+ u2.i = t;
+ u2b.i = 0x4b800000;
+ }
+ u1.f = u1.f - u1b.f;
+ u2.f = u2.f - u2b.f;
+ s1 = u1.i >> 23;
+ s2 = u2.i >> 23;
+ s0 = s1 - s2;
+
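+/* Each STEP subtracts B << N from A when it fits; T <= A checks that
+   the unsigned subtraction did not wrap.  Falling through from case S0
+   down to case 0 leaves the remainder in A.  */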
+#define STEP(n) case n: d = b << n; t = a - d; if (t <= a) a = t;
+ switch (s0)
+ {
+ STEP (31)
+ STEP (30)
+ STEP (29)
+ STEP (28)
+ STEP (27)
+ STEP (26)
+ STEP (25)
+ STEP (24)
+ STEP (23)
+ STEP (22)
+ STEP (21)
+ STEP (20)
+ STEP (19)
+ STEP (18)
+ STEP (17)
+ STEP (16)
+ STEP (15)
+ STEP (14)
+ STEP (13)
+ STEP (12)
+ STEP (11)
+ STEP (10)
+ STEP (9)
+ STEP (8)
+ STEP (7)
+ STEP (6)
+ STEP (5)
+ STEP (4)
+ STEP (3)
+ STEP (2)
+ STEP (1)
+ STEP (0)
+ }
+ return a;
+}
diff --git a/libgcc/config/fr30/crti.S b/libgcc/config/fr30/crti.S
new file mode 100644
index 00000000000..4ce61231bd7
--- /dev/null
+++ b/libgcc/config/fr30/crti.S
@@ -0,0 +1,61 @@
+# crti.S for ELF
+
+# Copyright (C) 1992, 1998, 1999, 2008, 2009 Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+ .section ".init"
+ .global _init
+ .type _init,#function
+ .align 4
+_init:
+ st rp, @-r15
+ enter #4
+
+ # These nops are here to align the end of this code with a 16 byte
+ # boundary. The linker will start inserting code into the .init
+ # section at such a boundary.
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+
+ .section ".fini"
+ .global _fini
+ .type _fini,#function
+ .align 4
+_fini:
+ st rp, @-r15
+ enter #4
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
diff --git a/libgcc/config/fr30/crtn.S b/libgcc/config/fr30/crtn.S
new file mode 100644
index 00000000000..c62d37b6671
--- /dev/null
+++ b/libgcc/config/fr30/crtn.S
@@ -0,0 +1,44 @@
+# crtn.S for ELF
+
+# Copyright (C) 1992, 1999, 2008, 2009, 2011 Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .section ".init"
+ .align 4
+
+ leave
+ ld @r15+,rp
+ ret
+
+
+ .section ".fini"
+ .align 4
+
+ leave
+ ld @r15+,rp
+ ret
+
+# Th-th-th-that is all folks!
diff --git a/libgcc/config/fr30/lib1funcs.S b/libgcc/config/fr30/lib1funcs.S
new file mode 100644
index 00000000000..7c63453123a
--- /dev/null
+++ b/libgcc/config/fr30/lib1funcs.S
@@ -0,0 +1,115 @@
+/* libgcc routines for the FR30.
+ Copyright (C) 1998, 1999, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .macro FUNC_START name
+ .text
+ .globl __\name
+ .type __\name, @function
+__\name:
+ .endm
+
+ .macro FUNC_END name
+ .size __\name, . - __\name
+ .endm
+
+ .macro DIV_BODY reg number
+ .if \number
+ DIV_BODY \reg, "\number - 1"
+ div1 \reg
+ .endif
+ .endm
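+
+	;; DIV_BODY expands recursively into \number copies of "div1 \reg",
+	;; so DIV_BODY r5 32 below emits the 32 division steps inline.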
+
+#ifdef L_udivsi3
+FUNC_START udivsi3
+	;; Perform an unsigned division of r4 / r5 and place the result in r4.
+ ;; Does not handle overflow yet...
+ mov r4, mdl
+ div0u r5
+ DIV_BODY r5 32
+ mov mdl, r4
+ ret
+FUNC_END udivsi3
+#endif /* L_udivsi3 */
+
+#ifdef L_divsi3
+FUNC_START divsi3
+	;; Perform a signed division of r4 / r5 and place the result in r4.
+ ;; Does not handle overflow yet...
+ mov r4, mdl
+ div0s r5
+ DIV_BODY r5 32
+ div2 r5
+ div3
+ div4s
+ mov mdl, r4
+ ret
+FUNC_END divsi3
+#endif /* L_divsi3 */
+
+#ifdef L_umodsi3
+FUNC_START umodsi3
+	;; Perform an unsigned division of r4 / r5 and place the remainder in r4.
+ ;; Does not handle overflow yet...
+ mov r4, mdl
+ div0u r5
+ DIV_BODY r5 32
+ mov mdh, r4
+ ret
+FUNC_END umodsi3
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+FUNC_START modsi3
+	;; Perform a signed division of r4 / r5 and place the remainder in r4.
+ ;; Does not handle overflow yet...
+ mov r4, mdl
+ div0s r5
+ DIV_BODY r5 32
+ div2 r5
+ div3
+ div4s
+ mov mdh, r4
+ ret
+FUNC_END modsi3
+#endif /* L_modsi3 */
+
+#ifdef L_negsi2
+FUNC_START negsi2
+ ldi:8 #0, r0
+ sub r4, r0
+ mov r0, r4
+ ret
+FUNC_END negsi2
+#endif /* L_negsi2 */
+
+#ifdef L_one_cmplsi2
+FUNC_START one_cmplsi2
+ ldi:8 #0xff, r0
+ extsb r0
+ eor r0, r4
+ ret
+FUNC_END one_cmplsi2
+#endif /* L_one_cmplsi2 */
+
+
diff --git a/libgcc/config/fr30/t-fr30 b/libgcc/config/fr30/t-fr30
new file mode 100644
index 00000000000..ee5ed9a127e
--- /dev/null
+++ b/libgcc/config/fr30/t-fr30
@@ -0,0 +1,2 @@
+LIB1ASMSRC = fr30/lib1funcs.S
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3
diff --git a/libgcc/config/frv/cmovd.c b/libgcc/config/frv/cmovd.c
new file mode 100644
index 00000000000..e46070aac04
--- /dev/null
+++ b/libgcc/config/frv/cmovd.c
@@ -0,0 +1,51 @@
+/* Move double-word library function.
+ Copyright (C) 2000, 2003, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+void
+__cmovd (long long *dest, const long long *src, unsigned len)
+{
+ unsigned i;
+ unsigned num = len >> 3;
+ unsigned xlen = len & ~7;
+ char *dest_byte = (char *)dest;
+ const char *src_byte = (const char *)src;
+
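+  /* If the destination starts outside the source region, copy forward
+     one doubleword at a time and finish the tail bytewise (XLEN is LEN
+     rounded down to a multiple of 8); otherwise copy backwards byte by
+     byte, as memmove would.  */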
+ if (dest_byte < src_byte || dest_byte > src_byte+len)
+ {
+ for (i = 0; i < num; i++)
+ dest[i] = src[i];
+
+ while (len > xlen)
+ {
+ dest_byte[xlen] = src_byte[xlen];
+ xlen++;
+ }
+ }
+ else
+ {
+ while (len-- > 0)
+ dest_byte[len] = src_byte[len];
+ }
+}
diff --git a/libgcc/config/frv/cmovh.c b/libgcc/config/frv/cmovh.c
new file mode 100644
index 00000000000..6b0901d95a7
--- /dev/null
+++ b/libgcc/config/frv/cmovh.c
@@ -0,0 +1,47 @@
+/* Move half-word library function.
+ Copyright (C) 2000, 2003, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+void
+__cmovh (short *dest, const short *src, unsigned len)
+{
+ unsigned i;
+ unsigned num = len >> 1;
+ char *dest_byte = (char *)dest;
+ const char *src_byte = (const char *)src;
+
+ if (dest_byte < src_byte || dest_byte > src_byte+len)
+ {
+ for (i = 0; i < num; i++)
+ dest[i] = src[i];
+
+ if ((len & 1) != 0)
+ dest_byte[len-1] = src_byte[len-1];
+ }
+ else
+ {
+ while (len-- > 0)
+ dest_byte[len] = src_byte[len];
+ }
+}
diff --git a/libgcc/config/frv/cmovw.c b/libgcc/config/frv/cmovw.c
new file mode 100644
index 00000000000..f27db75aaf6
--- /dev/null
+++ b/libgcc/config/frv/cmovw.c
@@ -0,0 +1,51 @@
+/* Move word library function.
+ Copyright (C) 2000, 2003, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+void
+__cmovw (int *dest, const int *src, unsigned len)
+{
+ unsigned i;
+ unsigned num = len >> 2;
+ unsigned xlen = len & ~3;
+ char *dest_byte = (char *)dest;
+ const char *src_byte = (const char *)src;
+
+ if (dest_byte < src_byte || dest_byte > src_byte+len)
+ {
+ for (i = 0; i < num; i++)
+ dest[i] = src[i];
+
+ while (len > xlen)
+ {
+ dest_byte[xlen] = src_byte[xlen];
+ xlen++;
+ }
+ }
+ else
+ {
+ while (len-- > 0)
+ dest_byte[len] = src_byte[len];
+ }
+}
diff --git a/libgcc/config/frv/frvbegin.c b/libgcc/config/frv/frvbegin.c
new file mode 100644
index 00000000000..23cbf1ecc93
--- /dev/null
+++ b/libgcc/config/frv/frvbegin.c
@@ -0,0 +1,157 @@
+/* Frv initialization file linked before all user modules
+ Copyright (C) 1999, 2000, 2003, 2004, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+ This file was originally taken from the file crtstuff.c in the
+ main compiler directory, and simplified. */
+
+#include "defaults.h"
+#include <stddef.h>
+#include "../libgcc/unwind-dw2-fde.h"
+#include "gbl-ctors.h"
+
+/* Declare a pointer to void function type. */
+#define STATIC static
+
+#ifdef __FRV_UNDERSCORE__
+#define UNDERSCORE "_"
+#else
+#define UNDERSCORE ""
+#endif
+
+#define INIT_SECTION_NEG_ONE(SECTION, FLAGS, NAME) \
+__asm__ (".section " SECTION "," FLAGS "\n\t" \
+ ".globl " UNDERSCORE NAME "\n\t" \
+ ".type " UNDERSCORE NAME ",@object\n\t" \
+ ".p2align 2\n" \
+ UNDERSCORE NAME ":\n\t" \
+ ".word -1\n\t" \
+ ".previous")
+
+#define INIT_SECTION(SECTION, FLAGS, NAME) \
+__asm__ (".section " SECTION "," FLAGS "\n\t" \
+ ".globl " UNDERSCORE NAME "\n\t" \
+ ".type " UNDERSCORE NAME ",@object\n\t" \
+ ".p2align 2\n" \
+ UNDERSCORE NAME ":\n\t" \
+ ".previous")
+
+/* Beginning of .ctor/.dtor sections that provide a list of constructors and
+ destructors to run. */
+
+INIT_SECTION_NEG_ONE (".ctors", "\"aw\"", "__CTOR_LIST__");
+INIT_SECTION_NEG_ONE (".dtors", "\"aw\"", "__DTOR_LIST__");
+
+/* Beginning of .eh_frame section that provides all of the exception handling
+ tables. */
+
+INIT_SECTION (".eh_frame", "\"aw\"", "__EH_FRAME_BEGIN__");
+
+#if ! __FRV_FDPIC__
+/* In FDPIC, the linker itself generates this. */
+/* Beginning of .rofixup section that provides a list of pointers that we
+ need to adjust. */
+
+INIT_SECTION (".rofixup", "\"a\"", "__ROFIXUP_LIST__");
+#endif /* ! __FRV_FDPIC__ */
+
+extern void __frv_register_eh(void) __attribute__((__constructor__));
+extern void __frv_deregister_eh(void) __attribute__((__destructor__));
+
+extern func_ptr __EH_FRAME_BEGIN__[];
+
+/* Register the exception handling table as the first constructor. */
+void
+__frv_register_eh (void)
+{
+ static struct object object;
+ if (__register_frame_info)
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+}
+
+/* Note, do not declare __{,de}register_frame_info weak as it seems
+ to interfere with the pic support. */
+
+/* Unregister the exception handling table as a destructor. */
+void
+__frv_deregister_eh (void)
+{
+ static int completed = 0;
+
+ if (completed)
+ return;
+
+ if (__deregister_frame_info)
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+
+ completed = 1;
+}
+
+/* Run the global destructors. */
+void
+__do_global_dtors (void)
+{
+ static func_ptr *p = __DTOR_LIST__ + 1;
+ while (*p)
+ {
+ p++;
+ (*(p-1)) ();
+ }
+}
+
+/* Run the global constructors. */
+void
+__do_global_ctors (void)
+{
+ unsigned long nptrs = (unsigned long) __CTOR_LIST__[0];
+ unsigned i;
+
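+  /* The first word of __CTOR_LIST__ holds either the number of entries
+     or -1, in which case the list is null-terminated and must be
+     counted here.  The constructors are then run in reverse order.  */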
+ if (nptrs == (unsigned long)-1)
+ for (nptrs = 0; __CTOR_LIST__[nptrs + 1] != 0; nptrs++);
+
+ for (i = nptrs; i >= 1; i--)
+ __CTOR_LIST__[i] ();
+
+ atexit (__do_global_dtors);
+}
+
+/* Subroutine called automatically by `main'.
+ Compiling a global function named `main'
+ produces an automatic call to this function at the beginning.
+
+ For many systems, this routine calls __do_global_ctors.
+ For systems which support a .init section we use the .init section
+ to run __do_global_ctors, so we need not do anything here. */
+
+void
+__main (void)
+{
+ /* Support recursive calls to `main': run initializers just once. */
+ static int initialized;
+ if (! initialized)
+ {
+ initialized = 1;
+ __do_global_ctors ();
+ }
+}
diff --git a/libgcc/config/frv/frvend.c b/libgcc/config/frv/frvend.c
new file mode 100644
index 00000000000..0bb07b56b4a
--- /dev/null
+++ b/libgcc/config/frv/frvend.c
@@ -0,0 +1,70 @@
+/* Frv initialization file linked after all user modules
+ Copyright (C) 1999, 2000, 2003, 2004, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "defaults.h"
+#include <stddef.h>
+#include "../libgcc/unwind-dw2-fde.h"
+
+#ifdef __FRV_UNDERSCORE__
+#define UNDERSCORE "_"
+#else
+#define UNDERSCORE ""
+#endif
+
+#define FINI_SECTION_ZERO(SECTION, FLAGS, NAME) \
+__asm__ (".section " SECTION "," FLAGS "\n\t" \
+ ".globl " UNDERSCORE NAME "\n\t" \
+ ".type " UNDERSCORE NAME ",@object\n\t" \
+ ".p2align 2\n" \
+ UNDERSCORE NAME ":\n\t" \
+ ".word 0\n\t" \
+ ".previous")
+
+#define FINI_SECTION(SECTION, FLAGS, NAME) \
+__asm__ (".section " SECTION "," FLAGS "\n\t" \
+ ".globl " UNDERSCORE NAME "\n\t" \
+ ".type " UNDERSCORE NAME ",@object\n\t" \
+ ".p2align 2\n" \
+ UNDERSCORE NAME ":\n\t" \
+ ".previous")
+
+/* End of .ctor/.dtor sections that provide a list of constructors and
+ destructors to run. */
+
+FINI_SECTION_ZERO (".ctors", "\"aw\"", "__CTOR_END__");
+FINI_SECTION_ZERO (".dtors", "\"aw\"", "__DTOR_END__");
+
+/* End of .eh_frame section that provides all of the exception handling
+ tables. */
+
+FINI_SECTION_ZERO (".eh_frame", "\"aw\"", "__FRAME_END__");
+
+#if ! __FRV_FDPIC__
+/* In FDPIC, the linker itself generates this. */
+/* End of .rofixup section that provides a list of pointers that we
+ need to adjust. */
+
+FINI_SECTION (".rofixup", "\"a\"", "__ROFIXUP_END__");
+#endif /* ! __FRV_FDPIC__ */
diff --git a/libgcc/config/frv/lib1funcs.S b/libgcc/config/frv/lib1funcs.S
new file mode 100644
index 00000000000..d1ffcab6133
--- /dev/null
+++ b/libgcc/config/frv/lib1funcs.S
@@ -0,0 +1,269 @@
+/* Library functions.
+ Copyright (C) 2000, 2003, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software ; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <frv-asm.h>
+
+
+#ifdef L_cmpll
+/* icc0 = __cmpll (long long a, long long b) */
+
+ .globl EXT(__cmpll)
+ .type EXT(__cmpll),@function
+ .text
+ .p2align 4
+EXT(__cmpll):
+ cmp gr8, gr10, icc0
+ ckeq icc0, cc4
+ P(ccmp) gr9, gr11, cc4, 1
+ ret
+.Lend:
+ .size EXT(__cmpll),.Lend-EXT(__cmpll)
+#endif /* L_cmpll */
+
+#ifdef L_cmpf
+/* icc0 = __cmpf (float a, float b) */
+/* Note: because this function returns the result in ICC0, it cannot
+   handle NaNs. */
+
+ .globl EXT(__cmpf)
+ .type EXT(__cmpf),@function
+ .text
+ .p2align 4
+EXT(__cmpf):
+#ifdef __FRV_HARD_FLOAT__ /* floating point instructions available */
+ movgf gr8, fr0
+ P(movgf) gr9, fr1
+ setlos #1, gr8
+ fcmps fr0, fr1, fcc0
+ P(fcklt) fcc0, cc0
+ fckeq fcc0, cc1
+ csub gr0, gr8, gr8, cc0, 1
+ cmov gr0, gr8, cc1, 1
+ cmpi gr8, 0, icc0
+ ret
+#else /* no floating point instructions available */
+ movsg lr, gr4
+ addi sp, #-16, sp
+ sti gr4, @(sp, 8)
+ st fp, @(sp, gr0)
+ mov sp, fp
+ call EXT(__cmpsf2)
+ cmpi gr8, #0, icc0
+ ldi @(sp, 8), gr4
+ movgs gr4, lr
+ ld @(sp,gr0), fp
+ addi sp, #16, sp
+ ret
+#endif
+.Lend:
+ .size EXT(__cmpf),.Lend-EXT(__cmpf)
+#endif
+
+#ifdef L_cmpd
+/* icc0 = __cmpd (double a, double b) */
+/* Note: because this function returns the result in ICC0, it cannot
+   handle NaNs. */
+
+ .globl EXT(__cmpd)
+ .type EXT(__cmpd),@function
+ .text
+ .p2align 4
+EXT(__cmpd):
+ movsg lr, gr4
+ addi sp, #-16, sp
+ sti gr4, @(sp, 8)
+ st fp, @(sp, gr0)
+ mov sp, fp
+ call EXT(__cmpdf2)
+ cmpi gr8, #0, icc0
+ ldi @(sp, 8), gr4
+ movgs gr4, lr
+ ld @(sp,gr0), fp
+ addi sp, #16, sp
+ ret
+.Lend:
+ .size EXT(__cmpd),.Lend-EXT(__cmpd)
+#endif
+
+#ifdef L_addll
+/* gr8,gr9 = __addll (long long a, long long b) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__addll)
+ .type EXT(__addll),@function
+ .text
+	.p2align 4
+EXT(__addll):
+ addcc gr9, gr11, gr9, icc0
+ addx gr8, gr10, gr8, icc0
+ ret
+.Lend:
+ .size EXT(__addll),.Lend-EXT(__addll)
+#endif
+
+#ifdef L_subll
+/* gr8,gr9 = __subll (long long a, long long b) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__subll)
+ .type EXT(__subll),@function
+ .text
+ .p2align 4
+EXT(__subll):
+ subcc gr9, gr11, gr9, icc0
+ subx gr8, gr10, gr8, icc0
+ ret
+.Lend:
+ .size EXT(__subll),.Lend-EXT(__subll)
+#endif
+
+#ifdef L_andll
+/* gr8,gr9 = __andll (long long a, long long b) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__andll)
+ .type EXT(__andll),@function
+ .text
+ .p2align 4
+EXT(__andll):
+ P(and) gr9, gr11, gr9
+ P2(and) gr8, gr10, gr8
+ ret
+.Lend:
+ .size EXT(__andll),.Lend-EXT(__andll)
+#endif
+
+#ifdef L_orll
+/* gr8,gr9 = __orll (long long a, long long b) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__orll)
+ .type EXT(__orll),@function
+ .text
+ .p2align 4
+EXT(__orll):
+ P(or) gr9, gr11, gr9
+ P2(or) gr8, gr10, gr8
+ ret
+.Lend:
+ .size EXT(__orll),.Lend-EXT(__orll)
+#endif
+
+#ifdef L_xorll
+/* gr8,gr9 = __xorll (long long a, long long b) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__xorll)
+ .type EXT(__xorll),@function
+ .text
+ .p2align 4
+EXT(__xorll):
+ P(xor) gr9, gr11, gr9
+ P2(xor) gr8, gr10, gr8
+ ret
+.Lend:
+ .size EXT(__xorll),.Lend-EXT(__xorll)
+#endif
+
+#ifdef L_notll
+/* gr8,gr9 = __notll (long long a) */
+/* Note, gcc will never call this function, but it is present in case an
+ ABI program calls it. */
+
+ .globl EXT(__notll)
+ .type EXT(__notll),@function
+ .text
+ .p2align 4
+EXT(__notll):
+ P(not) gr9, gr9
+ P2(not) gr8, gr8
+ ret
+.Lend:
+ .size EXT(__notll),.Lend-EXT(__notll)
+#endif
+
+#ifdef L_cmov
+/* (void) __cmov (char *dest, const char *src, size_t len) */
+/*
+ * void __cmov (char *dest, const char *src, size_t len)
+ * {
+ * size_t i;
+ *
+ * if (dest < src || dest > src+len)
+ * {
+ * for (i = 0; i < len; i++)
+ * dest[i] = src[i];
+ * }
+ * else
+ * {
+ * while (len-- > 0)
+ * dest[len] = src[len];
+ * }
+ * }
+ */
+
+ .globl EXT(__cmov)
+ .type EXT(__cmov),@function
+ .text
+ .p2align 4
+EXT(__cmov):
+ P(cmp) gr8, gr9, icc0
+ add gr9, gr10, gr4
+ P(cmp) gr8, gr4, icc1
+ bc icc0, 0, .Lfwd
+ bls icc1, 0, .Lback
+.Lfwd:
+ /* move bytes in a forward direction */
+ P(setlos) #0, gr5
+ cmp gr0, gr10, icc0
+ P(subi) gr9, #1, gr9
+ P2(subi) gr8, #1, gr8
+ bnc icc0, 0, .Lret
+.Lfloop:
+ /* forward byte move loop */
+ addi gr5, #1, gr5
+ P(ldsb) @(gr9, gr5), gr4
+ cmp gr5, gr10, icc0
+ P(stb) gr4, @(gr8, gr5)
+ bc icc0, 0, .Lfloop
+ ret
+.Lbloop:
+ /* backward byte move loop body */
+ ldsb @(gr9,gr10),gr4
+ stb gr4,@(gr8,gr10)
+.Lback:
+ P(cmpi) gr10, #0, icc0
+ addi gr10, #-1, gr10
+ bne icc0, 0, .Lbloop
+.Lret:
+ ret
+.Lend:
+ .size EXT(__cmov),.Lend-EXT(__cmov)
+#endif
diff --git a/libgcc/config/frv/libgcc-glibc.ver b/libgcc/config/frv/libgcc-glibc.ver
new file mode 100644
index 00000000000..6e27b4f9b85
--- /dev/null
+++ b/libgcc/config/frv/libgcc-glibc.ver
@@ -0,0 +1,73 @@
+# Copyright (C) 2004 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_3.4 {
+ # frv abi symbol names
+ __ftod
+ __ftoi
+ __ftoui
+ __dtoi
+ __dtoui
+ __ftoll
+ __dtoll
+ __ftoull
+ __dtoull
+ __itof
+ __lltof
+ __dtof
+ __itod
+ __lltod
+ __addd
+ __subd
+ __muld
+ __divd
+ __addf
+ __subf
+ __mulf
+ __divf
+ __sllll
+ __srlll
+ __srall
+ __addll
+ __subll
+ __mulll
+ __umulll
+ __divll
+ __udivll
+ __modll
+ __umodll
+ __cmpll
+ __cmpf
+ __cmpd
+ __andll
+ __orll
+ __xorll
+ __notll
+ __cmov
+ __cmovd
+ __cmovh
+ __cmovw
+ __modi
+ __uitod
+ __uitof
+ __ulltod
+ __ulltof
+ __umodi
+}
diff --git a/libgcc/config/frv/modi.c b/libgcc/config/frv/modi.c
new file mode 100644
index 00000000000..d5a91fc0f55
--- /dev/null
+++ b/libgcc/config/frv/modi.c
@@ -0,0 +1,4 @@
+int __modi (int a, int b)
+{
+ return a % b;
+}
diff --git a/libgcc/config/frv/t-frv b/libgcc/config/frv/t-frv
new file mode 100644
index 00000000000..a4ff0585183
--- /dev/null
+++ b/libgcc/config/frv/t-frv
@@ -0,0 +1,22 @@
+LIB1ASMSRC = frv/lib1funcs.S
+LIB1ASMFUNCS = _cmpll _cmpf _cmpd _addll _subll _andll _orll _xorll _notll _cmov
+
+LIB2ADD = $(srcdir)/config/frv/cmovh.c \
+ $(srcdir)/config/frv/cmovw.c \
+ $(srcdir)/config/frv/cmovd.c \
+ $(srcdir)/config/frv/modi.c \
+ $(srcdir)/config/frv/umodi.c \
+ $(srcdir)/config/frv/uitof.c \
+ $(srcdir)/config/frv/uitod.c \
+ $(srcdir)/config/frv/ulltof.c \
+ $(srcdir)/config/frv/ulltod.c
+
+# Compile two additional files that are linked with every program
+# linked using GCC on systems using COFF or ELF, for the sake of C++
+# constructors.
+
+frvbegin$(objext): $(srcdir)/config/frv/frvbegin.c
+ $(gcc_compile) -c $<
+
+frvend$(objext): $(srcdir)/config/frv/frvend.c
+ $(gcc_compile) -c $<
diff --git a/libgcc/config/frv/t-linux b/libgcc/config/frv/t-linux
new file mode 100644
index 00000000000..0240efefae9
--- /dev/null
+++ b/libgcc/config/frv/t-linux
@@ -0,0 +1,3 @@
+CRTSTUFF_T_CFLAGS = $(PICFLAG)
+
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/frv/libgcc-glibc.ver
diff --git a/libgcc/config/frv/uitod.c b/libgcc/config/frv/uitod.c
new file mode 100644
index 00000000000..14290ab6b04
--- /dev/null
+++ b/libgcc/config/frv/uitod.c
@@ -0,0 +1,4 @@
+double __uitod (unsigned int a)
+{
+ return a;
+}
diff --git a/libgcc/config/frv/uitof.c b/libgcc/config/frv/uitof.c
new file mode 100644
index 00000000000..059bc7c7417
--- /dev/null
+++ b/libgcc/config/frv/uitof.c
@@ -0,0 +1,4 @@
+float __uitof (unsigned int a)
+{
+ return a;
+}
diff --git a/libgcc/config/frv/ulltod.c b/libgcc/config/frv/ulltod.c
new file mode 100644
index 00000000000..e6bee12081f
--- /dev/null
+++ b/libgcc/config/frv/ulltod.c
@@ -0,0 +1,4 @@
+double __ulltod (unsigned long long a)
+{
+ return a;
+}
diff --git a/libgcc/config/frv/ulltof.c b/libgcc/config/frv/ulltof.c
new file mode 100644
index 00000000000..29cdfd4d2a1
--- /dev/null
+++ b/libgcc/config/frv/ulltof.c
@@ -0,0 +1,4 @@
+float __ulltof (unsigned long long a)
+{
+ return a;
+}
diff --git a/libgcc/config/frv/umodi.c b/libgcc/config/frv/umodi.c
new file mode 100644
index 00000000000..4ffe5ad8132
--- /dev/null
+++ b/libgcc/config/frv/umodi.c
@@ -0,0 +1,4 @@
+unsigned int __umodi (unsigned int a, unsigned int b)
+{
+ return a % b;
+}
diff --git a/libgcc/config/gthr-lynx.h b/libgcc/config/gthr-lynx.h
new file mode 100644
index 00000000000..d9211b0e556
--- /dev/null
+++ b/libgcc/config/gthr-lynx.h
@@ -0,0 +1,61 @@
+/* Threads compatibility routines for libgcc2 and libobjc for
+ LynxOS. */
+/* Compile this one with gcc. */
+/* Copyright (C) 2004, 2008, 2009, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_LYNX_H
+#define GCC_GTHR_LYNX_H
+
+#ifdef _MULTITHREADED
+
+/* Using the macro version of pthread_setspecific leads to a
+   compilation error.  Instead we have two choices: either kill all
+   macros in pthread.h by defining _POSIX_THREADS_CALLS, or undefine
+ individual macros where we should fall back on the function
+ implementation. We choose the second approach. */
+
+#include <pthread.h>
+#undef pthread_setspecific
+
+/* When using static libc on LynxOS, we cannot define pthread_create
+ weak. If the multi-threaded application includes iostream.h,
+ gthr-posix.h is included and pthread_create will be defined weak.
+   If pthread_create is weak, its defining module in libc is not
+   necessarily included in the link and the symbol resolves to zero,
+   so the first call to it will crash.
+
+ Since -mthreads is a multilib switch on LynxOS we know that at this
+ point we are compiling for multi-threaded. Omitting the weak
+ definitions at this point should have no effect. */
+
+#undef GTHREAD_USE_WEAK
+#define GTHREAD_USE_WEAK 0
+
+#include "gthr-posix.h"
+
+#else
+#include "gthr-single.h"
+#endif
+
+#endif /* GCC_GTHR_LYNX_H */
diff --git a/libgcc/config/gthr-rtems.h b/libgcc/config/gthr-rtems.h
new file mode 100644
index 00000000000..c5bd52292cf
--- /dev/null
+++ b/libgcc/config/gthr-rtems.h
@@ -0,0 +1,157 @@
+/* RTEMS threads compatibility routines for libgcc2 and libobjc.
+   by Rosimildo da Silva (rdasilva@connecttel.com). */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1999, 2000, 2002, 2003, 2005, 2008, 2009
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_RTEMS_H
+#define GCC_GTHR_RTEMS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define __GTHREADS 1
+
+#define __GTHREAD_ONCE_INIT 0
+#define __GTHREAD_MUTEX_INIT_FUNCTION rtems_gxx_mutex_init
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION rtems_gxx_recursive_mutex_init
+
+/* Avoid dependency on RTEMS-specific headers. */
+typedef void *__gthread_key_t;
+typedef int __gthread_once_t;
+typedef void *__gthread_mutex_t;
+typedef void *__gthread_recursive_mutex_t;
+
+/*
+ * External functions provided by RTEMS. They are very similar to their POSIX
+ * counterparts.  A wrapper API is used to avoid a dependency on any
+ * RTEMS header files.
+ */
+
+/* generic per task variables */
+extern int rtems_gxx_once (__gthread_once_t *__once, void (*__func) (void));
+extern int rtems_gxx_key_create (__gthread_key_t *__key, void (*__dtor) (void *));
+extern int rtems_gxx_key_delete (__gthread_key_t __key);
+extern void *rtems_gxx_getspecific (__gthread_key_t __key);
+extern int rtems_gxx_setspecific (__gthread_key_t __key, const void *__ptr);
+
+/* mutex support */
+extern void rtems_gxx_mutex_init (__gthread_mutex_t *__mutex);
+extern int rtems_gxx_mutex_destroy (__gthread_mutex_t *__mutex);
+extern int rtems_gxx_mutex_lock (__gthread_mutex_t *__mutex);
+extern int rtems_gxx_mutex_trylock (__gthread_mutex_t *__mutex);
+extern int rtems_gxx_mutex_unlock (__gthread_mutex_t *__mutex);
+
+/* recursive mutex support */
+extern void rtems_gxx_recursive_mutex_init (__gthread_recursive_mutex_t *__mutex);
+extern int rtems_gxx_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex);
+extern int rtems_gxx_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex);
+extern int rtems_gxx_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex);
+
+/* RTEMS threading is always active */
+static inline int
+__gthread_active_p (void)
+{
+ return 1;
+}
+
+/* Wrapper calls */
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ return rtems_gxx_once( __once, __func );
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ return rtems_gxx_key_create( __key, __dtor );
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ return rtems_gxx_key_delete (__key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ return rtems_gxx_getspecific (__key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ return rtems_gxx_setspecific (__key, __ptr);
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ return rtems_gxx_mutex_destroy (__mutex);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ return rtems_gxx_mutex_lock (__mutex);
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ return rtems_gxx_mutex_trylock (__mutex);
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ return rtems_gxx_mutex_unlock( __mutex );
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ return rtems_gxx_recursive_mutex_lock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ return rtems_gxx_recursive_mutex_trylock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ return rtems_gxx_recursive_mutex_unlock( __mutex );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! GCC_GTHR_RTEMS_H */
diff --git a/libgcc/config/gthr-vxworks.h b/libgcc/config/gthr-vxworks.h
new file mode 100644
index 00000000000..63116c460a6
--- /dev/null
+++ b/libgcc/config/gthr-vxworks.h
@@ -0,0 +1,170 @@
+/* Threads compatibility routines for libgcc2 and libobjc for VxWorks. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1999, 2000, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Mike Stump <mrs@wrs.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_VXWORKS_H
+#define GCC_GTHR_VXWORKS_H
+
+#ifdef _LIBOBJC
+
+/* libobjc requires the optional pthreads component. */
+#include "gthr-posix.h"
+
+#else
+#ifdef __cplusplus
+#define UNUSED(x)
+#else
+#define UNUSED(x) x __attribute__((unused))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define __GTHREADS 1
+#define __gthread_active_p() 1
+
+/* Mutexes are easy, except that they need to be initialized at runtime. */
+
+#include <semLib.h>
+
+typedef SEM_ID __gthread_mutex_t;
+/* All VxWorks mutexes are recursive. */
+typedef SEM_ID __gthread_recursive_mutex_t;
+#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+
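+/* Expected calling protocol, as a sketch: a client declares a
+   __gthread_mutex_t, runs __GTHREAD_MUTEX_INIT_FUNCTION on it once,
+   then brackets critical sections with __gthread_mutex_lock and
+   __gthread_mutex_unlock:
+
+     __gthread_mutex_t m;
+     __GTHREAD_MUTEX_INIT_FUNCTION (&m);
+     __gthread_mutex_lock (&m);
+     ...
+     __gthread_mutex_unlock (&m);  */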
+static inline void
+__gthread_mutex_init_function (__gthread_mutex_t *mutex)
+{
+ *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(mutex))
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *mutex)
+{
+ return semTake (*mutex, WAIT_FOREVER);
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ return semTake (*mutex, NO_WAIT);
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ return semGive (*mutex);
+}
+
+static inline void
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
+{
+ __gthread_mutex_init_function (mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
+{
+ return __gthread_mutex_lock (mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
+{
+ return __gthread_mutex_trylock (mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
+{
+ return __gthread_mutex_unlock (mutex);
+}
+
+/* pthread_once is complicated enough that it's implemented
+ out-of-line. See config/vxlib.c. */
+
+typedef struct
+{
+#if !defined(__RTP__)
+#if defined(__PPC__)
+ __attribute ((aligned (__alignof (unsigned))))
+#endif
+ volatile unsigned char busy;
+#endif
+ volatile unsigned char done;
+#if !defined(__RTP__) && defined(__PPC__)
+ /* PPC's test-and-set implementation requires a 4 byte aligned
+ object, of which it only sets the first byte. We use padding
+ here, in order to maintain some amount of backwards
+ compatibility. Without this padding, gthread_once objects worked
+ by accident because they happen to be static objects and the ppc
+ port automatically increased their alignment to 4 bytes. */
+ unsigned char pad1;
+ unsigned char pad2;
+#endif
+}
+__gthread_once_t;
+
+#if defined (__RTP__)
+# define __GTHREAD_ONCE_INIT { 0 }
+#elif defined (__PPC__)
+# define __GTHREAD_ONCE_INIT { 0, 0, 0, 0 }
+#else
+# define __GTHREAD_ONCE_INIT { 0, 0 }
+#endif
+
+extern int __gthread_once (__gthread_once_t *__once, void (*__func)(void));
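+
+/* An illustrative use (init_routine is an assumed example function of
+ type void (*)(void), not something defined by this header):
+
+   static __gthread_once_t once_control = __GTHREAD_ONCE_INIT;
+   __gthread_once (&once_control, init_routine);
+
+ runs init_routine exactly once, however many threads get here. */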
+
+/* Thread-specific data requires a great deal of effort, since VxWorks
+ is not really set up for it. See config/vxlib.c for the gory
+ details. All the TSD routines are sufficiently complex that they
+ need to be implemented out of line. */
+
+typedef unsigned int __gthread_key_t;
+
+extern int __gthread_key_create (__gthread_key_t *__keyp, void (*__dtor)(void *));
+extern int __gthread_key_delete (__gthread_key_t __key);
+
+extern void *__gthread_getspecific (__gthread_key_t __key);
+extern int __gthread_setspecific (__gthread_key_t __key, void *__ptr);
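+
+/* A minimal usage sketch (illustrative only; key, dtor and p are
+ assumed example names, not part of this interface):
+
+   static __gthread_key_t key;
+   __gthread_key_create (&key, dtor);
+   __gthread_setspecific (key, p);
+   ... __gthread_getspecific (key) ...
+
+ Each thread observes only the value it stored under key. */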
+
+#undef UNUSED
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* not _LIBOBJC */
+
+#endif /* gthr-vxworks.h */
diff --git a/libgcc/config/h8300/clzhi2.c b/libgcc/config/h8300/clzhi2.c
new file mode 100644
index 00000000000..54db7b9c56b
--- /dev/null
+++ b/libgcc/config/h8300/clzhi2.c
@@ -0,0 +1,35 @@
+/* The implementation of __clzhi2.
+ Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
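+/* A worked example (illustrative): for X == 0x0100 the first set bit,
+ scanning from bit 15 down, is bit 8, so the loop stops at i == 7 and
+ 7 is returned. X == 0 runs the loop to completion and yields 16. */
+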
+int __clzhi2 (unsigned short x);
+
+int
+__clzhi2 (unsigned short x)
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ if (x & ((unsigned short) 1 << (15 - i)))
+ break;
+ return i;
+}
diff --git a/libgcc/config/h8300/crti.S b/libgcc/config/h8300/crti.S
new file mode 100644
index 00000000000..f58c3aac9dd
--- /dev/null
+++ b/libgcc/config/h8300/crti.S
@@ -0,0 +1,63 @@
+/* Copyright (C) 2001, 2002, 2009, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* The code in sections .init and .fini is supposed to be a single
+ regular function. The function in .init is called directly from
+ start in crt0.S. The function in .fini is registered with atexit()
+ in crt0.S as well.
+
+ crti.S contributes the prologue of a function to these sections,
+ and crtn.S supplies the epilogue. STARTFILE_SPEC should list
+ crti.o before any other object files that might add code to .init
+ or .fini sections, and ENDFILE_SPEC should list crtn.o after any
+ such object files. */
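+
+/* Illustratively, once everything is linked, each of these sections
+ reads as one function assembled from three sources:
+
+   __init:  <- label contributed below by crti.o
+     ...    <- bodies contributed by intervening object files
+     rts    <- epilogue contributed by crtn.o
+
+ and likewise for .fini with __fini. */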
+
+#ifdef __H8300H__
+#ifdef __NORMAL_MODE__
+ .h8300hn
+#else
+ .h8300h
+#endif
+#endif
+
+#ifdef __H8300S__
+#ifdef __NORMAL_MODE__
+ .h8300sn
+#else
+ .h8300s
+#endif
+#endif
+#ifdef __H8300SX__
+#ifdef __NORMAL_MODE__
+ .h8300sxn
+#else
+ .h8300sx
+#endif
+#endif
+
+ .section .init, "ax", @progbits
+ .global __init
+__init:
+ .section .fini, "ax", @progbits
+ .global __fini
+__fini:
diff --git a/libgcc/config/h8300/crtn.S b/libgcc/config/h8300/crtn.S
new file mode 100644
index 00000000000..7a591694563
--- /dev/null
+++ b/libgcc/config/h8300/crtn.S
@@ -0,0 +1,53 @@
+/* Copyright (C) 2001, 2009, 2011 Free Software Foundation, Inc.
+ This file was adapted from glibc sources.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* See an explanation about .init and .fini in crti.S. */
+
+#ifdef __H8300H__
+#ifdef __NORMAL_MODE__
+ .h8300hn
+#else
+ .h8300h
+#endif
+#endif
+
+#ifdef __H8300S__
+#ifdef __NORMAL_MODE__
+ .h8300sn
+#else
+ .h8300s
+#endif
+#endif
+#ifdef __H8300SX__
+#ifdef __NORMAL_MODE__
+ .h8300sxn
+#else
+ .h8300sx
+#endif
+#endif
+ .section .init, "ax", @progbits
+ rts
+
+ .section .fini, "ax", @progbits
+ rts
diff --git a/libgcc/config/h8300/ctzhi2.c b/libgcc/config/h8300/ctzhi2.c
new file mode 100644
index 00000000000..ba6f8e9086f
--- /dev/null
+++ b/libgcc/config/h8300/ctzhi2.c
@@ -0,0 +1,35 @@
+/* The implementation of __ctzhi2.
+ Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
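+/* A worked example (illustrative): for X == 0x0100 the first set bit,
+ scanning from bit 0 up, is bit 8, so 8 is returned. X == 0 runs the
+ loop to completion and yields 16. */
+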
+int __ctzhi2 (unsigned short x);
+
+int
+__ctzhi2 (unsigned short x)
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ if (x & ((unsigned short) 1 << i))
+ break;
+ return i;
+}
diff --git a/libgcc/config/h8300/fixunssfsi.c b/libgcc/config/h8300/fixunssfsi.c
new file mode 100644
index 00000000000..940d0c6dc6a
--- /dev/null
+++ b/libgcc/config/h8300/fixunssfsi.c
@@ -0,0 +1,41 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 1992, 2001, 2002, 2003, 2004, 2009, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* The libgcc2.c implementation gets confused by our type setup and creates
+ a directly recursive call, so we do our own implementation. For
+ the H8/300, that's in lib1funcs.S; for the H8/300H and H8S, it's here. */
+
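+/* A worked value for the rebiasing below (illustrative): for
+ a == 40000.0f we compute (long) (40000.0f - 32768L) + 32768L
+ == 7232 + 32768 == 40000, so the value fed to the signed
+ conversion always stays below the 32768 threshold. */
+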
+#ifndef __H8300__
+long __fixunssfsi (float a);
+
+long
+__fixunssfsi (float a)
+{
+ if (a >= (float) 32768L)
+ return (long) (a - 32768L) + 32768L;
+ return (long) a;
+}
+#endif
diff --git a/libgcc/config/h8300/lib1funcs.S b/libgcc/config/h8300/lib1funcs.S
new file mode 100644
index 00000000000..1b75b73269d
--- /dev/null
+++ b/libgcc/config/h8300/lib1funcs.S
@@ -0,0 +1,838 @@
+;; libgcc routines for the Renesas H8/300 CPU.
+;; Contributed by Steve Chamberlain <sac@cygnus.com>
+;; Optimizations by Toshiyasu Morita <toshiyasu.morita@renesas.com>
+
+/* Copyright (C) 1994, 2000, 2001, 2002, 2003, 2004, 2009
+ Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Assembler register definitions. */
+
+#define A0 r0
+#define A0L r0l
+#define A0H r0h
+
+#define A1 r1
+#define A1L r1l
+#define A1H r1h
+
+#define A2 r2
+#define A2L r2l
+#define A2H r2h
+
+#define A3 r3
+#define A3L r3l
+#define A3H r3h
+
+#define S0 r4
+#define S0L r4l
+#define S0H r4h
+
+#define S1 r5
+#define S1L r5l
+#define S1H r5h
+
+#define S2 r6
+#define S2L r6l
+#define S2H r6h
+
+#ifdef __H8300__
+#define PUSHP push
+#define POPP pop
+
+#define A0P r0
+#define A1P r1
+#define A2P r2
+#define A3P r3
+#define S0P r4
+#define S1P r5
+#define S2P r6
+#endif
+
+#if defined (__H8300H__) || defined (__H8300S__) || defined (__H8300SX__)
+#define PUSHP push.l
+#define POPP pop.l
+
+#define A0P er0
+#define A1P er1
+#define A2P er2
+#define A3P er3
+#define S0P er4
+#define S1P er5
+#define S2P er6
+
+#define A0E e0
+#define A1E e1
+#define A2E e2
+#define A3E e3
+#endif
+
+#ifdef __H8300H__
+#ifdef __NORMAL_MODE__
+ .h8300hn
+#else
+ .h8300h
+#endif
+#endif
+
+#ifdef __H8300S__
+#ifdef __NORMAL_MODE__
+ .h8300sn
+#else
+ .h8300s
+#endif
+#endif
+#ifdef __H8300SX__
+#ifdef __NORMAL_MODE__
+ .h8300sxn
+#else
+ .h8300sx
+#endif
+#endif
+
+#ifdef L_cmpsi2
+#ifdef __H8300__
+ .section .text
+ .align 2
+ .global ___cmpsi2
+___cmpsi2:
+ cmp.w A0,A2
+ bne .L2
+ cmp.w A1,A3
+ bne .L4
+ mov.w #1,A0
+ rts
+.L2:
+ bgt .L5
+.L3:
+ mov.w #2,A0
+ rts
+.L4:
+ bls .L3
+.L5:
+ sub.w A0,A0
+ rts
+ .end
+#endif
+#endif /* L_cmpsi2 */
+
+#ifdef L_ucmpsi2
+#ifdef __H8300__
+ .section .text
+ .align 2
+ .global ___ucmpsi2
+___ucmpsi2:
+ cmp.w A0,A2
+ bne .L2
+ cmp.w A1,A3
+ bne .L4
+ mov.w #1,A0
+ rts
+.L2:
+ bhi .L5
+.L3:
+ mov.w #2,A0
+ rts
+.L4:
+ bls .L3
+.L5:
+ sub.w A0,A0
+ rts
+ .end
+#endif
+#endif /* L_ucmpsi2 */
+
+#ifdef L_divhi3
+
+;; HImode divides for the H8/300.
+;; We bunch all of this into one object file since there are several
+;; "supporting routines".
+
+; general purpose normalize routine
+;
+; divisor in A0
+; dividend in A1
+; turns both into +ve numbers, and leaves the sign the answer
+; should have in A2L
+
+#ifdef __H8300__
+ .section .text
+ .align 2
+divnorm:
+ or A0H,A0H ; is divisor > 0
+ stc ccr,A2L
+ bge _lab1
+ not A0H ; no - then make it +ve
+ not A0L
+ adds #1,A0
+_lab1: or A1H,A1H ; look at dividend
+ bge _lab2
+ not A1H ; it is -ve, make it positive
+ not A1L
+ adds #1,A1
+ xor #0x8,A2L; and toggle sign of result
+_lab2: rts
+;; Basically the same, except that the sign of the divisor determines
+;; the sign.
+modnorm:
+ or A0H,A0H ; is divisor > 0
+ stc ccr,A2L
+ bge _lab7
+ not A0H ; no - then make it +ve
+ not A0L
+ adds #1,A0
+_lab7: or A1H,A1H ; look at dividend
+ bge _lab8
+ not A1H ; it is -ve, make it positive
+ not A1L
+ adds #1,A1
+_lab8: rts
+
+; A0=A0/A1 signed
+
+ .global ___divhi3
+___divhi3:
+ bsr divnorm
+ bsr ___udivhi3
+negans: btst #3,A2L ; should answer be negative ?
+ beq _lab4
+ not A0H ; yes, so make it so
+ not A0L
+ adds #1,A0
+_lab4: rts
+
+; A0=A0%A1 signed
+
+ .global ___modhi3
+___modhi3:
+ bsr modnorm
+ bsr ___udivhi3
+ mov A3,A0
+ bra negans
+
+; A0=A0%A1 unsigned
+
+ .global ___umodhi3
+___umodhi3:
+ bsr ___udivhi3
+ mov A3,A0
+ rts
+
+; A0=A0/A1 unsigned
+; A3=A0%A1 unsigned
+; A2H trashed
+; D high 8 bits of denom
+; d low 8 bits of denom
+; N high 8 bits of num
+; n low 8 bits of num
+; M high 8 bits of mod
+; m low 8 bits of mod
+; Q high 8 bits of quot
+; q low 8 bits of quot
+; P preserve
+
+; The H8/300 only has a 16/8 bit divide, so we look at the incoming
+; operands and see how to partition up the work.
+
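+; As a rough C sketch (illustrative only; next_bit is a made-up
+; helper), the divlongway loop below is plain restoring division:
+;
+;   for (i = 0; i < 8; i++) {
+;     quot <<= 1;
+;     rem = (rem << 1) | next_bit (num);
+;     if (rem >= den) { rem -= den; quot |= 1; }
+;   }
+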
+ .global ___udivhi3
+___udivhi3:
+ ; A0 A1 A2 A3
+ ; Nn Dd P
+ sub.w A3,A3 ; Nn Dd xP 00
+ or A1H,A1H
+ bne divlongway
+ or A0H,A0H
+ beq _lab6
+
+; we know that D == 0 and N != 0
+ mov.b A0H,A3L ; Nn Dd xP 0N
+ divxu A1L,A3 ; MQ
+ mov.b A3L,A0H ; Q
+; dealt with N, do n
+_lab6: mov.b A0L,A3L ; n
+ divxu A1L,A3 ; mq
+ mov.b A3L,A0L ; Qq
+ mov.b A3H,A3L ; m
+ mov.b #0x0,A3H ; Qq 0m
+ rts
+
+; D != 0, which means the divisor does not fit in eight bits, so we
+; loop around to get the result one bit at a time.
+
+divlongway:
+ mov.b A0H,A3L ; Nn Dd xP 0N
+ mov.b #0x0,A0H ; high byte of answer has to be zero
+ mov.b #0x8,A2H ; 8
+div8: add.b A0L,A0L ; n*=2
+ rotxl A3L ; Make remainder bigger
+ rotxl A3H
+ sub.w A1,A3 ; Q-=N
+ bhs setbit ; set a bit ?
+ add.w A1,A3 ; no : too far , Q+=N
+
+ dec A2H
+ bne div8 ; next bit
+ rts
+
+setbit: inc A0L ; do insert bit
+ dec A2H
+ bne div8 ; next bit
+ rts
+
+#endif /* __H8300__ */
+#endif /* L_divhi3 */
+
+#ifdef L_divsi3
+
+;; 4 byte integer divides for the H8/300.
+;;
+;; We have one routine which does all the work and lots of
+;; little ones which prepare the args and massage the sign.
+;; We bunch all of this into one object file since there are several
+;; "supporting routines".
+
+ .section .text
+ .align 2
+
+; Put abs SIs into r0/r1 and r2/r3, and leave a 1 in r6l with sign of rest.
+; This function is here to keep branch displacements small.
+
+#ifdef __H8300__
+
+divnorm:
+ mov.b A0H,A0H ; is the numerator -ve
+ stc ccr,S2L ; keep the sign in bit 3 of S2L
+ bge postive
+
+ ; negate arg
+ not A0H
+ not A1H
+ not A0L
+ not A1L
+
+ add #1,A1L
+ addx #0,A1H
+ addx #0,A0L
+ addx #0,A0H
+postive:
+ mov.b A2H,A2H ; is the denominator -ve
+ bge postive2
+ not A2L
+ not A2H
+ not A3L
+ not A3H
+ add.b #1,A3L
+ addx #0,A3H
+ addx #0,A2L
+ addx #0,A2H
+ xor.b #0x08,S2L ; toggle the result sign
+postive2:
+ rts
+
+;; Basically the same, except that the sign of the divisor determines
+;; the sign.
+modnorm:
+ mov.b A0H,A0H ; is the numerator -ve
+ stc ccr,S2L ; keep the sign in bit 3 of S2L
+ bge mpostive
+
+ ; negate arg
+ not A0H
+ not A1H
+ not A0L
+ not A1L
+
+ add #1,A1L
+ addx #0,A1H
+ addx #0,A0L
+ addx #0,A0H
+mpostive:
+ mov.b A2H,A2H ; is the denominator -ve
+ bge mpostive2
+ not A2L
+ not A2H
+ not A3L
+ not A3H
+ add.b #1,A3L
+ addx #0,A3H
+ addx #0,A2L
+ addx #0,A2H
+mpostive2:
+ rts
+
+#else /* __H8300H__ */
+
+divnorm:
+ mov.l A0P,A0P ; is the numerator -ve
+ stc ccr,S2L ; keep the sign in bit 3 of S2L
+ bge postive
+
+ neg.l A0P ; negate arg
+
+postive:
+ mov.l A1P,A1P ; is the denominator -ve
+ bge postive2
+
+ neg.l A1P ; negate arg
+ xor.b #0x08,S2L ; toggle the result sign
+
+postive2:
+ rts
+
+;; Basically the same, except that the sign of the divisor determines
+;; the sign.
+modnorm:
+ mov.l A0P,A0P ; is the numerator -ve
+ stc ccr,S2L ; keep the sign in bit 3 of S2L
+ bge mpostive
+
+ neg.l A0P ; negate arg
+
+mpostive:
+ mov.l A1P,A1P ; is the denominator -ve
+ bge mpostive2
+
+ neg.l A1P ; negate arg
+
+mpostive2:
+ rts
+
+#endif
+
+; numerator in A0/A1
+; denominator in A2/A3
+ .global ___modsi3
+___modsi3:
+#ifdef __H8300__
+ PUSHP S2P
+ PUSHP S0P
+ PUSHP S1P
+ bsr modnorm
+ bsr divmodsi4
+ mov S0,A0
+ mov S1,A1
+ bra exitdiv
+#else
+ PUSHP S2P
+ bsr modnorm
+ bsr ___udivsi3
+ mov.l er3,er0
+ bra exitdiv
+#endif
+
+ ;; H8/300H and H8S version of ___udivsi3 is defined later in
+ ;; the file.
+#ifdef __H8300__
+ .global ___udivsi3
+___udivsi3:
+ PUSHP S2P
+ PUSHP S0P
+ PUSHP S1P
+ bsr divmodsi4
+ bra reti
+#endif
+
+ .global ___umodsi3
+___umodsi3:
+#ifdef __H8300__
+ PUSHP S2P
+ PUSHP S0P
+ PUSHP S1P
+ bsr divmodsi4
+ mov S0,A0
+ mov S1,A1
+ bra reti
+#else
+ bsr ___udivsi3
+ mov.l er3,er0
+ rts
+#endif
+
+ .global ___divsi3
+___divsi3:
+#ifdef __H8300__
+ PUSHP S2P
+ PUSHP S0P
+ PUSHP S1P
+ jsr divnorm
+ jsr divmodsi4
+#else
+ PUSHP S2P
+ jsr divnorm
+ bsr ___udivsi3
+#endif
+
+ ; examine what the sign should be
+exitdiv:
+ btst #3,S2L
+ beq reti
+
+ ; should be -ve
+#ifdef __H8300__
+ not A0H
+ not A1H
+ not A0L
+ not A1L
+
+ add #1,A1L
+ addx #0,A1H
+ addx #0,A0L
+ addx #0,A0H
+#else /* __H8300H__ */
+ neg.l A0P
+#endif
+
+reti:
+#ifdef __H8300__
+ POPP S1P
+ POPP S0P
+#endif
+ POPP S2P
+ rts
+
+ ; takes A0/A1 numerator (A0P for H8/300H)
+ ; A2/A3 denominator (A1P for H8/300H)
+ ; returns A0/A1 quotient (A0P for H8/300H)
+ ; S0/S1 remainder (S0P for H8/300H)
+ ; trashes S2H
+
+#ifdef __H8300__
+
+divmodsi4:
+ sub.w S0,S0 ; zero play area
+ mov.w S0,S1
+ mov.b A2H,S2H
+ or A2L,S2H
+ or A3H,S2H
+ bne DenHighNonZero
+ mov.b A0H,A0H
+ bne NumByte0Zero
+ mov.b A0L,A0L
+ bne NumByte1Zero
+ mov.b A1H,A1H
+ bne NumByte2Zero
+ bra NumByte3Zero
+NumByte0Zero:
+ mov.b A0H,S1L
+ divxu A3L,S1
+ mov.b S1L,A0H
+NumByte1Zero:
+ mov.b A0L,S1L
+ divxu A3L,S1
+ mov.b S1L,A0L
+NumByte2Zero:
+ mov.b A1H,S1L
+ divxu A3L,S1
+ mov.b S1L,A1H
+NumByte3Zero:
+ mov.b A1L,S1L
+ divxu A3L,S1
+ mov.b S1L,A1L
+
+ mov.b S1H,S1L
+ mov.b #0x0,S1H
+ rts
+
+; have to do the divide by shift and test
+DenHighNonZero:
+ mov.b A0H,S1L
+ mov.b A0L,A0H
+ mov.b A1H,A0L
+ mov.b A1L,A1H
+
+ mov.b #0,A1L
+ mov.b #24,S2H ; only do 24 iterations
+
+nextbit:
+ add.w A1,A1 ; double the answer guess
+ rotxl A0L
+ rotxl A0H
+
+ rotxl S1L ; double remainder
+ rotxl S1H
+ rotxl S0L
+ rotxl S0H
+ sub.w A3,S1 ; does it all fit
+ subx A2L,S0L
+ subx A2H,S0H
+ bhs setone
+
+ add.w A3,S1 ; no, restore mistake
+ addx A2L,S0L
+ addx A2H,S0H
+
+ dec S2H
+ bne nextbit
+ rts
+
+setone:
+ inc A1L
+ dec S2H
+ bne nextbit
+ rts
+
+#else /* __H8300H__ */
+
+ ;; This function also computes the remainder and stores it in er3.
+ .global ___udivsi3
+___udivsi3:
+ mov.w A1E,A1E ; denominator top word 0?
+ bne DenHighNonZero
+
+ ; do it the easy way, see page 107 in manual
+ mov.w A0E,A2
+ extu.l A2P
+ divxu.w A1,A2P
+ mov.w A2E,A0E
+ divxu.w A1,A0P
+ mov.w A0E,A3
+ mov.w A2,A0E
+ extu.l A3P
+ rts
+
+ ; er0 = er0 / er1
+ ; er3 = er0 % er1
+ ; trashes er1 er2
+ ; expects er1 >= 2^16
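+
+ ;; A rough C sketch of the scheme below (illustrative only; aq == 0
+ ;; is special-cased, and the code assumes aq overshoots the true
+ ;; quotient by at most one):
+ ;;
+ ;;   shift num and den right by equal amounts until den < 2^16;
+ ;;   aq = shifted_num / shifted_den;
+ ;;   q = aq - 1; r = num - q * den;
+ ;;   if (r >= den) { q++; r -= den; }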
+DenHighNonZero:
+ mov.l er0,er3
+ mov.l er1,er2
+#ifdef __H8300H__
+divmod_L21:
+ shlr.l er0
+ shlr.l er2 ; make divisor < 2^16
+ mov.w e2,e2
+ bne divmod_L21
+#else
+ shlr.l #2,er2 ; make divisor < 2^16
+ mov.w e2,e2
+ beq divmod_L22A
+divmod_L21:
+ shlr.l #2,er0
+divmod_L22:
+ shlr.l #2,er2 ; make divisor < 2^16
+ mov.w e2,e2
+ bne divmod_L21
+divmod_L22A:
+ rotxl.w r2
+ bcs divmod_L23
+ shlr.l er0
+ bra divmod_L24
+divmod_L23:
+ rotxr.w r2
+ shlr.l #2,er0
+divmod_L24:
+#endif
+ ;; At this point,
+ ;; er0 contains shifted dividend
+ ;; er1 contains divisor
+ ;; er2 contains shifted divisor
+ ;; er3 contains dividend, later remainder
+ divxu.w r2,er0 ; r0 now contains the approximate quotient (AQ)
+ extu.l er0
+ beq divmod_L25
+ subs #1,er0 ; er0 = AQ - 1
+ mov.w e1,r2
+ mulxu.w r0,er2 ; er2 = upper (AQ - 1) * divisor
+ sub.w r2,e3 ; dividend - 65536 * er2
+ mov.w r1,r2
+ mulxu.w r0,er2 ; compute er3 = remainder (tentative)
+ sub.l er2,er3 ; er3 = dividend - (AQ - 1) * divisor
+divmod_L25:
+ cmp.l er1,er3 ; is divisor < remainder?
+ blo divmod_L26
+ adds #1,er0
+ sub.l er1,er3 ; correct the remainder
+divmod_L26:
+ rts
+
+#endif
+#endif /* L_divsi3 */
+
+#ifdef L_mulhi3
+
+;; HImode multiply.
+; The H8/300 only has an 8*8->16 multiply.
+; The answer is the same as:
+;
+; product = (srca.l * srcb.l) + ((srca.h * srcb.l) + (srcb.h * srca.l)) * 256
+; (we can ignore A1.h * A0.h because it falls off the top)
+; A0 in
+; A1 in
+; A0 answer
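+;
+; A worked example (illustrative): 0x0102 * 0x0304. The low product is
+; 2*4 = 8, the cross terms give (1*4 + 3*2) * 256 = 2560 = 0x0A00, and
+; the sum 0x0A08 matches the full product 0x30A08 truncated to 16 bits.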
+
+#ifdef __H8300__
+ .section .text
+ .align 2
+ .global ___mulhi3
+___mulhi3:
+ mov.b A1L,A2L ; A2l gets srcb.l
+ mulxu A0L,A2 ; A2 gets first sub product
+
+ mov.b A0H,A3L ; prepare for
+ mulxu A1L,A3 ; second sub product
+
+ add.b A3L,A2H ; sum first two terms
+
+ mov.b A1H,A3L ; third sub product
+ mulxu A0L,A3
+
+ add.b A3L,A2H ; almost there
+ mov.w A2,A0 ; that is
+ rts
+
+#endif
+#endif /* L_mulhi3 */
+
+#ifdef L_mulsi3
+
+;; SImode multiply.
+;;
+;; I think that shift and add may be sufficient for this. Using the
+;; supplied 8x8->16 multiply would need 10 ops of 14 cycles each plus
+;; overhead. This way the inner loop uses maybe 20 cycles plus
+;; overhead, but terminates quickly on small args.
+;;
+;; A0/A1 src_a
+;; A2/A3 src_b
+;;
+;; while (a)
+;; {
+;; if (a & 1)
+;; r += b;
+;; a >>= 1;
+;; b <<= 1;
+;; }
+
+ .section .text
+ .align 2
+
+#ifdef __H8300__
+
+ .global ___mulsi3
+___mulsi3:
+ PUSHP S0P
+ PUSHP S1P
+
+ sub.w S0,S0
+ sub.w S1,S1
+
+ ; while (a)
+_top: mov.w A0,A0
+ bne _more
+ mov.w A1,A1
+ beq _done
+_more: ; if (a & 1)
+ bld #0,A1L
+ bcc _nobit
+ ; r += b
+ add.w A3,S1
+ addx A2L,S0L
+ addx A2H,S0H
+_nobit:
+ ; a >>= 1
+ shlr A0H
+ rotxr A0L
+ rotxr A1H
+ rotxr A1L
+
+ ; b <<= 1
+ add.w A3,A3
+ addx A2L,A2L
+ addx A2H,A2H
+ bra _top
+
+_done:
+ mov.w S0,A0
+ mov.w S1,A1
+ POPP S1P
+ POPP S0P
+ rts
+
+#else /* __H8300H__ */
+
+;
+; mulsi3 for H8/300H - based on Renesas SH implementation
+;
+; by Toshiyasu Morita
+;
+; Old code:
+;
+; 16b * 16b = 372 states (worst case)
+; 32b * 32b = 724 states (worst case)
+;
+; New code:
+;
+; 16b * 16b = 48 states
+; 16b * 32b = 72 states
+; 32b * 32b = 92 states
+;
+
+ .global ___mulsi3
+___mulsi3:
+ mov.w r1,r2 ; ( 2 states) b * d
+ mulxu r0,er2 ; (22 states)
+
+ mov.w e0,r3 ; ( 2 states) a * d
+ beq L_skip1 ; ( 4 states)
+ mulxu r1,er3 ; (22 states)
+ add.w r3,e2 ; ( 2 states)
+
+L_skip1:
+ mov.w e1,r3 ; ( 2 states) c * b
+ beq L_skip2 ; ( 4 states)
+ mulxu r0,er3 ; (22 states)
+ add.w r3,e2 ; ( 2 states)
+
+L_skip2:
+ mov.l er2,er0 ; ( 2 states)
+ rts ; (10 states)
+
+#endif
+#endif /* L_mulsi3 */
+#ifdef L_fixunssfsi_asm
+/* For the h8300 we use asm to save some bytes, to
+ allow more programs to fit into the tiny address
+ space. For the H8/300H and H8S, the C version is good enough. */
+#ifdef __H8300__
+/* We still treat NaNs differently than libgcc2.c does, but then, the
+ behavior is undefined anyway. */
+ .global ___fixunssfsi
+___fixunssfsi:
+ cmp.b #0x4f,r0h
+ bge Large_num
+ jmp @___fixsfsi
+Large_num:
+ bhi L_huge_num
+ xor.b #0x80,A0L
+ bmi L_shift8
+L_huge_num:
+ mov.w #65535,A0
+ mov.w A0,A1
+ rts
+L_shift8:
+ mov.b A0L,A0H
+ mov.b A1H,A0L
+ mov.b A1L,A1H
+ mov.b #0,A1L
+ rts
+#endif
+#endif /* L_fixunssfsi_asm */
diff --git a/libgcc/config/h8300/parityhi2.c b/libgcc/config/h8300/parityhi2.c
new file mode 100644
index 00000000000..d58cb89b5c7
--- /dev/null
+++ b/libgcc/config/h8300/parityhi2.c
@@ -0,0 +1,36 @@
+/* The implementation of __parityhi2.
+ Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
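+/* A worked example (illustrative): X == 0x0107 has four bits set, so
+ __parityhi2 returns 4 & 1 == 0, while X == 0x0007 has three bits set
+ and returns 1. */
+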
+int __parityhi2 (unsigned short x);
+
+int
+__parityhi2 (unsigned short x)
+{
+ int i;
+ int count = 0;
+ for (i = 0; i < 16; i++)
+ if (x & ((unsigned short) 1 << i))
+ count++;
+ return count & 1;
+}
diff --git a/libgcc/config/h8300/popcounthi2.c b/libgcc/config/h8300/popcounthi2.c
new file mode 100644
index 00000000000..47be193b38d
--- /dev/null
+++ b/libgcc/config/h8300/popcounthi2.c
@@ -0,0 +1,36 @@
+/* The implementation of __popcounthi2.
+ Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
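+/* A worked example (illustrative): X == 0x00FF has bits 0 through 7
+ set, so __popcounthi2 returns 8; X == 0 returns 0. */
+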
+int __popcounthi2 (unsigned short x);
+
+int
+__popcounthi2 (unsigned short x)
+{
+ int i;
+ int count = 0;
+ for (i = 0; i < 16; i++)
+ if (x & ((unsigned short) 1 << i))
+ count++;
+ return count;
+}
diff --git a/libgcc/config/h8300/t-h8300 b/libgcc/config/h8300/t-h8300
new file mode 100644
index 00000000000..b6448523e34
--- /dev/null
+++ b/libgcc/config/h8300/t-h8300
@@ -0,0 +1,13 @@
+LIB1ASMSRC = h8300/lib1funcs.S
+LIB1ASMFUNCS = _cmpsi2 _ucmpsi2 _divhi3 _divsi3 _mulhi3 _mulsi3 \
+ _fixunssfsi_asm
+
+LIB2ADD = \
+ $(srcdir)/config/h8300/clzhi2.c \
+ $(srcdir)/config/h8300/ctzhi2.c \
+ $(srcdir)/config/h8300/parityhi2.c \
+ $(srcdir)/config/h8300/popcounthi2.c \
+ $(srcdir)/config/h8300/fixunssfsi.c
+
+# We do not have DF type, so fake out the libgcc2 compilation.
+HOST_LIBGCC2_CFLAGS += -DDF=SF
diff --git a/libgcc/config/i386/sol2-ci.S b/libgcc/config/i386/crti.S
index 61e1436f560..76fd35869b5 100644
--- a/libgcc/config/i386/sol2-ci.S
+++ b/libgcc/config/i386/crti.S
@@ -1,4 +1,4 @@
-/* crti.s for Solaris 2, x86.
+/* crti.S for x86.
Copyright (C) 1993, 2008, 2009 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
diff --git a/libgcc/config/i386/sol2-cn.S b/libgcc/config/i386/crtn.S
index 993675d2341..572e9cd931b 100644
--- a/libgcc/config/i386/sol2-cn.S
+++ b/libgcc/config/i386/crtn.S
@@ -1,4 +1,4 @@
-/* crtn.s for Solaris 2, x86.
+/* crtn.S for x86.
Copyright (C) 1993, 2008, 2009 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
diff --git a/libgcc/config/i386/cygming-crtbegin.c b/libgcc/config/i386/cygming-crtbegin.c
new file mode 100644
index 00000000000..b589841fd01
--- /dev/null
+++ b/libgcc/config/i386/cygming-crtbegin.c
@@ -0,0 +1,136 @@
+/* crtbegin object for windows32 targets.
+ Copyright (C) 2007, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+ Contributed by Danny Smith <dannysmith@users.sourceforge.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Target machine header files require this define. */
+#define IN_LIBGCC2
+
+#include "auto-host.h"
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "unwind-dw2-fde.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#ifndef LIBGCC_SONAME
+#define LIBGCC_SONAME "libgcc_s.dll"
+#endif
+
+#ifndef LIBGCJ_SONAME
+#define LIBGCJ_SONAME "libgcj_s.dll"
+#endif
+
+
+/* Make the declarations weak. This is critical for
+ _Jv_RegisterClasses because it lives in libgcj.a. */
+extern void __register_frame_info (const void *, struct object *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void *__deregister_frame_info (const void *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void _Jv_RegisterClasses (const void *) TARGET_ATTRIBUTE_WEAK;
+
+#if defined(HAVE_LD_RO_RW_SECTION_MIXING)
+# define EH_FRAME_SECTION_CONST const
+#else
+# define EH_FRAME_SECTION_CONST
+#endif
+
+/* Stick a label at the beginning of the frame unwind info so we can
+ register/deregister it with the exception handling library code. */
+#if DWARF2_UNWIND_INFO
+static EH_FRAME_SECTION_CONST char __EH_FRAME_BEGIN__[]
+ __attribute__((used, section(EH_FRAME_SECTION_NAME), aligned(4)))
+ = { };
+
+static struct object obj;
+#endif
+
+#if TARGET_USE_JCR_SECTION
+static void *__JCR_LIST__[]
+ __attribute__ ((used, section(JCR_SECTION_NAME), aligned(4)))
+ = { };
+#endif
+
+/* Pull in references from libgcc.a(unwind-dw2-fde.o) in the
+ startfile. These are referenced by a ctor and dtor in crtend.o. */
+extern void __gcc_register_frame (void);
+extern void __gcc_deregister_frame (void);
+
+void
+__gcc_register_frame (void)
+{
+#if DWARF2_UNWIND_INFO
+/* Weak undefined symbols won't be pulled in from dlls; hence
+ we first test if the dll is already loaded and, if so,
+ get the symbol's address at run-time. If the dll is not loaded,
+ fall back to the weak reference into the static archive. */
+
+ void (*register_frame_fn) (const void *, struct object *);
+ HANDLE h = GetModuleHandle (LIBGCC_SONAME);
+ if (h)
+ register_frame_fn = (void (*) (const void *, struct object *))
+ GetProcAddress (h, "__register_frame_info");
+ else
+ register_frame_fn = __register_frame_info;
+ if (register_frame_fn)
+ register_frame_fn (__EH_FRAME_BEGIN__, &obj);
+#endif
+
+#if TARGET_USE_JCR_SECTION
+ if (__JCR_LIST__[0])
+ {
+ void (*register_class_fn) (const void *);
+ HANDLE h = GetModuleHandle (LIBGCJ_SONAME);
+ if (h)
+ register_class_fn = (void (*) (const void *))
+ GetProcAddress (h, "_Jv_RegisterClasses");
+ else
+ register_class_fn = _Jv_RegisterClasses;
+
+ if (register_class_fn)
+ register_class_fn (__JCR_LIST__);
+ }
+#endif
+}
+
+void
+__gcc_deregister_frame (void)
+{
+#if DWARF2_UNWIND_INFO
+ void * (*deregister_frame_fn) (const void *);
+ HANDLE h = GetModuleHandle (LIBGCC_SONAME);
+ if (h)
+ deregister_frame_fn = (void* (*) (const void *))
+ GetProcAddress (h, "__deregister_frame_info");
+ else
+ deregister_frame_fn = __deregister_frame_info;
+ if (deregister_frame_fn)
+ deregister_frame_fn (__EH_FRAME_BEGIN__);
+#endif
+}
diff --git a/libgcc/config/i386/cygming-crtend.c b/libgcc/config/i386/cygming-crtend.c
new file mode 100644
index 00000000000..ea53c84f6f1
--- /dev/null
+++ b/libgcc/config/i386/cygming-crtend.c
@@ -0,0 +1,89 @@
+/* crtend object for windows32 targets.
+ Copyright (C) 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+ Contributed by Danny Smith <dannysmith@users.sourceforge.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Target machine header files require this define. */
+#define IN_LIBGCC2
+
+/* auto-host.h is needed by cygming.h for HAVE_GAS_WEAK and here
+ for HAVE_LD_RO_RW_SECTION_MIXING. */
+#include "auto-host.h"
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "unwind-dw2-fde.h"
+
+#if defined(HAVE_LD_RO_RW_SECTION_MIXING)
+# define EH_FRAME_SECTION_CONST const
+#else
+# define EH_FRAME_SECTION_CONST
+#endif
+
+#if DWARF2_UNWIND_INFO
+/* Terminate the frame unwind info section with a 0 as a sentinel;
+ this would be the 'length' field in a real FDE. */
+
+static EH_FRAME_SECTION_CONST int __FRAME_END__[]
+ __attribute__ ((used, section(EH_FRAME_SECTION_NAME),
+ aligned(4)))
+ = { 0 };
+#endif
+
+#if TARGET_USE_JCR_SECTION
+/* Null terminate the .jcr section array. */
+static void *__JCR_END__[1]
+ __attribute__ ((used, section(JCR_SECTION_NAME),
+ aligned(sizeof(void *))))
+ = { 0 };
+#endif
+
+extern void __gcc_register_frame (void);
+extern void __gcc_deregister_frame (void);
+
+static void register_frame_ctor (void) __attribute__ ((constructor (0)));
+
+static void
+register_frame_ctor (void)
+{
+ __gcc_register_frame ();
+#if DEFAULT_USE_CXA_ATEXIT
+ /* If we use the __cxa_atexit method to register C++ dtors
+ at object construction, also use atexit to register eh frame
+ info cleanup. */
+ atexit (__gcc_deregister_frame);
+#endif
+}
+
+#if !DEFAULT_USE_CXA_ATEXIT
+static void deregister_frame_dtor (void) __attribute__ ((destructor (0)));
+
+static void
+deregister_frame_dtor (void)
+{
+ __gcc_deregister_frame ();
+}
+#endif
diff --git a/libgcc/config/i386/cygwin.S b/libgcc/config/i386/cygwin.S
new file mode 100644
index 00000000000..8f9c486850e
--- /dev/null
+++ b/libgcc/config/i386/cygwin.S
@@ -0,0 +1,188 @@
+/* Stuff needed for libgcc on win32.
+ *
+ * Copyright (C) 1996, 1998, 2001, 2003, 2008, 2009, 2010
+ * Free Software Foundation, Inc.
+ * Written By Steve Chamberlain
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "auto-host.h"
+
+#ifdef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
+ .cfi_sections .debug_frame
+# define cfi_startproc() .cfi_startproc
+# define cfi_endproc() .cfi_endproc
+# define cfi_adjust_cfa_offset(X) .cfi_adjust_cfa_offset X
+# define cfi_def_cfa_register(X) .cfi_def_cfa_register X
+# define cfi_register(D,S) .cfi_register D, S
+# ifdef _WIN64
+# define cfi_push(X) .cfi_adjust_cfa_offset 8; .cfi_rel_offset X, 0
+# define cfi_pop(X) .cfi_adjust_cfa_offset -8; .cfi_restore X
+# else
+# define cfi_push(X) .cfi_adjust_cfa_offset 4; .cfi_rel_offset X, 0
+# define cfi_pop(X) .cfi_adjust_cfa_offset -4; .cfi_restore X
+# endif
+#else
+# define cfi_startproc()
+# define cfi_endproc()
+# define cfi_adjust_cfa_offset(X)
+# define cfi_def_cfa_register(X)
+# define cfi_register(D,S)
+# define cfi_push(X)
+# define cfi_pop(X)
+#endif /* HAVE_GAS_CFI_SECTIONS_DIRECTIVE */
+
+#ifdef L_chkstk
+/* The function prologue calls __chkstk to probe the stack when allocating
+ more than CHECK_STACK_LIMIT bytes in one go. Touching the stack at 4K
+ increments is necessary to ensure that the guard pages used by the OS
+ virtual memory manager are allocated in the correct sequence. */
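+
+/* A worked example (illustrative): a request of 0x2800 bytes probes
+ the pages at sp-0x1000 and sp-0x2000 in the loop below, and the tail
+ code then touches sp-0x2800 before the stack pointer is lowered. */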
+
+ .global ___chkstk
+ .global __alloca
+#ifdef _WIN64
+/* __alloca is a normal function call, which uses %rcx as the argument. */
+ cfi_startproc()
+__alloca:
+ movq %rcx, %rax
+ /* FALLTHRU */
+
+/* ___chkstk is a *special* function call, which uses %rax as the argument.
+ We avoid clobbering the 4 integer argument registers, %rcx, %rdx,
+ %r8 and %r9, which leaves us with %rax, %r10, and %r11 to use. */
+ .align 4
+___chkstk:
+ popq %r11 /* pop return address */
+ cfi_adjust_cfa_offset(-8) /* indicate return address in r11 */
+ cfi_register(%rip, %r11)
+ movq %rsp, %r10
+ cmpq $0x1000, %rax /* > 4k ?*/
+ jb 2f
+
+1: subq $0x1000, %r10 /* yes, move pointer down 4k*/
+ orl $0x0, (%r10) /* probe there */
+ subq $0x1000, %rax /* decrement count */
+ cmpq $0x1000, %rax
+ ja 1b /* and do it again */
+
+2: subq %rax, %r10
+ movq %rsp, %rax /* hold CFA until return */
+ cfi_def_cfa_register(%rax)
+ orl $0x0, (%r10) /* less than 4k, just peek here */
+ movq %r10, %rsp /* decrement stack */
+
+ /* Push the return value back. Doing this instead of just
+ jumping to %r11 preserves the cached call-return stack
+ used by most modern processors. */
+ pushq %r11
+ ret
+ cfi_endproc()
+#else
+ cfi_startproc()
+___chkstk:
+__alloca:
+ pushl %ecx /* save temp */
+ cfi_push(%ecx) /* annotate the %ecx actually pushed */
+ leal 8(%esp), %ecx /* point past return addr */
+ cmpl $0x1000, %eax /* > 4k ?*/
+ jb 2f
+
+1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
+ orl $0x0, (%ecx) /* probe there */
+ subl $0x1000, %eax /* decrement count */
+ cmpl $0x1000, %eax
+ ja 1b /* and do it again */
+
+2: subl %eax, %ecx
+ orl $0x0, (%ecx) /* less than 4k, just peek here */
+ movl %esp, %eax /* save current stack pointer */
+ cfi_def_cfa_register(%eax)
+ movl %ecx, %esp /* decrement stack */
+ movl (%eax), %ecx /* recover saved temp */
+
+ /* Copy the return register. Doing this instead of just jumping to
+ the address preserves the cached call-return stack used by most
+ modern processors. */
+ pushl 4(%eax)
+ ret
+ cfi_endproc()
+#endif /* _WIN64 */
+#endif /* L_chkstk */
+
+#ifdef L_chkstk_ms
+/* ___chkstk_ms is a *special* function call, which uses %rax as the argument.
+ We avoid clobbering any registers. Unlike ___chkstk, it just probes the
+ stack and does no stack allocation. */
+ .global ___chkstk_ms
+#ifdef _WIN64
+ cfi_startproc()
+___chkstk_ms:
+ pushq %rcx /* save temps */
+ cfi_push(%rcx)
+ pushq %rax
+ cfi_push(%rax)
+ cmpq $0x1000, %rax /* > 4k ?*/
+ leaq 24(%rsp), %rcx /* point past return addr */
+ jb 2f
+
+1: subq $0x1000, %rcx /* yes, move pointer down 4k */
+ orq $0x0, (%rcx) /* probe there */
+ subq $0x1000, %rax /* decrement count */
+ cmpq $0x1000, %rax
+ ja 1b /* and do it again */
+
+2: subq %rax, %rcx
+ orq $0x0, (%rcx) /* less than 4k, just peek here */
+
+ popq %rax
+ cfi_pop(%rax)
+ popq %rcx
+ cfi_pop(%rcx)
+ ret
+ cfi_endproc()
+#else
+ cfi_startproc()
+___chkstk_ms:
+ pushl %ecx /* save temp */
+ cfi_push(%ecx)
+ pushl %eax
+ cfi_push(%eax)
+ cmpl $0x1000, %eax /* > 4k ?*/
+ leal 12(%esp), %ecx /* point past return addr */
+ jb 2f
+
+1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
+ orl $0x0, (%ecx) /* probe there */
+ subl $0x1000, %eax /* decrement count */
+ cmpl $0x1000, %eax
+ ja 1b /* and do it again */
+
+2: subl %eax, %ecx
+ orl $0x0, (%ecx) /* less than 4k, just peek here */
+
+ popl %eax
+ cfi_pop(%eax)
+ popl %ecx
+ cfi_pop(%ecx)
+ ret
+ cfi_endproc()
+#endif /* _WIN64 */
+#endif /* L_chkstk_ms */
diff --git a/libgcc/config/i386/gthr-win32.c b/libgcc/config/i386/gthr-win32.c
new file mode 100644
index 00000000000..ab1b69fd4cf
--- /dev/null
+++ b/libgcc/config/i386/gthr-win32.c
@@ -0,0 +1,261 @@
+/* Implementation of W32-specific threads compatibility routines for
+ libgcc2. */
+
+/* Copyright (C) 1999, 2000, 2002, 2004, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Mumit Khan <khan@xraylith.wisc.edu>.
+ Modified and moved to separate file by Danny Smith
+ <dannysmith@users.sourceforge.net>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <windows.h>
+#ifndef __GTHREAD_HIDE_WIN32API
+# define __GTHREAD_HIDE_WIN32API 1
+#endif
+#undef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+#define __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+#include "gthr-win32.h"
+
+/* Windows32 threads specific definitions. The windows32 threading model
+ does not map well into pthread-inspired gcc's threading model, and so
+ there are caveats one needs to be aware of.
+
+ 1. The destructor supplied to __gthread_key_create is ignored for
+ generic x86-win32 ports. This will certainly cause memory leaks
+ due to unreclaimed eh contexts (sizeof (eh_context) is at least
+ 24 bytes for x86 currently).
+
+ This memory leak may be significant for long-running applications
+ that make heavy use of C++ EH.
+
+ However, Mingw runtime (version 0.3 or newer) provides a mechanism
+ to emulate pthreads key dtors; the runtime provides a special DLL,
+ linked in if -mthreads option is specified, that runs the dtors in
+ the reverse order of registration when each thread exits. If
+ -mthreads option is not given, a stub is linked in instead of the
+ DLL, which results in a memory leak. Other x86-win32 ports can of
+ course use the same technique to avoid the leak.
+
+ 2. The error codes returned are not POSIX-like, and are cast into
+ ints. This may cause incorrect error returns due to truncation of
+ values on hardware where sizeof (DWORD) > sizeof (int).
+
+ 3. We are currently using a special mutex instead of the Critical
+ Sections, since Win9x does not support TryEnterCriticalSection
+ (while NT does).
+
+ The basic framework should work well enough. In the long term, GCC
+ needs to use Structured Exception Handling on Windows32. */
+
+int
+__gthr_win32_once (__gthread_once_t *once, void (*func) (void))
+{
+ if (once == NULL || func == NULL)
+ return EINVAL;
+
+ if (! once->done)
+ {
+ if (InterlockedIncrement (&(once->started)) == 0)
+ {
+ (*func) ();
+ once->done = TRUE;
+ }
+ else
+ {
+ /* Another thread is currently executing the code, so wait for it
+ to finish; yield the CPU in the meantime. If performance
+ does become an issue, the solution is to use an Event that
+ we wait on here (and set above), but that implies a place to
+ create the event before this routine is called. */
+ while (! once->done)
+ Sleep (0);
+ }
+ }
+ return 0;
+}
+
+/* Windows32 thread local keys don't support destructors; this leads to
+ leaks, especially in threaded applications making extensive use of
+ C++ EH. Mingw uses a thread-support DLL to work around this problem. */
+
+int
+__gthr_win32_key_create (__gthread_key_t *key,
+ void (*dtor) (void *) __attribute__((unused)))
+{
+ int status = 0;
+ DWORD tls_index = TlsAlloc ();
+ if (tls_index != 0xFFFFFFFF)
+ {
+ *key = tls_index;
+#ifdef MINGW32_SUPPORTS_MT_EH
+ /* Mingw runtime will run the dtors in reverse order for each thread
+ when the thread exits. */
+ status = __mingwthr_key_dtor (*key, dtor);
+#endif
+ }
+ else
+ status = (int) GetLastError ();
+ return status;
+}
+
+int
+__gthr_win32_key_delete (__gthread_key_t key)
+{
+ return (TlsFree (key) != 0) ? 0 : (int) GetLastError ();
+}
+
+void *
+__gthr_win32_getspecific (__gthread_key_t key)
+{
+ DWORD lasterror;
+ void *ptr;
+ lasterror = GetLastError();
+ ptr = TlsGetValue(key);
+ SetLastError( lasterror );
+ return ptr;
+}
+
+int
+__gthr_win32_setspecific (__gthread_key_t key, const void *ptr)
+{
+ if (TlsSetValue (key, CONST_CAST2(void *, const void *, ptr)) != 0)
+ return 0;
+ else
+ return GetLastError ();
+}
+
+void
+__gthr_win32_mutex_init_function (__gthread_mutex_t *mutex)
+{
+ mutex->counter = -1;
+ mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
+}
+
+void
+__gthr_win32_mutex_destroy (__gthread_mutex_t *mutex)
+{
+ CloseHandle ((HANDLE) mutex->sema);
+}
+
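+/* A note on the scheme used below (our description; "benaphore" is
+ not a term used in this file): counter starts at -1, so the first
+ locker's InterlockedIncrement returns 0 and acquires the mutex
+ without entering the kernel; later lockers drive the counter
+ positive and sleep on the semaphore, which the unlock path signals
+ once per waiter. */
+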
+int
+__gthr_win32_mutex_lock (__gthread_mutex_t *mutex)
+{
+ if (InterlockedIncrement (&mutex->counter) == 0 ||
+ WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
+ return 0;
+ else
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&mutex->counter);
+ return 1;
+ }
+}
+
+int
+__gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
+{
+ if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
+ return 0;
+ else
+ return 1;
+}
+
+int
+__gthr_win32_mutex_unlock (__gthread_mutex_t *mutex)
+{
+ if (InterlockedDecrement (&mutex->counter) >= 0)
+ return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
+ else
+ return 0;
+}
+
+void
+__gthr_win32_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
+{
+ mutex->counter = -1;
+ mutex->depth = 0;
+ mutex->owner = 0;
+ mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
+}
+
+int
+__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
+{
+ DWORD me = GetCurrentThreadId();
+ if (InterlockedIncrement (&mutex->counter) == 0)
+ {
+ mutex->depth = 1;
+ mutex->owner = me;
+ }
+ else if (mutex->owner == me)
+ {
+ InterlockedDecrement (&mutex->counter);
+ ++(mutex->depth);
+ }
+ else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
+ {
+ mutex->depth = 1;
+ mutex->owner = me;
+ }
+ else
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&mutex->counter);
+ return 1;
+ }
+ return 0;
+}
+
+int
+__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
+{
+ DWORD me = GetCurrentThreadId();
+ if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
+ {
+ mutex->depth = 1;
+ mutex->owner = me;
+ }
+ else if (mutex->owner == me)
+ ++(mutex->depth);
+ else
+ return 1;
+
+ return 0;
+}
+
+int
+__gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
+{
+ --(mutex->depth);
+ if (mutex->depth == 0)
+ {
+ mutex->owner = 0;
+
+ if (InterlockedDecrement (&mutex->counter) >= 0)
+ return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
+ }
+
+ return 0;
+}
diff --git a/libgcc/config/i386/gthr-win32.h b/libgcc/config/i386/gthr-win32.h
new file mode 100644
index 00000000000..53f8396cc81
--- /dev/null
+++ b/libgcc/config/i386/gthr-win32.h
@@ -0,0 +1,772 @@
+/* Threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+
+/* Copyright (C) 1999, 2000, 2002, 2003, 2004, 2005, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Mumit Khan <khan@xraylith.wisc.edu>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_WIN32_H
+#define GCC_GTHR_WIN32_H
+
+/* Make sure CONST_CAST2 (origin in system.h) is declared. */
+#ifndef CONST_CAST2
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
+#endif
+
+/* Windows32 threads specific definitions. The windows32 threading model
+ does not map well into pthread-inspired gcc's threading model, and so
+ there are caveats one needs to be aware of.
+
+ 1. The destructor supplied to __gthread_key_create is ignored for
+ generic x86-win32 ports. This will certainly cause memory leaks
+ due to unreclaimed eh contexts (sizeof (eh_context) is at least
+ 24 bytes for x86 currently).
+
+ This memory leak may be significant for long-running applications
+ that make heavy use of C++ EH.
+
+ However, Mingw runtime (version 0.3 or newer) provides a mechanism
+ to emulate pthreads key dtors; the runtime provides a special DLL,
+ linked in if -mthreads option is specified, that runs the dtors in
+ the reverse order of registration when each thread exits. If
+ -mthreads option is not given, a stub is linked in instead of the
+ DLL, which results in a memory leak. Other x86-win32 ports can of
+ course use the same technique to avoid the leak.
+
+ 2. The error codes returned are not POSIX-like, and are cast into
+ ints. This may cause incorrect error returns due to truncation of
+ values on hardware where sizeof (DWORD) > sizeof (int).
+
+ 3. We are currently using a special mutex instead of the Critical
+ Sections, since Win9x does not support TryEnterCriticalSection
+ (while NT does).
+
+ The basic framework should work well enough. In the long term, GCC
+ needs to use Structured Exception Handling on Windows32. */
+
+#define __GTHREADS 1
+
+#include <errno.h>
+#ifdef __MINGW32__
+#include <_mingw.h>
+#endif
+
+#ifndef __UNUSED_PARAM
+#define __UNUSED_PARAM(x) x
+#endif
+
+#ifdef _LIBOBJC
+
+/* This is necessary to prevent windef.h (included from windows.h) from
+ defining its own BOOL as a typedef. */
+#ifndef __OBJC__
+#define __OBJC__
+#endif
+#include <windows.h>
+/* Now undef the windows BOOL. */
+#undef BOOL
+
+/* Key structure for maintaining thread specific storage */
+static DWORD __gthread_objc_data_tls = (DWORD) -1;
+
+/* Backend initialization functions */
+
+/* Initialize the threads subsystem. */
+int
+__gthread_objc_init_thread_system (void)
+{
+ /* Initialize the thread storage key. */
+ if ((__gthread_objc_data_tls = TlsAlloc ()) != (DWORD) -1)
+ return 0;
+ else
+ return -1;
+}
+
+/* Close the threads subsystem. */
+int
+__gthread_objc_close_thread_system (void)
+{
+ if (__gthread_objc_data_tls != (DWORD) -1)
+ TlsFree (__gthread_objc_data_tls);
+ return 0;
+}
+
+/* Backend thread functions */
+
+/* Create a new thread of execution. */
+objc_thread_t
+__gthread_objc_thread_detach (void (*func)(void *arg), void *arg)
+{
+ DWORD thread_id = 0;
+ HANDLE win32_handle;
+
+ if (!(win32_handle = CreateThread (NULL, 0, (LPTHREAD_START_ROUTINE) func,
+ arg, 0, &thread_id)))
+ thread_id = 0;
+
+ return (objc_thread_t) (INT_PTR) thread_id;
+}
+
+/* Set the current thread's priority. */
+int
+__gthread_objc_thread_set_priority (int priority)
+{
+ int sys_priority = 0;
+
+ switch (priority)
+ {
+ case OBJC_THREAD_INTERACTIVE_PRIORITY:
+ sys_priority = THREAD_PRIORITY_NORMAL;
+ break;
+ default:
+ case OBJC_THREAD_BACKGROUND_PRIORITY:
+ sys_priority = THREAD_PRIORITY_BELOW_NORMAL;
+ break;
+ case OBJC_THREAD_LOW_PRIORITY:
+ sys_priority = THREAD_PRIORITY_LOWEST;
+ break;
+ }
+
+ /* Change priority */
+ if (SetThreadPriority (GetCurrentThread (), sys_priority))
+ return 0;
+ else
+ return -1;
+}
+
+/* Return the current thread's priority. */
+int
+__gthread_objc_thread_get_priority (void)
+{
+ int sys_priority;
+
+ sys_priority = GetThreadPriority (GetCurrentThread ());
+
+ switch (sys_priority)
+ {
+ case THREAD_PRIORITY_HIGHEST:
+ case THREAD_PRIORITY_TIME_CRITICAL:
+ case THREAD_PRIORITY_ABOVE_NORMAL:
+ case THREAD_PRIORITY_NORMAL:
+ return OBJC_THREAD_INTERACTIVE_PRIORITY;
+
+ default:
+ case THREAD_PRIORITY_BELOW_NORMAL:
+ return OBJC_THREAD_BACKGROUND_PRIORITY;
+
+ case THREAD_PRIORITY_IDLE:
+ case THREAD_PRIORITY_LOWEST:
+ return OBJC_THREAD_LOW_PRIORITY;
+ }
+
+ /* Couldn't get priority. */
+ return -1;
+}
+
+/* Yield our process time to another thread. */
+void
+__gthread_objc_thread_yield (void)
+{
+ Sleep (0);
+}
+
+/* Terminate the current thread. */
+int
+__gthread_objc_thread_exit (void)
+{
+ /* exit the thread */
+ ExitThread (__objc_thread_exit_status);
+
+ /* Failed if we reached here */
+ return -1;
+}
+
+/* Returns an integer value which uniquely describes a thread. */
+objc_thread_t
+__gthread_objc_thread_id (void)
+{
+ return (objc_thread_t) (INT_PTR) GetCurrentThreadId ();
+}
+
+/* Sets the thread's local storage pointer. */
+int
+__gthread_objc_thread_set_data (void *value)
+{
+ if (TlsSetValue (__gthread_objc_data_tls, value))
+ return 0;
+ else
+ return -1;
+}
+
+/* Returns the thread's local storage pointer. */
+void *
+__gthread_objc_thread_get_data (void)
+{
+ DWORD lasterror;
+ void *ptr;
+
+ lasterror = GetLastError ();
+
+ ptr = TlsGetValue (__gthread_objc_data_tls); /* Return thread data. */
+
+ SetLastError (lasterror);
+
+ return ptr;
+}
+
+/* Backend mutex functions */
+
+/* Allocate a mutex. */
+int
+__gthread_objc_mutex_allocate (objc_mutex_t mutex)
+{
+ if ((mutex->backend = (void *) CreateMutex (NULL, 0, NULL)) == NULL)
+ return -1;
+ else
+ return 0;
+}
+
+/* Deallocate a mutex. */
+int
+__gthread_objc_mutex_deallocate (objc_mutex_t mutex)
+{
+ CloseHandle ((HANDLE) (mutex->backend));
+ return 0;
+}
+
+/* Grab a lock on a mutex. */
+int
+__gthread_objc_mutex_lock (objc_mutex_t mutex)
+{
+ int status;
+
+ status = WaitForSingleObject ((HANDLE) (mutex->backend), INFINITE);
+ if (status != WAIT_OBJECT_0 && status != WAIT_ABANDONED)
+ return -1;
+ else
+ return 0;
+}
+
+/* Try to grab a lock on a mutex. */
+int
+__gthread_objc_mutex_trylock (objc_mutex_t mutex)
+{
+ int status;
+
+ status = WaitForSingleObject ((HANDLE) (mutex->backend), 0);
+ if (status != WAIT_OBJECT_0 && status != WAIT_ABANDONED)
+ return -1;
+ else
+ return 0;
+}
+
+/* Unlock the mutex */
+int
+__gthread_objc_mutex_unlock (objc_mutex_t mutex)
+{
+ if (ReleaseMutex ((HANDLE) (mutex->backend)) == 0)
+ return -1;
+ else
+ return 0;
+}
+
+/* Backend condition mutex functions */
+
+/* Allocate a condition. */
+int
+__gthread_objc_condition_allocate (objc_condition_t __UNUSED_PARAM(condition))
+{
+ /* Unimplemented. */
+ return -1;
+}
+
+/* Deallocate a condition. */
+int
+__gthread_objc_condition_deallocate (objc_condition_t __UNUSED_PARAM(condition))
+{
+ /* Unimplemented. */
+ return -1;
+}
+
+/* Wait on the condition */
+int
+__gthread_objc_condition_wait (objc_condition_t __UNUSED_PARAM(condition),
+ objc_mutex_t __UNUSED_PARAM(mutex))
+{
+ /* Unimplemented. */
+ return -1;
+}
+
+/* Wake up all threads waiting on this condition. */
+int
+__gthread_objc_condition_broadcast (objc_condition_t __UNUSED_PARAM(condition))
+{
+ /* Unimplemented. */
+ return -1;
+}
+
+/* Wake up one thread waiting on this condition. */
+int
+__gthread_objc_condition_signal (objc_condition_t __UNUSED_PARAM(condition))
+{
+ /* Unimplemented. */
+ return -1;
+}
+
+#else /* _LIBOBJC */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long __gthread_key_t;
+
+typedef struct {
+ int done;
+ long started;
+} __gthread_once_t;
+
+typedef struct {
+ long counter;
+ void *sema;
+} __gthread_mutex_t;
+
+typedef struct {
+ long counter;
+ long depth;
+ unsigned long owner;
+ void *sema;
+} __gthread_recursive_mutex_t;
+
+#define __GTHREAD_ONCE_INIT {0, -1}
+#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
+#define __GTHREAD_MUTEX_INIT_DEFAULT {-1, 0}
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION \
+ __gthread_recursive_mutex_init_function
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_DEFAULT {-1, 0, 0, 0}
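
As a usage sketch, these macros let synchronization objects be declared with static storage and compile-time initial values (declaration names are illustrative; the init functions defined below remain the full setup path):

    /* Illustrative static declarations using the initializers above.  */
    static __gthread_once_t once_control = __GTHREAD_ONCE_INIT;
    static __gthread_mutex_t lock = __GTHREAD_MUTEX_INIT_DEFAULT;
    static __gthread_recursive_mutex_t rlock
      = __GTHREAD_RECURSIVE_MUTEX_INIT_DEFAULT;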
+
+#if defined (_WIN32) && !defined(__CYGWIN__)
+#define MINGW32_SUPPORTS_MT_EH 1
+/* Mingw runtime >= v0.3 provides a magic variable that is set to nonzero
+   if the -mthreads option was specified, or 0 otherwise. This is to get around
+ the lack of weak symbols in PE-COFF. */
+extern int _CRT_MT;
+extern int __mingwthr_key_dtor (unsigned long, void (*) (void *));
+#endif /* _WIN32 && !__CYGWIN__ */
+
+/* The Windows95 kernel does not export InterlockedCompareExchange.
+ This provides a substitute. When building apps that reference
+ gthread_mutex_try_lock, the __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+ macro must be defined if Windows95 is a target. Currently
+ gthread_mutex_try_lock is not referenced by libgcc or libstdc++. */
+#ifdef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+static inline long
+__gthr_i486_lock_cmp_xchg(long *__dest, long __xchg, long __comperand)
+{
+ long result;
+ __asm__ __volatile__ ("\n\
+ lock\n\
+ cmpxchg{l} {%4, %1|%1, %4}\n"
+ : "=a" (result), "=m" (*__dest)
+ : "0" (__comperand), "m" (*__dest), "r" (__xchg)
+ : "cc");
+ return result;
+}
+#define __GTHR_W32_InterlockedCompareExchange __gthr_i486_lock_cmp_xchg
+#else /* __GTHREAD_I486_INLINE_LOCK_PRIMITIVES */
+#define __GTHR_W32_InterlockedCompareExchange InterlockedCompareExchange
+#endif /* __GTHREAD_I486_INLINE_LOCK_PRIMITIVES */
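
Either way, __GTHR_W32_InterlockedCompareExchange must follow the usual compare-and-swap contract: atomically compare *__dest with the comparand, store the exchange value on a match, and return the value *__dest held beforehand. A hedged C11 restatement of that contract, for illustration only (the inline assembly above exists precisely because no such intrinsic could be assumed on Windows 95):

    #include <stdatomic.h>

    /* C11 sketch of the InterlockedCompareExchange contract; an
       illustration, not a replacement for the definitions above.  */
    static long
    cmp_xchg_sketch (_Atomic long *dest, long xchg, long comperand)
    {
      long expected = comperand;
      /* On a match *dest becomes xchg; either way EXPECTED ends up
         holding the prior value of *dest, which is returned.  */
      atomic_compare_exchange_strong (dest, &expected, xchg);
      return expected;
    }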
+
+static inline int
+__gthread_active_p (void)
+{
+#ifdef MINGW32_SUPPORTS_MT_EH
+ return _CRT_MT;
+#else
+ return 1;
+#endif
+}
+
+#if __GTHREAD_HIDE_WIN32API
+
+/* The implementations are in config/i386/gthr-win32.c in libgcc.a.
+   Only stubs are exposed to avoid polluting the C++ namespace with
+   Windows API definitions. */
+
+extern int __gthr_win32_once (__gthread_once_t *, void (*) (void));
+extern int __gthr_win32_key_create (__gthread_key_t *, void (*) (void*));
+extern int __gthr_win32_key_delete (__gthread_key_t);
+extern void * __gthr_win32_getspecific (__gthread_key_t);
+extern int __gthr_win32_setspecific (__gthread_key_t, const void *);
+extern void __gthr_win32_mutex_init_function (__gthread_mutex_t *);
+extern int __gthr_win32_mutex_lock (__gthread_mutex_t *);
+extern int __gthr_win32_mutex_trylock (__gthread_mutex_t *);
+extern int __gthr_win32_mutex_unlock (__gthread_mutex_t *);
+extern void
+ __gthr_win32_recursive_mutex_init_function (__gthread_recursive_mutex_t *);
+extern int __gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *);
+extern int
+ __gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *);
+extern int __gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *);
+extern void __gthr_win32_mutex_destroy (__gthread_mutex_t *);
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_once (__once, __func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ return __gthr_win32_key_create (__key, __dtor);
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ return __gthr_win32_key_delete (__key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ return __gthr_win32_getspecific (__key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ return __gthr_win32_setspecific (__key, __ptr);
+}
+
+static inline void
+__gthread_mutex_init_function (__gthread_mutex_t *__mutex)
+{
+ __gthr_win32_mutex_init_function (__mutex);
+}
+
+static inline void
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ __gthr_win32_mutex_destroy (__mutex);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_mutex_lock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_mutex_trylock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_mutex_unlock (__mutex);
+ else
+ return 0;
+}
+
+static inline void
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ __gthr_win32_recursive_mutex_init_function (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_recursive_mutex_lock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_recursive_mutex_trylock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthr_win32_recursive_mutex_unlock (__mutex);
+ else
+ return 0;
+}
+
+#else /* ! __GTHREAD_HIDE_WIN32API */
+
+#include <windows.h>
+#include <errno.h>
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (! __gthread_active_p ())
+ return -1;
+ else if (__once == NULL || __func == NULL)
+ return EINVAL;
+
+ if (! __once->done)
+ {
+ if (InterlockedIncrement (&(__once->started)) == 0)
+ {
+ (*__func) ();
+ __once->done = TRUE;
+ }
+ else
+ {
+ /* Another thread is currently executing the code, so wait for it
+ to finish; yield the CPU in the meantime. If performance
+ does become an issue, the solution is to use an Event that
+ we wait on here (and set above), but that implies a place to
+ create the event before this routine is called. */
+ while (! __once->done)
+ Sleep (0);
+ }
+ }
+
+ return 0;
+}
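
As a usage sketch, a caller funnels one-time setup through this routine; the started/done pair above guarantees the function body runs in at most one thread. Names below are hypothetical, and note that the routine returns -1 without running the function when threads are inactive:

    /* Hypothetical one-time initialization via __gthread_once.  */
    static __gthread_once_t table_once = __GTHREAD_ONCE_INIT;
    static int table[256];

    static void
    init_table (void)
    {
      int i;
      for (i = 0; i < 256; i++)  /* runs in at most one thread */
        table[i] = i;
    }

    static int
    lookup (int i)
    {
      if (__gthread_once (&table_once, init_table) != 0)
        init_table ();           /* threads inactive: caller is
                                    single-threaded, init directly */
      return table[i];
    }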
+
+/* Windows32 thread local keys don't support destructors; this leads to
+ leaks, especially in threaded applications making extensive use of
+   C++ EH. Mingw uses a thread-support DLL to work around this problem. */
+static inline int
+__gthread_key_create (__gthread_key_t *__key,
+ void (*__dtor) (void *) __attribute__((unused)))
+{
+ int __status = 0;
+ DWORD __tls_index = TlsAlloc ();
+ if (__tls_index != 0xFFFFFFFF)
+ {
+ *__key = __tls_index;
+#ifdef MINGW32_SUPPORTS_MT_EH
+ /* Mingw runtime will run the dtors in reverse order for each thread
+ when the thread exits. */
+ __status = __mingwthr_key_dtor (*__key, __dtor);
+#endif
+ }
+ else
+ __status = (int) GetLastError ();
+ return __status;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ return (TlsFree (__key) != 0) ? 0 : (int) GetLastError ();
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ DWORD __lasterror;
+ void *__ptr;
+
+ __lasterror = GetLastError ();
+
+ __ptr = TlsGetValue (__key);
+
+ SetLastError (__lasterror);
+
+ return __ptr;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ if (TlsSetValue (__key, CONST_CAST2(void *, const void *, __ptr)) != 0)
+ return 0;
+ else
+ return GetLastError ();
+}
+
+static inline void
+__gthread_mutex_init_function (__gthread_mutex_t *__mutex)
+{
+ __mutex->counter = -1;
+ __mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
+}
+
+static inline void
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ CloseHandle ((HANDLE) __mutex->sema);
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ int __status = 0;
+
+ if (__gthread_active_p ())
+ {
+ if (InterlockedIncrement (&__mutex->counter) == 0 ||
+ WaitForSingleObject (__mutex->sema, INFINITE) == WAIT_OBJECT_0)
+ __status = 0;
+ else
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&__mutex->counter);
+ __status = 1;
+ }
+ }
+ return __status;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ int __status = 0;
+
+ if (__gthread_active_p ())
+ {
+ if (__GTHR_W32_InterlockedCompareExchange (&__mutex->counter, 0, -1) < 0)
+ __status = 0;
+ else
+ __status = 1;
+ }
+ return __status;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ if (InterlockedDecrement (&__mutex->counter) >= 0)
+ return ReleaseSemaphore (__mutex->sema, 1, NULL) ? 0 : 1;
+ }
+ return 0;
+}
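
The three functions above implement the classic counter-plus-semaphore mutex: the counter starts at -1, the first locker's InterlockedIncrement brings it to 0 and acquires the lock with no kernel call, later lockers see a higher value and sleep on the semaphore, and unlock releases the semaphore only when the decremented counter shows a waiter is present. A hedged usage sketch, assuming the mutex was prepared with the init function so the contended-path semaphore exists:

    /* Hypothetical critical section over the primitives above;
       counter_lock must first be set up once, e.g. at startup:
       __gthread_mutex_init_function (&counter_lock);  */
    static __gthread_mutex_t counter_lock;
    static long shared_counter;

    static void
    bump_counter (void)
    {
      if (__gthread_mutex_lock (&counter_lock) == 0)
        {
          ++shared_counter;      /* protected update */
          __gthread_mutex_unlock (&counter_lock);
        }
    }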
+
+static inline void
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ __mutex->counter = -1;
+ __mutex->depth = 0;
+ __mutex->owner = 0;
+ __mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ DWORD __me = GetCurrentThreadId();
+ if (InterlockedIncrement (&__mutex->counter) == 0)
+ {
+ __mutex->depth = 1;
+ __mutex->owner = __me;
+ }
+ else if (__mutex->owner == __me)
+ {
+ InterlockedDecrement (&__mutex->counter);
+ ++(__mutex->depth);
+ }
+ else if (WaitForSingleObject (__mutex->sema, INFINITE) == WAIT_OBJECT_0)
+ {
+ __mutex->depth = 1;
+ __mutex->owner = __me;
+ }
+ else
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&__mutex->counter);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ DWORD __me = GetCurrentThreadId();
+ if (__GTHR_W32_InterlockedCompareExchange (&__mutex->counter, 0, -1) < 0)
+ {
+ __mutex->depth = 1;
+ __mutex->owner = __me;
+ }
+ else if (__mutex->owner == __me)
+ ++(__mutex->depth);
+ else
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ --(__mutex->depth);
+ if (__mutex->depth == 0)
+ {
+ __mutex->owner = 0;
+
+ if (InterlockedDecrement (&__mutex->counter) >= 0)
+ return ReleaseSemaphore (__mutex->sema, 1, NULL) ? 0 : 1;
+ }
+ }
+ return 0;
+}
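
The recursive variant layers an owner/depth pair over the same counter-and-semaphore core: a re-lock by the owning thread undoes its own increment and merely bumps the depth, and only when the depth drops back to zero does unlock hand the lock to a waiter. A hedged re-entrancy sketch (function names hypothetical):

    /* Hypothetical re-entrant locking; rlock must first be set up via
       __gthread_recursive_mutex_init_function (&rlock).  */
    static __gthread_recursive_mutex_t rlock;

    static void
    inner (void)
    {
      __gthread_recursive_mutex_lock (&rlock);    /* same owner: depth 2 */
      /* ... */
      __gthread_recursive_mutex_unlock (&rlock);  /* depth back to 1 */
    }

    static void
    outer (void)
    {
      __gthread_recursive_mutex_lock (&rlock);    /* depth 1 */
      inner ();                                   /* safe re-entry */
      __gthread_recursive_mutex_unlock (&rlock);  /* full release */
    }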
+
+#endif /* __GTHREAD_HIDE_WIN32API */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBOBJC */
+
+#endif /* ! GCC_GTHR_WIN32_H */
diff --git a/libgcc/config/i386/libgcc-darwin.10.4.ver b/libgcc/config/i386/libgcc-darwin.10.4.ver
new file mode 100644
index 00000000000..67f5e239ca1
--- /dev/null
+++ b/libgcc/config/i386/libgcc-darwin.10.4.ver
@@ -0,0 +1,98 @@
+# Copyright (C) 2005 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divxc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunsxfdi
+___fixunsxfsi
+___fixxfdi
+___floatdidf
+___floatdisf
+___floatdixf
+___gcc_personality_v0
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___mulvdi3
+___mulvsi3
+___mulxc3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powixf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/libgcc/config/i386/libgcc-darwin.10.5.ver b/libgcc/config/i386/libgcc-darwin.10.5.ver
new file mode 100644
index 00000000000..eeec9fbfcdf
--- /dev/null
+++ b/libgcc/config/i386/libgcc-darwin.10.5.ver
@@ -0,0 +1,102 @@
+# Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divxc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunsxfdi
+___fixunsxfsi
+___fixxfdi
+___floatdidf
+___floatdisf
+___floatdixf
+___floatundidf
+___floatundisf
+___floatundixf
+___gcc_personality_v0
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___mulvdi3
+___mulvsi3
+___mulxc3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powixf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/libgcc/config/i386/libgcc-glibc.ver b/libgcc/config/i386/libgcc-glibc.ver
new file mode 100644
index 00000000000..e79d3267f6f
--- /dev/null
+++ b/libgcc/config/i386/libgcc-glibc.ver
@@ -0,0 +1,186 @@
+# Copyright (C) 2008, 2010 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
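
Concretely, it is ordinary 64-bit integer arithmetic on 32-bit x86 that pulls these routines in; a hedged C sketch of code whose compiled form calls one of them:

    /* On 32-bit x86, GCC lowers this division to a call to __divdi3,
       one of the routines whose version tags are pinned below.  */
    long long
    div64 (long long a, long long b)
    {
      return a / b;
    }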
+
+%ifndef __x86_64__
+%exclude {
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%inherit GCC_3.0 GLIBC_2.0
+GLIBC_2.0 {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+%endif
+
+# 128-bit long double support was introduced with GCC 4.3.0 for 64-bit
+# and with GCC 4.4.0 for 32-bit.  These lines make the symbols get
+# a @@GCC_4.3.0 or @@GCC_4.4.0 version attached.
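
A hedged C illustration of code that references these symbols: with GCC's 128-bit float extension on x86, a plain addition lowers to a TF-mode libgcc call.

    /* This addition becomes a call to __addtf3, one of the symbols
       versioned in the stanzas below.  */
    __float128
    add128 (__float128 a, __float128 b)
    {
      return a + b;
    }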
+
+%exclude {
+ __addtf3
+ __divtc3
+ __divtf3
+ __eqtf2
+ __extenddftf2
+ __extendsftf2
+ __extendxftf2
+ __fixtfdi
+ __fixtfsi
+ __fixtfti
+ __fixunstfdi
+ __fixunstfsi
+ __fixunstfti
+ __floatditf
+ __floatsitf
+ __floattitf
+ __floatunditf
+ __floatunsitf
+ __floatuntitf
+ __getf2
+ __gttf2
+ __letf2
+ __lttf2
+ __multc3
+ __multf3
+ __negtf2
+ __netf2
+ __powitf2
+ __subtf3
+ __trunctfdf2
+ __trunctfsf2
+ __trunctfxf2
+ __unordtf2
+}
+
+%ifdef __x86_64__
+# Those symbols had improper versions when they were added to gcc 4.3.0.
+# We corrected the default version to GCC_4.3.0. But we keep the old
+# version for backward binary compatibility.
+GCC_3.0 {
+ __gttf2
+ __lttf2
+ __netf2
+}
+
+GCC_4.0.0 {
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+GCC_4.3.0 {
+ __addtf3
+ __divtc3
+ __divtf3
+ __eqtf2
+ __extenddftf2
+ __extendsftf2
+ __extendxftf2
+ __fixtfdi
+ __fixtfsi
+ __fixtfti
+ __fixunstfdi
+ __fixunstfsi
+ __fixunstfti
+ __floatditf
+ __floatsitf
+ __floattitf
+ __floatunditf
+ __floatunsitf
+ __floatuntitf
+ __getf2
+ __gttf2
+ __letf2
+ __lttf2
+ __multc3
+ __multf3
+ __negtf2
+ __netf2
+ __powitf2
+ __subtf3
+ __trunctfdf2
+ __trunctfsf2
+ __trunctfxf2
+ __unordtf2
+}
+%else
+GCC_4.4.0 {
+ __addtf3
+ __copysigntf3
+ __divtc3
+ __divtf3
+ __eqtf2
+ __extenddftf2
+ __extendsftf2
+ __fabstf2
+ __fixtfdi
+ __fixtfsi
+ __fixunstfdi
+ __fixunstfsi
+ __floatditf
+ __floatsitf
+ __floatunditf
+ __floatunsitf
+ __getf2
+ __gttf2
+ __letf2
+ __lttf2
+ __multc3
+ __multf3
+ __negtf2
+ __netf2
+ __powitf2
+ __subtf3
+ __trunctfdf2
+ __trunctfsf2
+ __trunctfxf2
+ __unordtf2
+}
+GCC_4.5.0 {
+ __extendxftf2
+}
+%endif
diff --git a/libgcc/config/i386/t-chkstk b/libgcc/config/i386/t-chkstk
new file mode 100644
index 00000000000..822981faab8
--- /dev/null
+++ b/libgcc/config/i386/t-chkstk
@@ -0,0 +1,2 @@
+LIB1ASMSRC = i386/cygwin.S
+LIB1ASMFUNCS = _chkstk _chkstk_ms
diff --git a/libgcc/config/i386/t-cygming b/libgcc/config/i386/t-cygming
index 048cadbd5a1..d76004c48e5 100644
--- a/libgcc/config/i386/t-cygming
+++ b/libgcc/config/i386/t-cygming
@@ -1,11 +1,14 @@
+# If we are building next to winsup, this will let us find the real
+# limits.h when building libgcc2. Otherwise, winsup must be installed
+# first.
+LIBGCC2_INCLUDES = -I$(srcdir)/../winsup/w32api/include
+
CUSTOM_CRTSTUFF = yes
-crtbegin.o: $(gcc_srcdir)/config/i386/cygming-crtbegin.c
- $(crt_compile) -fno-omit-frame-pointer -c \
- $(gcc_srcdir)/config/i386/cygming-crtbegin.c
+crtbegin.o: $(srcdir)/config/i386/cygming-crtbegin.c
+ $(crt_compile) -fno-omit-frame-pointer -c $<
# We intentionally use an implementation-reserved init priority of 0,
# so allow the warning.
-crtend.o: $(gcc_srcdir)/config/i386/cygming-crtend.c
- $(crt_compile) -fno-omit-frame-pointer -Wno-error -c \
- $(gcc_srcdir)/config/i386/cygming-crtend.c
+crtend.o: $(srcdir)/config/i386/cygming-crtend.c
+ $(crt_compile) -fno-omit-frame-pointer -Wno-error -c $<
diff --git a/libgcc/config/i386/t-cygwin b/libgcc/config/i386/t-cygwin
new file mode 100644
index 00000000000..f85ec24220e
--- /dev/null
+++ b/libgcc/config/i386/t-cygwin
@@ -0,0 +1,19 @@
+# If we are building next to winsup, this will let us find the real
+# limits.h when building libgcc2. Otherwise, winsup must be installed
+# first.
+LIBGCC2_INCLUDES += -I$(srcdir)/../winsup/include \
+ -I$(srcdir)/../winsup/cygwin/include
+
+# Cygwin-specific parts of LIB_SPEC
+SHLIB_LC = -lcygwin -ladvapi32 -lshell32 -luser32 -lkernel32
+
+# We have already included one of the t-{dw2,sjlj}-eh fragments for EH_MODEL
+SHLIB_EH_EXTENSION = $(subst -dw2,,-$(EH_MODEL))
+
+# Cygwin uses different conventions than MinGW; override generic SHLIB_ def'ns here.
+SHLIB_IMPLIB = @shlib_base_name@$(SHLIB_EXT).a
+SHLIB_SONAME = cyggcc_s$(SHLIB_EH_EXTENSION)-$(SHLIB_SOVERSION)$(SHLIB_EXT)
+# This must match the definitions of SHLIB_SONAME/SHLIB_SOVERSION and LIBGCC_SONAME.
+# We'd like to use SHLIB_SONAME here too, and we can, since
+# we don't rely on shlib_base_name substitution for it.
+SHLIB_MKMAP_OPTS = -v pe_dll=$(SHLIB_SONAME)
diff --git a/libgcc/config/i386/t-darwin b/libgcc/config/i386/t-darwin
index 4578f74c3fa..5f2c69725d0 100644
--- a/libgcc/config/i386/t-darwin
+++ b/libgcc/config/i386/t-darwin
@@ -1 +1,3 @@
-SHLIB_VERPFX = $(gcc_srcdir)/config/i386/darwin-libgcc
+LIB2_SIDITI_CONV_FUNCS = yes
+LIB2ADD = $(srcdir)/config/darwin-64.c
+LIB2FUNCS_EXCLUDE = _fixtfdi _fixunstfdi _floatditf _floatunditf
diff --git a/libgcc/config/i386/t-darwin64 b/libgcc/config/i386/t-darwin64
index 4578f74c3fa..30cf58b38f9 100644
--- a/libgcc/config/i386/t-darwin64
+++ b/libgcc/config/i386/t-darwin64
@@ -1 +1,2 @@
-SHLIB_VERPFX = $(gcc_srcdir)/config/i386/darwin-libgcc
+LIB2_SIDITI_CONV_FUNCS = yes
+LIB2ADD = $(srcdir)/config/darwin-64.c
diff --git a/libgcc/config/i386/t-dlldir b/libgcc/config/i386/t-dlldir
new file mode 100644
index 00000000000..d2cf39bddd6
--- /dev/null
+++ b/libgcc/config/i386/t-dlldir
@@ -0,0 +1,2 @@
+# In a native build, target DLLs go in bindir, where they can be executed.
+SHLIB_DLLDIR = $(bindir)
diff --git a/libgcc/config/i386/t-dlldir-x b/libgcc/config/i386/t-dlldir-x
new file mode 100644
index 00000000000..0f8c29b4e90
--- /dev/null
+++ b/libgcc/config/i386/t-dlldir-x
@@ -0,0 +1,3 @@
+# In a cross build, bindir contains host not target binaries, so target DLLs
+# instead go in toolexeclibdir, alongside other target binaries and static libs.
+SHLIB_DLLDIR = $(toolexeclibdir)
diff --git a/libgcc/config/i386/t-dw2-eh b/libgcc/config/i386/t-dw2-eh
new file mode 100644
index 00000000000..ffcc39aea33
--- /dev/null
+++ b/libgcc/config/i386/t-dw2-eh
@@ -0,0 +1,3 @@
+
+# We are using Dwarf-2 EH.
+EH_MODEL = dw2
diff --git a/libgcc/config/i386/t-gthr-win32 b/libgcc/config/i386/t-gthr-win32
new file mode 100644
index 00000000000..e7380d6f6e4
--- /dev/null
+++ b/libgcc/config/i386/t-gthr-win32
@@ -0,0 +1,2 @@
+# We hide calls to w32api needed for w32 thread support here:
+LIB2ADD = $(srcdir)/config/i386/gthr-win32.c
diff --git a/libgcc/config/i386/t-interix b/libgcc/config/i386/t-interix
new file mode 100644
index 00000000000..8889e7c6c63
--- /dev/null
+++ b/libgcc/config/i386/t-interix
@@ -0,0 +1,3 @@
+# We need to override LIBGCC2_DEBUG_CFLAGS so libgcc2 will be
+# built without debugging information.
+LIBGCC2_DEBUG_CFLAGS =
diff --git a/libgcc/config/i386/t-linux b/libgcc/config/i386/t-linux
new file mode 100644
index 00000000000..29b4c223983
--- /dev/null
+++ b/libgcc/config/i386/t-linux
@@ -0,0 +1,4 @@
+# On 64-bit we do not need any exports for glibc for 64-bit libgcc_s.
+# We need to support TImode for x86.  Override the settings from
+# t-slibgcc-elf-ver and t-linux.
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/i386/libgcc-glibc.ver
diff --git a/libgcc/config/i386/t-mingw-pthread b/libgcc/config/i386/t-mingw-pthread
new file mode 100644
index 00000000000..622ef82be6e
--- /dev/null
+++ b/libgcc/config/i386/t-mingw-pthread
@@ -0,0 +1,2 @@
+SHLIB_PTHREAD_CFLAG = -pthread
+SHLIB_PTHREAD_LDFLAG = -Wl,-lpthread
diff --git a/libgcc/config/i386/t-mingw32 b/libgcc/config/i386/t-mingw32
new file mode 100644
index 00000000000..bfdef6723ce
--- /dev/null
+++ b/libgcc/config/i386/t-mingw32
@@ -0,0 +1,2 @@
+# MinGW-specific parts of LIB_SPEC
+SHLIB_LC = -lmingwthrd -lmingw32 -lmingwex -lmoldname -lmsvcrt -ladvapi32 -lshell32 -luser32 -lkernel32
diff --git a/libgcc/config/i386/t-nto b/libgcc/config/i386/t-nto
new file mode 100644
index 00000000000..44c90661824
--- /dev/null
+++ b/libgcc/config/i386/t-nto
@@ -0,0 +1,3 @@
+HOST_LIBGCC2_CFLAGS += -fexceptions
+
+CRTSTUFF_T_CFLAGS = -fno-omit-frame-pointer $(PICFLAG)
diff --git a/libgcc/config/i386/t-sjlj-eh b/libgcc/config/i386/t-sjlj-eh
new file mode 100644
index 00000000000..c9085f43216
--- /dev/null
+++ b/libgcc/config/i386/t-sjlj-eh
@@ -0,0 +1,3 @@
+
+# We are using SjLj EH.
+EH_MODEL = sjlj
diff --git a/libgcc/config/i386/t-slibgcc-cygming b/libgcc/config/i386/t-slibgcc-cygming
new file mode 100644
index 00000000000..3bee8b98084
--- /dev/null
+++ b/libgcc/config/i386/t-slibgcc-cygming
@@ -0,0 +1,58 @@
+# Build a shared libgcc library for PECOFF with a DEF file
+# with the GNU linker.
+#
+# mkmap-flat.awk is used with the pe_dll option to produce a DEF instead
+# of an ELF map file.
+#
+# Warning: If SHLIB_SOVERSION or SHLIB_SONAME are updated, LIBGCC_SONAME
+# in mingw32.h and SHLIB_MKMAP_OPTS below must be updated also.
+
+SHLIB_EXT = .dll
+SHLIB_IMPLIB = @shlib_base_name@.a
+SHLIB_SOVERSION = 1
+SHLIB_SONAME = @shlib_base_name@_$(EH_MODEL)-$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_MAP = @shlib_map_file@
+SHLIB_OBJS = @shlib_objs@
+SHLIB_DIR = @multilib_dir@/shlib
+SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
+# SHLIB_DLLDIR is defined by including one of either t-dlldir or t-dlldir-x
+# (native/cross build respectively) in the tmake_file list in
+# libgcc/config.host.
+ifndef SHLIB_DLLDIR
+$(error SHLIB_DLLDIR must be defined)
+endif
+ifndef SHLIB_PTHREAD_CFLAG
+SHLIB_PTHREAD_CFLAG =
+endif
+ifndef SHLIB_PTHREAD_LDFLAG
+SHLIB_PTHREAD_LDFLAG =
+endif
+
+SHLIB_LINK = $(LN_S) -f $(SHLIB_MAP) $(SHLIB_MAP).def && \
+ if [ ! -d $(SHLIB_DIR) ]; then \
+ mkdir $(SHLIB_DIR); \
+ else true; fi && \
+ $(CC) $(LIBGCC2_CFLAGS) $(SHLIB_PTHREAD_CFLAG) \
+ -shared -nodefaultlibs \
+ $(SHLIB_MAP).def \
+ -Wl,--out-implib,$(SHLIB_DIR)/$(SHLIB_IMPLIB).tmp \
+ -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp @multilib_flags@ \
+ $(SHLIB_OBJS) ${SHLIB_PTHREAD_LDFLAG} $(SHLIB_LC) && \
+ if [ -f $(SHLIB_DIR)/$(SHLIB_SONAME) ]; then \
+ mv -f $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $(SHLIB_DIR)/$(SHLIB_SONAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_DIR)/$(SHLIB_SONAME).tmp $(SHLIB_DIR)/$(SHLIB_SONAME) && \
+ mv $(SHLIB_DIR)/$(SHLIB_IMPLIB).tmp $(SHLIB_DIR)/$(SHLIB_IMPLIB)
+SHLIB_INSTALL = \
+ $(mkinstalldirs) $(DESTDIR)$(SHLIB_DLLDIR) \
+ $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL) $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $(DESTDIR)$(SHLIB_DLLDIR)/$(SHLIB_SONAME); \
+ $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIB_IMPLIB) \
+ $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_IMPLIB)
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+# We'd like to use SHLIB_SONAME here too, but shlib_base_name
+# does not get substituted before mkmap-flat.awk is run.
+SHLIB_MKMAP_OPTS = -v pe_dll=libgcc_s_$(EH_MODEL)-$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_MAPFILES = libgcc-std.ver
diff --git a/libgcc/config/i386/t-sol2 b/libgcc/config/i386/t-sol2
index 1102146a589..b9cfb00ef49 100644
--- a/libgcc/config/i386/t-sol2
+++ b/libgcc/config/i386/t-sol2
@@ -7,7 +7,7 @@
# We must also enable optimization to avoid having any code appear after
# the call & alignment statement, but before we switch back to the
# .text section.
-CRTSTUFF_T_CFLAGS = -fPIC -O2
+CRTSTUFF_T_CFLAGS = $(PICFLAG) -O2
# Add support for the introduction of 128-bit long double.
SHLIB_MAPFILES += $(srcdir)/config/i386/libgcc-sol2.ver
diff --git a/libgcc/config/ia64/__divxf3.asm b/libgcc/config/ia64/__divxf3.S
index f741bdaf9bc..9cba8f59423 100644
--- a/libgcc/config/ia64/__divxf3.asm
+++ b/libgcc/config/ia64/__divxf3.S
@@ -3,7 +3,7 @@
#endif
#define L__divxf3
-#include "config/ia64/lib1funcs.asm"
+#include "config/ia64/lib1funcs.S"
#ifdef SHARED
#undef __divtf3
diff --git a/libgcc/config/ia64/_fixtfdi.asm b/libgcc/config/ia64/_fixtfdi.S
index 4d13c808c51..863b70f7edc 100644
--- a/libgcc/config/ia64/_fixtfdi.asm
+++ b/libgcc/config/ia64/_fixtfdi.S
@@ -3,7 +3,7 @@
#endif
#define L_fixtfdi
-#include "config/ia64/lib1funcs.asm"
+#include "config/ia64/lib1funcs.S"
#ifdef SHARED
#undef __fixtfti
diff --git a/libgcc/config/ia64/_fixunstfdi.asm b/libgcc/config/ia64/_fixunstfdi.S
index b722d9e90dc..aac6a284eaa 100644
--- a/libgcc/config/ia64/_fixunstfdi.asm
+++ b/libgcc/config/ia64/_fixunstfdi.S
@@ -3,7 +3,7 @@
#endif
#define L_fixunstfdi
-#include "config/ia64/lib1funcs.asm"
+#include "config/ia64/lib1funcs.S"
#ifdef SHARED
#undef __fixunstfti
diff --git a/libgcc/config/ia64/_floatditf.asm b/libgcc/config/ia64/_floatditf.S
index 21d77028176..e37404d26d5 100644
--- a/libgcc/config/ia64/_floatditf.asm
+++ b/libgcc/config/ia64/_floatditf.S
@@ -3,7 +3,7 @@
#endif
#define L_floatditf
-#include "config/ia64/lib1funcs.asm"
+#include "config/ia64/lib1funcs.S"
#ifdef SHARED
#undef __floattitf
diff --git a/libgcc/config/ia64/crtbegin.S b/libgcc/config/ia64/crtbegin.S
new file mode 100644
index 00000000000..638489990d5
--- /dev/null
+++ b/libgcc/config/ia64/crtbegin.S
@@ -0,0 +1,254 @@
+/* Copyright (C) 2000, 2001, 2003, 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "auto-host.h"
+
+.section .ctors,"aw","progbits"
+ .align 8
+__CTOR_LIST__:
+ data8 -1
+
+.section .dtors,"aw","progbits"
+ .align 8
+__DTOR_LIST__:
+ data8 -1
+
+.section .jcr,"aw","progbits"
+ .align 8
+__JCR_LIST__:
+
+.section .sdata
+ .type dtor_ptr,@object
+ .size dtor_ptr,8
+dtor_ptr:
+ data8 @gprel(__DTOR_LIST__ + 8)
+
+ /* A handle for __cxa_finalize to manage c++ local destructors. */
+ .global __dso_handle
+ .type __dso_handle,@object
+ .size __dso_handle,8
+#ifdef SHARED
+ .section .data
+__dso_handle:
+ data8 __dso_handle
+#else
+ .section .bss
+ .align 8
+__dso_handle:
+ .skip 8
+#endif
+ .hidden __dso_handle
+
+
+#ifdef HAVE_INITFINI_ARRAY
+
+.section .fini_array, "a"
+ data8 @fptr(__do_global_dtors_aux)
+
+.section .init_array, "a"
+ data8 @fptr(__do_jv_register_classes)
+ data8 @fptr(__do_global_ctors_aux)
+
+#else /* !HAVE_INITFINI_ARRAY */
+/*
+ * Fragment of the ELF _fini routine that invokes our dtor cleanup.
+ *
+ * We make the call by indirection, because in large programs the
+ * .fini and .init sections are not in range of the destination, and
+ * we cannot allow the linker to insert a stub at the end of this
+ * fragment of the _fini function. Further, Itanium does not implement
+ * the long branch instructions, and we do not wish every program to
+ * trap to the kernel for emulation.
+ *
+ * Note that we require __do_global_dtors_aux to preserve the GP,
+ * so that the next fragment in .fini gets the right value.
+ */
+.section .fini,"ax","progbits"
+ { .mlx
+ movl r2 = @pcrel(__do_global_dtors_aux - 16)
+ }
+ { .mii
+ mov r3 = ip
+ ;;
+ add r2 = r2, r3
+ ;;
+ }
+ { .mib
+ nop 0
+ mov b6 = r2
+ br.call.sptk.many b0 = b6
+ }
+
+/* Likewise for _init. */
+
+.section .init,"ax","progbits"
+ { .mlx
+ movl r2 = @pcrel(__do_jv_register_classes - 16)
+ }
+ { .mii
+ mov r3 = ip
+ ;;
+ add r2 = r2, r3
+ ;;
+ }
+ { .mib
+ nop 0
+ mov b6 = r2
+ br.call.sptk.many b0 = b6
+ }
+#endif /* !HAVE_INITFINI_ARRAY */
+
+.section .text
+ .align 32
+ .proc __do_global_dtors_aux
+__do_global_dtors_aux:
+ .prologue
+#ifndef SHARED
+ .save ar.pfs, r35
+ alloc loc3 = ar.pfs, 0, 4, 1, 0
+ addl loc0 = @gprel(dtor_ptr), gp
+ .save rp, loc1
+ mov loc1 = rp
+ .body
+
+ mov loc2 = gp
+ nop 0
+ br.sptk.many .entry
+#else
+ /*
+ if (__cxa_finalize)
+ __cxa_finalize(__dso_handle)
+ */
+ .save ar.pfs, r35
+ alloc loc3 = ar.pfs, 0, 4, 1, 0
+ addl loc0 = @gprel(dtor_ptr), gp
+ addl r16 = @ltoff(@fptr(__cxa_finalize)), gp
+ ;;
+
+ ld8 r16 = [r16]
+ ;;
+ addl out0 = @ltoff(__dso_handle), gp
+ cmp.ne p7, p0 = r0, r16
+ ;;
+
+ ld8 out0 = [out0]
+(p7) ld8 r18 = [r16], 8
+ .save rp, loc1
+ mov loc1 = rp
+ .body
+ ;;
+
+ mov loc2 = gp
+(p7) ld8 gp = [r16]
+(p7) mov b6 = r18
+
+ nop 0
+ nop 0
+(p7) br.call.sptk.many rp = b6
+ ;;
+
+ nop 0
+ nop 0
+ br.sptk.many .entry
+#endif
+ /*
+ do {
+ dtor_ptr++;
+ (*(dtor_ptr-1)) ();
+ } while (dtor_ptr);
+ */
+.loop:
+ st8 [loc0] = r15 // update dtor_ptr (in memory)
+ ld8 r17 = [r16], 8 // r17 <- dtor's entry-point
+ nop 0
+ ;;
+
+ ld8 gp = [r16] // gp <- dtor's gp
+ mov b6 = r17
+ br.call.sptk.many rp = b6
+
+.entry: ld8 r15 = [loc0] // r15 <- dtor_ptr (gp-relative)
+ ;;
+ add r16 = r15, loc2 // r16 <- dtor_ptr (absolute)
+ adds r15 = 8, r15
+ ;;
+
+ ld8 r16 = [r16] // r16 <- pointer to dtor's fdesc
+ mov rp = loc1
+ mov ar.pfs = loc3
+ ;;
+
+ cmp.ne p6, p0 = r0, r16
+(p6) br.cond.sptk.few .loop
+ br.ret.sptk.many rp
+ .endp __do_global_dtors_aux
+
+ .align 32
+ .proc __do_jv_register_classes
+__do_jv_register_classes:
+ .prologue
+ .save ar.pfs, r33
+ alloc loc1 = ar.pfs, 0, 3, 1, 0
+ movl out0 = @gprel(__JCR_LIST__)
+ ;;
+
+ addl r14 = @ltoff(@fptr(_Jv_RegisterClasses)), gp
+ add out0 = out0, gp
+ .save rp, loc0
+ mov loc0 = rp
+ .body
+ ;;
+
+ ld8 r14 = [r14]
+ ld8 r15 = [out0]
+ cmp.ne p6, p0 = r0, r0
+ ;;
+
+ cmp.eq.or p6, p0 = r0, r14
+ cmp.eq.or p6, p0 = r0, r15
+(p6) br.ret.sptk.many rp
+
+ ld8 r15 = [r14], 8
+ ;;
+ nop 0
+ mov b6 = r15
+
+ mov loc2 = gp
+ ld8 gp = [r14]
+ br.call.sptk.many rp = b6
+ ;;
+
+ mov gp = loc2
+ mov rp = loc0
+ mov ar.pfs = loc1
+
+ nop 0
+ nop 0
+ br.ret.sptk.many rp
+ .endp __do_jv_register_classes
+
+#ifdef SHARED
+.weak __cxa_finalize
+#endif
+.weak _Jv_RegisterClasses
diff --git a/libgcc/config/ia64/crtend.S b/libgcc/config/ia64/crtend.S
new file mode 100644
index 00000000000..a904af9cfd9
--- /dev/null
+++ b/libgcc/config/ia64/crtend.S
@@ -0,0 +1,121 @@
+/* Copyright (C) 2000, 2001, 2003, 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "auto-host.h"
+
+.section .ctors,"aw","progbits"
+ .align 8
+__CTOR_END__:
+ data8 0
+
+.section .dtors,"aw","progbits"
+ .align 8
+__DTOR_END__:
+ data8 0
+
+.section .jcr,"aw","progbits"
+ .align 8
+__JCR_END__:
+ data8 0
+
+#ifdef HAVE_INITFINI_ARRAY
+ .global __do_global_ctors_aux
+ .hidden __do_global_ctors_aux
+#else /* !HAVE_INITFINI_ARRAY */
+/*
+ * Fragment of the ELF _init routine that invokes our ctor startup.
+ *
+ * We make the call by indirection, because in large programs the
+ * .fini and .init sections are not in range of the destination, and
+ * we cannot allow the linker to insert a stub at the end of this
+ * fragment of the _fini function. Further, Itanium does not implement
+ * the long branch instructions, and we do not wish every program to
+ * trap to the kernel for emulation.
+ *
+ * Note that we require __do_global_ctors_aux to preserve the GP,
+ * so that the next fragment in .init gets the right value.
+ */
+.section .init,"ax","progbits"
+ { .mlx
+ movl r2 = @pcrel(__do_global_ctors_aux - 16)
+ }
+ { .mii
+ mov r3 = ip
+ ;;
+ add r2 = r2, r3
+ ;;
+ }
+ { .mib
+ mov b6 = r2
+ br.call.sptk.many b0 = b6
+ ;;
+ }
+#endif /* !HAVE_INITFINI_ARRAY */
+
+.text
+ .align 32
+ .proc __do_global_ctors_aux
+__do_global_ctors_aux:
+ .prologue
+ /*
+	   for (p = __CTOR_END__ - 1; *p != -1; --p)
+ (*p) ();
+ */
+ .save ar.pfs, r34
+ alloc loc2 = ar.pfs, 0, 5, 0, 0
+ movl loc0 = @gprel(__CTOR_END__ - 8)
+ ;;
+
+ add loc0 = loc0, gp
+ ;;
+ ld8 loc3 = [loc0], -8
+ .save rp, loc1
+ mov loc1 = rp
+ .body
+ ;;
+
+ cmp.eq p6, p0 = -1, loc3
+ mov loc4 = gp
+(p6) br.cond.spnt.few .exit
+
+.loop: ld8 r15 = [loc3], 8
+ ;;
+ ld8 gp = [loc3]
+ mov b6 = r15
+
+ ld8 loc3 = [loc0], -8
+ nop 0
+ br.call.sptk.many rp = b6
+ ;;
+
+ cmp.ne p6, p0 = -1, loc3
+ nop 0
+(p6) br.cond.sptk.few .loop
+
+.exit: mov gp = loc3
+ mov rp = loc1
+ mov ar.pfs = loc2
+
+ br.ret.sptk.many rp
+ .endp __do_global_ctors_aux
diff --git a/libgcc/config/ia64/crti.S b/libgcc/config/ia64/crti.S
new file mode 100644
index 00000000000..c15ff662bbe
--- /dev/null
+++ b/libgcc/config/ia64/crti.S
@@ -0,0 +1,53 @@
+# Copyright (C) 2000, 2001, 2008, 2009, 2011 Free Software Foundation, Inc.
+# Written By Timothy Wall
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+ .section ".init"
+ .align 16
+ .global _init
+_init:
+ .prologue 14, 33
+ .save ar.pfs, r34
+ alloc r34 = ar.pfs, 0, 4, 0, 0
+ .vframe r35
+ mov r35 = r12
+ .save rp, r33
+ mov r33 = b0
+ .body
+
+ .section ".fini"
+ .align 16
+ .global _fini
+_fini:
+ .prologue 14, 33
+ .save ar.pfs, r34
+ alloc r34 = ar.pfs, 0, 4, 0, 0
+ .vframe r35
+ mov r35 = r12
+ .save rp, r33
+ mov r33 = b0
+ .body
+
+# end of crti.S
diff --git a/libgcc/config/ia64/crtn.S b/libgcc/config/ia64/crtn.S
new file mode 100644
index 00000000000..d44abbea3c7
--- /dev/null
+++ b/libgcc/config/ia64/crtn.S
@@ -0,0 +1,43 @@
+# Copyright (C) 2000, 2001, 2008, 2009, 2011 Free Software Foundation, Inc.
+# Written By Timothy Wall
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .section ".init"
+ ;;
+ mov ar.pfs = r34
+ mov b0 = r33
+ .restore sp
+ mov r12 = r35
+ br.ret.sptk.many b0
+
+ .section ".fini"
+ ;;
+ mov ar.pfs = r34
+ mov b0 = r33
+ .restore sp
+ mov r12 = r35
+ br.ret.sptk.many b0
+
+# end of crtn.S
diff --git a/libgcc/config/ia64/fde-vms.c b/libgcc/config/ia64/fde-vms.c
index c9ac5d28076..e02b9abbccf 100644
--- a/libgcc/config/ia64/fde-vms.c
+++ b/libgcc/config/ia64/fde-vms.c
@@ -29,10 +29,10 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
-#include "md-unwind-support.h"
#include "unwind-ia64.h"
#define __int64 long
diff --git a/libgcc/config/ia64/lib1funcs.S b/libgcc/config/ia64/lib1funcs.S
new file mode 100644
index 00000000000..b7eaa6eca3c
--- /dev/null
+++ b/libgcc/config/ia64/lib1funcs.S
@@ -0,0 +1,795 @@
+/* Copyright (C) 2000, 2001, 2003, 2005, 2009 Free Software Foundation, Inc.
+ Contributed by James E. Wilson <wilson@cygnus.com>.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef L__divxf3
+// Compute an 80-bit IEEE double-extended quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// farg0 holds the dividend. farg1 holds the divisor.
+//
+// __divtf3 is an alternate symbol name for backward compatibility.
+
+ .text
+ .align 16
+ .global __divxf3
+ .proc __divxf3
+__divxf3:
+#ifdef SHARED
+ .global __divtf3
+__divtf3:
+#endif
+ cmp.eq p7, p0 = r0, r0
+ frcpa.s0 f10, p6 = farg0, farg1
+ ;;
+(p6) cmp.ne p7, p0 = r0, r0
+ .pred.rel.mutex p6, p7
+(p6) fnma.s1 f11 = farg1, f10, f1
+(p6) fma.s1 f12 = farg0, f10, f0
+ ;;
+(p6) fma.s1 f13 = f11, f11, f0
+(p6) fma.s1 f14 = f11, f11, f11
+ ;;
+(p6) fma.s1 f11 = f13, f13, f11
+(p6) fma.s1 f13 = f14, f10, f10
+ ;;
+(p6) fma.s1 f10 = f13, f11, f10
+(p6) fnma.s1 f11 = farg1, f12, farg0
+ ;;
+(p6) fma.s1 f11 = f11, f10, f12
+(p6) fnma.s1 f12 = farg1, f10, f1
+ ;;
+(p6) fma.s1 f10 = f12, f10, f10
+(p6) fnma.s1 f12 = farg1, f11, farg0
+ ;;
+(p6) fma.s0 fret0 = f12, f10, f11
+(p7) mov fret0 = f10
+ br.ret.sptk rp
+ .endp __divxf3
+#endif
+
+#ifdef L__divdf3
+// Compute a 64-bit IEEE double quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// farg0 holds the dividend. farg1 holds the divisor.
+
+ .text
+ .align 16
+ .global __divdf3
+ .proc __divdf3
+__divdf3:
+ cmp.eq p7, p0 = r0, r0
+ frcpa.s0 f10, p6 = farg0, farg1
+ ;;
+(p6) cmp.ne p7, p0 = r0, r0
+ .pred.rel.mutex p6, p7
+(p6) fmpy.s1 f11 = farg0, f10
+(p6) fnma.s1 f12 = farg1, f10, f1
+ ;;
+(p6) fma.s1 f11 = f12, f11, f11
+(p6) fmpy.s1 f13 = f12, f12
+ ;;
+(p6) fma.s1 f10 = f12, f10, f10
+(p6) fma.s1 f11 = f13, f11, f11
+ ;;
+(p6) fmpy.s1 f12 = f13, f13
+(p6) fma.s1 f10 = f13, f10, f10
+ ;;
+(p6) fma.d.s1 f11 = f12, f11, f11
+(p6) fma.s1 f10 = f12, f10, f10
+ ;;
+(p6) fnma.d.s1 f8 = farg1, f11, farg0
+ ;;
+(p6) fma.d fret0 = f8, f10, f11
+(p7) mov fret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __divdf3
+#endif
+
+#ifdef L__divsf3
+// Compute a 32-bit IEEE float quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// farg0 holds the dividend. farg1 holds the divisor.
+
+ .text
+ .align 16
+ .global __divsf3
+ .proc __divsf3
+__divsf3:
+ cmp.eq p7, p0 = r0, r0
+ frcpa.s0 f10, p6 = farg0, farg1
+ ;;
+(p6) cmp.ne p7, p0 = r0, r0
+ .pred.rel.mutex p6, p7
+(p6) fmpy.s1 f8 = farg0, f10
+(p6) fnma.s1 f9 = farg1, f10, f1
+ ;;
+(p6) fma.s1 f8 = f9, f8, f8
+(p6) fmpy.s1 f9 = f9, f9
+ ;;
+(p6) fma.s1 f8 = f9, f8, f8
+(p6) fmpy.s1 f9 = f9, f9
+ ;;
+(p6) fma.d.s1 f10 = f9, f8, f8
+ ;;
+(p6) fnorm.s.s0 fret0 = f10
+(p7) mov fret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __divsf3
+#endif
+
+#ifdef L__divdi3
+// Compute a 64-bit integer quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __divdi3
+ .proc __divdi3
+__divdi3:
+ .regstk 2,0,0,0
+ // Transfer inputs to FP registers.
+ setf.sig f8 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ // Convert the inputs to FP, so that they won't be treated as unsigned.
+ fcvt.xf f8 = f8
+ fcvt.xf f9 = f9
+(p7) break 1
+ ;;
+ // Compute the reciprocal approximation.
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+ // 3 Newton-Raphson iterations.
+(p6) fnma.s1 f11 = f9, f10, f1
+(p6) fmpy.s1 f12 = f8, f10
+ ;;
+(p6) fmpy.s1 f13 = f11, f11
+(p6) fma.s1 f12 = f11, f12, f12
+ ;;
+(p6) fma.s1 f10 = f11, f10, f10
+(p6) fma.s1 f11 = f13, f12, f12
+ ;;
+(p6) fma.s1 f10 = f13, f10, f10
+(p6) fnma.s1 f12 = f9, f11, f8
+ ;;
+(p6) fma.s1 f10 = f12, f10, f11
+ ;;
+ // Round quotient to an integer.
+ fcvt.fx.trunc.s1 f10 = f10
+ ;;
+ // Transfer result to GP registers.
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __divdi3
+#endif
+
+#ifdef L__moddi3
+// Compute a 64-bit integer modulus.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend (a). in1 holds the divisor (b).
+
+ .text
+ .align 16
+ .global __moddi3
+ .proc __moddi3
+__moddi3:
+ .regstk 2,0,0,0
+ // Transfer inputs to FP registers.
+ setf.sig f14 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ // Convert the inputs to FP, so that they won't be treated as unsigned.
+ fcvt.xf f8 = f14
+ fcvt.xf f9 = f9
+(p7) break 1
+ ;;
+ // Compute the reciprocal approximation.
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+ // 3 Newton-Raphson iterations.
+(p6) fmpy.s1 f12 = f8, f10
+(p6) fnma.s1 f11 = f9, f10, f1
+ ;;
+(p6) fma.s1 f12 = f11, f12, f12
+(p6) fmpy.s1 f13 = f11, f11
+ ;;
+(p6) fma.s1 f10 = f11, f10, f10
+(p6) fma.s1 f11 = f13, f12, f12
+ ;;
+ sub in1 = r0, in1
+(p6) fma.s1 f10 = f13, f10, f10
+(p6) fnma.s1 f12 = f9, f11, f8
+ ;;
+ setf.sig f9 = in1
+(p6) fma.s1 f10 = f12, f10, f11
+ ;;
+ fcvt.fx.trunc.s1 f10 = f10
+ ;;
+ // r = q * (-b) + a
+ xma.l f10 = f10, f9, f14
+ ;;
+ // Transfer result to GP registers.
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __moddi3
+#endif
+
+#ifdef L__udivdi3
+// Compute a 64-bit unsigned integer quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __udivdi3
+ .proc __udivdi3
+__udivdi3:
+ .regstk 2,0,0,0
+ // Transfer inputs to FP registers.
+ setf.sig f8 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ // Convert the inputs to FP, to avoid FP software-assist faults.
+ fcvt.xuf.s1 f8 = f8
+ fcvt.xuf.s1 f9 = f9
+(p7) break 1
+ ;;
+ // Compute the reciprocal approximation.
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+ // 3 Newton-Raphson iterations.
+(p6) fnma.s1 f11 = f9, f10, f1
+(p6) fmpy.s1 f12 = f8, f10
+ ;;
+(p6) fmpy.s1 f13 = f11, f11
+(p6) fma.s1 f12 = f11, f12, f12
+ ;;
+(p6) fma.s1 f10 = f11, f10, f10
+(p6) fma.s1 f11 = f13, f12, f12
+ ;;
+(p6) fma.s1 f10 = f13, f10, f10
+(p6) fnma.s1 f12 = f9, f11, f8
+ ;;
+(p6) fma.s1 f10 = f12, f10, f11
+ ;;
+ // Round quotient to an unsigned integer.
+ fcvt.fxu.trunc.s1 f10 = f10
+ ;;
+ // Transfer result to GP registers.
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __udivdi3
+#endif
+
+#ifdef L__umoddi3
+// Compute a 64-bit unsigned integer modulus.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend (a). in1 holds the divisor (b).
+
+ .text
+ .align 16
+ .global __umoddi3
+ .proc __umoddi3
+__umoddi3:
+ .regstk 2,0,0,0
+ // Transfer inputs to FP registers.
+ setf.sig f14 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ // Convert the inputs to FP, to avoid FP software assist faults.
+ fcvt.xuf.s1 f8 = f14
+ fcvt.xuf.s1 f9 = f9
+(p7) break 1;
+ ;;
+ // Compute the reciprocal approximation.
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+ // 3 Newton-Raphson iterations.
+(p6) fmpy.s1 f12 = f8, f10
+(p6) fnma.s1 f11 = f9, f10, f1
+ ;;
+(p6) fma.s1 f12 = f11, f12, f12
+(p6) fmpy.s1 f13 = f11, f11
+ ;;
+(p6) fma.s1 f10 = f11, f10, f10
+(p6) fma.s1 f11 = f13, f12, f12
+ ;;
+ sub in1 = r0, in1
+(p6) fma.s1 f10 = f13, f10, f10
+(p6) fnma.s1 f12 = f9, f11, f8
+ ;;
+ setf.sig f9 = in1
+(p6) fma.s1 f10 = f12, f10, f11
+ ;;
+ // Round quotient to an unsigned integer.
+ fcvt.fxu.trunc.s1 f10 = f10
+ ;;
+ // r = q * (-b) + a
+ xma.l f10 = f10, f9, f14
+ ;;
+ // Transfer result to GP registers.
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __umoddi3
+#endif
+
+#ifdef L__divsi3
+// Compute a 32-bit integer quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __divsi3
+ .proc __divsi3
+__divsi3:
+ .regstk 2,0,0,0
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ sxt4 in0 = in0
+ sxt4 in1 = in1
+ ;;
+ setf.sig f8 = in0
+ setf.sig f9 = in1
+(p7) break 1
+ ;;
+ mov r2 = 0x0ffdd
+ fcvt.xf f8 = f8
+ fcvt.xf f9 = f9
+ ;;
+ setf.exp f11 = r2
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+(p6) fmpy.s1 f8 = f8, f10
+(p6) fnma.s1 f9 = f9, f10, f1
+ ;;
+(p6) fma.s1 f8 = f9, f8, f8
+(p6) fma.s1 f9 = f9, f9, f11
+ ;;
+(p6) fma.s1 f10 = f9, f8, f8
+ ;;
+ fcvt.fx.trunc.s1 f10 = f10
+ ;;
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __divsi3
+#endif
+
+#ifdef L__modsi3
+// Compute a 32-bit integer modulus.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __modsi3
+ .proc __modsi3
+__modsi3:
+ .regstk 2,0,0,0
+ mov r2 = 0x0ffdd
+ sxt4 in0 = in0
+ sxt4 in1 = in1
+ ;;
+ setf.sig f13 = r32
+ setf.sig f9 = r33
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ sub in1 = r0, in1
+ fcvt.xf f8 = f13
+ fcvt.xf f9 = f9
+ ;;
+ setf.exp f11 = r2
+ frcpa.s1 f10, p6 = f8, f9
+(p7) break 1
+ ;;
+(p6) fmpy.s1 f12 = f8, f10
+(p6) fnma.s1 f10 = f9, f10, f1
+ ;;
+ setf.sig f9 = in1
+(p6) fma.s1 f12 = f10, f12, f12
+(p6) fma.s1 f10 = f10, f10, f11
+ ;;
+(p6) fma.s1 f10 = f10, f12, f12
+ ;;
+ fcvt.fx.trunc.s1 f10 = f10
+ ;;
+ xma.l f10 = f10, f9, f13
+ ;;
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __modsi3
+#endif
+
+#ifdef L__udivsi3
+// Compute a 32-bit unsigned integer quotient.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __udivsi3
+ .proc __udivsi3
+__udivsi3:
+ .regstk 2,0,0,0
+ mov r2 = 0x0ffdd
+ zxt4 in0 = in0
+ zxt4 in1 = in1
+ ;;
+ setf.sig f8 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ fcvt.xf f8 = f8
+ fcvt.xf f9 = f9
+(p7) break 1
+ ;;
+ setf.exp f11 = r2
+ frcpa.s1 f10, p6 = f8, f9
+ ;;
+(p6) fmpy.s1 f8 = f8, f10
+(p6) fnma.s1 f9 = f9, f10, f1
+ ;;
+(p6) fma.s1 f8 = f9, f8, f8
+(p6) fma.s1 f9 = f9, f9, f11
+ ;;
+(p6) fma.s1 f10 = f9, f8, f8
+ ;;
+ fcvt.fxu.trunc.s1 f10 = f10
+ ;;
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __udivsi3
+#endif
+
+#ifdef L__umodsi3
+// Compute a 32-bit unsigned integer modulus.
+//
+// From the Intel IA-64 Optimization Guide, choose the minimum latency
+// alternative.
+//
+// in0 holds the dividend. in1 holds the divisor.
+
+ .text
+ .align 16
+ .global __umodsi3
+ .proc __umodsi3
+__umodsi3:
+ .regstk 2,0,0,0
+ mov r2 = 0x0ffdd
+ zxt4 in0 = in0
+ zxt4 in1 = in1
+ ;;
+ setf.sig f13 = in0
+ setf.sig f9 = in1
+ // Check divide by zero.
+ cmp.ne.unc p0,p7=0,in1
+ ;;
+ sub in1 = r0, in1
+ fcvt.xf f8 = f13
+ fcvt.xf f9 = f9
+ ;;
+ setf.exp f11 = r2
+ frcpa.s1 f10, p6 = f8, f9
+(p7) break 1;
+ ;;
+(p6) fmpy.s1 f12 = f8, f10
+(p6) fnma.s1 f10 = f9, f10, f1
+ ;;
+ setf.sig f9 = in1
+(p6) fma.s1 f12 = f10, f12, f12
+(p6) fma.s1 f10 = f10, f10, f11
+ ;;
+(p6) fma.s1 f10 = f10, f12, f12
+ ;;
+ fcvt.fxu.trunc.s1 f10 = f10
+ ;;
+ xma.l f10 = f10, f9, f13
+ ;;
+ getf.sig ret0 = f10
+ br.ret.sptk rp
+ ;;
+ .endp __umodsi3
+#endif
+
+#ifdef L__save_stack_nonlocal
+// Notes on save/restore stack nonlocal: We read ar.bsp but write
+// ar.bspstore. This is because ar.bsp can be read at all times
+// (independent of the RSE mode) but since it's read-only we need to
+// restore the value via ar.bspstore. This is OK because
+// ar.bsp==ar.bspstore after executing "flushrs".
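
Read at the C level, the save and restore routines below appear to treat the save area as four 8-byte slots; a hedged sketch, with the struct and field names hypothetical and the offsets inferred from the st8/ld8 sequences:

    /* Hedged sketch of the nonlocal-goto save area used below.  */
    struct ia64_nonlocal_save_area
    {
      void *sp;             /* offset  0: memory stack pointer */
      void *bsp;            /* offset  8: ar.bsp, written back via
                               ar.bspstore on restore */
      unsigned long rnat;   /* offset 16: ar.rnat */
      unsigned long pfs;    /* offset 24: ar.pfs */
    };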
+
+// void __ia64_save_stack_nonlocal(void *save_area, void *stack_pointer)
+
+ .text
+ .align 16
+ .global __ia64_save_stack_nonlocal
+ .proc __ia64_save_stack_nonlocal
+__ia64_save_stack_nonlocal:
+ { .mmf
+ alloc r18 = ar.pfs, 2, 0, 0, 0
+ mov r19 = ar.rsc
+ ;;
+ }
+ { .mmi
+ flushrs
+ st8 [in0] = in1, 24
+ and r19 = 0x1c, r19
+ ;;
+ }
+ { .mmi
+ st8 [in0] = r18, -16
+ mov ar.rsc = r19
+ or r19 = 0x3, r19
+ ;;
+ }
+ { .mmi
+ mov r16 = ar.bsp
+ mov r17 = ar.rnat
+ adds r2 = 8, in0
+ ;;
+ }
+ { .mmi
+ st8 [in0] = r16
+ st8 [r2] = r17
+ }
+ { .mib
+ mov ar.rsc = r19
+ br.ret.sptk.few rp
+ ;;
+ }
+ .endp __ia64_save_stack_nonlocal
+#endif
+
+#ifdef L__nonlocal_goto
+// void __ia64_nonlocal_goto(void *target_label, void *save_area,
+// void *static_chain);
+
+ .text
+ .align 16
+ .global __ia64_nonlocal_goto
+ .proc __ia64_nonlocal_goto
+__ia64_nonlocal_goto:
+ { .mmi
+ alloc r20 = ar.pfs, 3, 0, 0, 0
+ ld8 r12 = [in1], 8
+ mov.ret.sptk rp = in0, .L0
+ ;;
+ }
+ { .mmf
+ ld8 r16 = [in1], 8
+ mov r19 = ar.rsc
+ ;;
+ }
+ { .mmi
+ flushrs
+ ld8 r17 = [in1], 8
+ and r19 = 0x1c, r19
+ ;;
+ }
+ { .mmi
+ ld8 r18 = [in1]
+ mov ar.rsc = r19
+ or r19 = 0x3, r19
+ ;;
+ }
+ { .mmi
+ mov ar.bspstore = r16
+ ;;
+ mov ar.rnat = r17
+ ;;
+ }
+ { .mmi
+ loadrs
+ invala
+ mov r15 = in2
+ ;;
+ }
+.L0: { .mib
+ mov ar.rsc = r19
+ mov ar.pfs = r18
+ br.ret.sptk.few rp
+ ;;
+ }
+ .endp __ia64_nonlocal_goto
+#endif
+
+#ifdef L__restore_stack_nonlocal
+// This is mostly the same as nonlocal_goto above.
+// ??? This has not been tested yet.
+
+// void __ia64_restore_stack_nonlocal(void *save_area)
+
+ .text
+ .align 16
+ .global __ia64_restore_stack_nonlocal
+ .proc __ia64_restore_stack_nonlocal
+__ia64_restore_stack_nonlocal:
+ { .mmf
+ alloc r20 = ar.pfs, 4, 0, 0, 0
+ ld8 r12 = [in0], 8
+ ;;
+ }
+ { .mmb
+ ld8 r16=[in0], 8
+ mov r19 = ar.rsc
+ ;;
+ }
+ { .mmi
+ flushrs
+ ld8 r17 = [in0], 8
+ and r19 = 0x1c, r19
+ ;;
+ }
+ { .mmf
+ ld8 r18 = [in0]
+ mov ar.rsc = r19
+ ;;
+ }
+ { .mmi
+ mov ar.bspstore = r16
+ ;;
+ mov ar.rnat = r17
+ or r19 = 0x3, r19
+ ;;
+ }
+ { .mmf
+ loadrs
+ invala
+ ;;
+ }
+.L0: { .mib
+ mov ar.rsc = r19
+ mov ar.pfs = r18
+ br.ret.sptk.few rp
+ ;;
+ }
+ .endp __ia64_restore_stack_nonlocal
+#endif
+
+#ifdef L__trampoline
+// Implement the nested function trampoline. This is out of line
+// so that we don't have to bother with flushing the icache, and so
+// that the on-stack trampoline stays small.
+//
+// The trampoline has the following form:
+//
+// +-------------------+ >
+// TRAMP: | __ia64_trampoline | |
+// +-------------------+ > fake function descriptor
+// | TRAMP+16 | |
+// +-------------------+ >
+// | target descriptor |
+// +-------------------+
+// | static link |
+// +-------------------+
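+//
+// Viewed from C, the trampoline could be modeled as (hypothetical
+// field names, for illustration only):
+//
+//   struct tramp {
+//     void *entry;        // address of __ia64_trampoline
+//     void *entry_gp;     // TRAMP+16, points at target_desc below
+//     void *target_desc;  // function descriptor of the real target
+//     void *static_link;  // static chain for the nested function
+//   };
+//
+// A call through TRAMP treats the first two words as a descriptor, so
+// the code below enters with r1 = TRAMP+16 and uses it to load the
+// real target and the static chain.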
+
+ .text
+ .align 16
+ .global __ia64_trampoline
+ .proc __ia64_trampoline
+__ia64_trampoline:
+ { .mmi
+ ld8 r2 = [r1], 8
+ ;;
+ ld8 r15 = [r1]
+ }
+ { .mmi
+ ld8 r3 = [r2], 8
+ ;;
+ ld8 r1 = [r2]
+ mov b6 = r3
+ }
+ { .bbb
+ br.sptk.many b6
+ ;;
+ }
+ .endp __ia64_trampoline
+#endif
+
+#ifdef SHARED
+// Thunks for backward compatibility.
+#ifdef L_fixtfdi
+ .text
+ .align 16
+ .global __fixtfti
+ .proc __fixtfti
+__fixtfti:
+ { .bbb
+ br.sptk.many __fixxfti
+ ;;
+ }
+ .endp __fixtfti
+#endif
+#ifdef L_fixunstfdi
+ .align 16
+ .global __fixunstfti
+ .proc __fixunstfti
+__fixunstfti:
+ { .bbb
+ br.sptk.many __fixunsxfti
+ ;;
+ }
+ .endp __fixunstfti
+#endif
+#ifdef L_floatditf
+ .align 16
+ .global __floattitf
+ .proc __floattitf
+__floattitf:
+ { .bbb
+ br.sptk.many __floattixf
+ ;;
+ }
+ .endp __floattitf
+#endif
+#endif
diff --git a/libgcc/config/ia64/libgcc-glibc.ver b/libgcc/config/ia64/libgcc-glibc.ver
new file mode 100644
index 00000000000..34a69618d1b
--- /dev/null
+++ b/libgcc/config/ia64/libgcc-glibc.ver
@@ -0,0 +1,97 @@
+# Copyright (C) 2009 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# 128-bit long double support was introduced with GCC 4.4.0. These lines
+# ensure that the symbols get @@GCC_4.4.0 attached.
+
+%exclude {
+ __addtf3
+ __divtc3
+ __divtf3
+ __eqtf2
+ __extenddftf2
+ __extendsftf2
+ __extendxftf2
+ __fixtfdi
+ __fixtfsi
+ __fixtfti
+ __fixunstfdi
+ __fixunstfsi
+ __fixunstfti
+ __floatditf
+ __floatsitf
+ __floattitf
+ __floatunditf
+ __floatunsitf
+ __floatuntitf
+ __getf2
+ __gttf2
+ __letf2
+ __lttf2
+ __multc3
+ __multf3
+ __negtf2
+ __netf2
+ __powitf2
+ __subtf3
+ __trunctfdf2
+ __trunctfsf2
+ __trunctfxf2
+ __unordtf2
+}
+
+# These TF functions were aliases of the XF functions before GCC 3.4.
+GCC_3.0 {
+ __divtf3
+ __fixtfti
+ __fixunstfti
+ __floattitf
+}
+
+GCC_4.4.0 {
+ __addtf3
+ __copysigntf3
+ __divtc3
+ __divtf3
+ __eqtf2
+ __extenddftf2
+ __extendsftf2
+ __fabstf2
+ __fixtfdi
+ __fixtfsi
+ __fixunstfdi
+ __fixunstfsi
+ __floatditf
+ __floatsitf
+ __floatunditf
+ __floatunsitf
+ __getf2
+ __gttf2
+ __letf2
+ __lttf2
+ __multc3
+ __multf3
+ __negtf2
+ __netf2
+ __powitf2
+ __subtf3
+ __trunctfdf2
+ __trunctfsf2
+ __trunctfxf2
+ __unordtf2
+}
diff --git a/libgcc/config/ia64/libgcc-ia64.ver b/libgcc/config/ia64/libgcc-ia64.ver
new file mode 100644
index 00000000000..11c1fe629bd
--- /dev/null
+++ b/libgcc/config/ia64/libgcc-ia64.ver
@@ -0,0 +1,30 @@
+# Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_3.0 {
+ # IA-64 symbols
+ __ia64_nonlocal_goto
+ __ia64_personality_v1
+ __ia64_restore_stack_nonlocal
+ __ia64_save_stack_nonlocal
+ __ia64_trampoline
+ __ia64_backtrace
+}
+GCC_3.3.2 {
+ _Unwind_GetBSP
+}
diff --git a/libgcc/config/ia64/quadlib.c b/libgcc/config/ia64/quadlib.c
new file mode 100644
index 00000000000..f9ee30b587c
--- /dev/null
+++ b/libgcc/config/ia64/quadlib.c
@@ -0,0 +1,78 @@
+/* Subroutines for long double support.
+ Copyright (C) 2000, 2001, 2002, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+extern int _U_Qfcmp (long double a, long double b, int);
+
+int _U_Qfeq (long double, long double);
+int _U_Qfne (long double, long double);
+int _U_Qfgt (long double, long double);
+int _U_Qfge (long double, long double);
+int _U_Qflt (long double, long double);
+int _U_Qfle (long double, long double);
+int _U_Qfcomp (long double, long double);
+
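+/* The third argument to _U_Qfcmp selects an HP-UX millicode comparison
+   predicate. The numeric codes below are taken as given; judging from
+   how the wrappers use the results, 4 behaves as "equal", 9 as "less
+   than", 13 as "less or equal", 17 as "greater than", 21 as "greater
+   or equal", and 22 (in _U_Qfcomp) as the "greater" side of a
+   three-way compare. */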
+int
+_U_Qfeq (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 4) != 0);
+}
+
+int
+_U_Qfne (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 4) == 0);
+}
+
+int
+_U_Qfgt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 17) != 0);
+}
+
+int
+_U_Qfge (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 21) != 0);
+}
+
+int
+_U_Qflt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 9) != 0);
+}
+
+int
+_U_Qfle (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, 13) != 0);
+}
+
+int
+_U_Qfcomp (long double a, long double b)
+{
+ if (_U_Qfcmp (a, b, 4) == 0)
+ return 0;
+
+ return (_U_Qfcmp (a, b, 22) != 0 ? 1 : -1);
+}
diff --git a/libgcc/config/ia64/t-hpux b/libgcc/config/ia64/t-hpux
index ef3387e7a61..ddc1135d737 100644
--- a/libgcc/config/ia64/t-hpux
+++ b/libgcc/config/ia64/t-hpux
@@ -1 +1,9 @@
+# On HP-UX we do not want _fixtfdi, _fixunstfdi, or _floatditf from
+# LIB1ASMSRC. These functions map the 128-bit conversion function names
+# to 80-bit conversions and were added for Linux backwards compatibility.
+LIB1ASMFUNCS := $(filter-out _fixtfdi _fixunstfdi _floatditf,$(LIB1ASMFUNCS))
+
+# Support routines for HP-UX 128 bit floats.
+LIB2ADD = $(srcdir)/config/ia64/quadlib.c $(srcdir)/floatunsitf.c
+
LIB2ADDEH = $(srcdir)/unwind-c.c
diff --git a/libgcc/config/ia64/t-ia64 b/libgcc/config/ia64/t-ia64
index 3ec5fc5db08..d1ec3536db0 100644
--- a/libgcc/config/ia64/t-ia64
+++ b/libgcc/config/ia64/t-ia64
@@ -1,15 +1,36 @@
+LIB1ASMSRC = ia64/lib1funcs.S
+
+# We use different names for the DImode div/mod files so that they won't
+# conflict with libgcc2.c files. We used to use __ia64 as a prefix; now
+# we use __ as the prefix. Note that L_divdi3 in libgcc2.c actually defines
+# a TImode divide function, so there is no actual overlap here between
+# libgcc2.c and lib1funcs.S.
+LIB1ASMFUNCS = __divxf3 __divdf3 __divsf3 \
+ __divdi3 __moddi3 __udivdi3 __umoddi3 \
+ __divsi3 __modsi3 __udivsi3 __umodsi3 __save_stack_nonlocal \
+ __nonlocal_goto __restore_stack_nonlocal __trampoline \
+ _fixtfdi _fixunstfdi _floatditf
+
+# ??? Hack to get -P option used when compiling lib1funcs.S, because Intel
+# assembler does not accept # line number as a comment.
+# ??? This breaks C++ pragma interface/implementation, which is used in the
+# C++ part of libgcc2, hence it had to be disabled. Must find some other way
+# to support the Intel assembler.
+#LIBGCC2_DEBUG_CFLAGS = -g1 -P
+
CUSTOM_CRTSTUFF = yes
# Assemble startup files.
-crtbegin.o: $(gcc_srcdir)/config/ia64/crtbegin.asm
+# FIXME: -I$(gcc_objdir) is necessary to find auto-host.h. Really?
+crtbegin.o: $(srcdir)/config/ia64/crtbegin.S
+ $(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp $<
+crtend.o: $(srcdir)/config/ia64/crtend.S
+ $(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp $<
+crtbeginS.o: $(srcdir)/config/ia64/crtbegin.S
$(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp \
- -o $@ $(gcc_srcdir)/config/ia64/crtbegin.asm
-crtend.o: $(gcc_srcdir)/config/ia64/crtend.asm
+ -o $@ -DSHARED $<
+crtendS.o: $(srcdir)/config/ia64/crtend.S
$(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp \
- -o $@ $(gcc_srcdir)/config/ia64/crtend.asm
-crtbeginS.o: $(gcc_srcdir)/config/ia64/crtbegin.asm
- $(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp \
- -o $@ -DSHARED $(gcc_srcdir)/config/ia64/crtbegin.asm
-crtendS.o: $(gcc_srcdir)/config/ia64/crtend.asm
- $(CC) $(compile_deps) -I. -I$(gcc_objdir) -c -x assembler-with-cpp \
- -o $@ -DSHARED $(gcc_srcdir)/config/ia64/crtend.asm
+ -o $@ -DSHARED $<
+
+SHLIB_MAPFILES += $(srcdir)/config/ia64/libgcc-ia64.ver
diff --git a/libgcc/config/ia64/t-glibc b/libgcc/config/ia64/t-linux
index df4fe9c4404..e6d72b94a87 100644
--- a/libgcc/config/ia64/t-glibc
+++ b/libgcc/config/ia64/t-linux
@@ -1,3 +1,5 @@
# Use system libunwind library on IA-64 GLIBC based system.
LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
$(srcdir)/unwind-compat.c
+
+SHLIB_MAPFILES += $(srcdir)/config/ia64/libgcc-glibc.ver
diff --git a/libgcc/config/ia64/t-glibc-libunwind b/libgcc/config/ia64/t-linux-libunwind
index 8b1736a2d67..8b1736a2d67 100644
--- a/libgcc/config/ia64/t-glibc-libunwind
+++ b/libgcc/config/ia64/t-linux-libunwind
diff --git a/libgcc/config/ia64/t-slibgcc-hpux b/libgcc/config/ia64/t-slibgcc-hpux
new file mode 100644
index 00000000000..27c7a661b1e
--- /dev/null
+++ b/libgcc/config/ia64/t-slibgcc-hpux
@@ -0,0 +1,6 @@
+# Build a shared libgcc library with the HP-UX linker on IA64.
+
+SHLIB_SOVERSION = 0
+# Must include -lunwind in the link, so that libgcc_s.so has the necessary
+# DT_NEEDED entry for libunwind.
+SHLIB_LC += -lunwind
diff --git a/libgcc/config/ia64/t-softfp-compat b/libgcc/config/ia64/t-softfp-compat
index d3dad68c48f..00f45d51cd0 100644
--- a/libgcc/config/ia64/t-softfp-compat
+++ b/libgcc/config/ia64/t-softfp-compat
@@ -3,5 +3,5 @@
# Replace __dvxf3 _fixtfdi _fixunstfdi _floatditf
libgcc1-tf-functions = __divxf3 _fixtfdi _fixunstfdi _floatditf
LIB1ASMFUNCS := $(filter-out $(libgcc1-tf-functions), $(LIB1ASMFUNCS))
-libgcc1-tf-compats = $(addsuffix .asm, $(libgcc1-tf-functions))
+libgcc1-tf-compats = $(addsuffix .S, $(libgcc1-tf-functions))
LIB2ADD += $(addprefix $(srcdir)/config/ia64/, $(libgcc1-tf-compats))
diff --git a/libgcc/config/ia64/t-vms b/libgcc/config/ia64/t-vms
index 9bc933adfe0..140c748b771 100644
--- a/libgcc/config/ia64/t-vms
+++ b/libgcc/config/ia64/t-vms
@@ -1 +1,7 @@
+CRTSTUFF_T_CFLAGS = -O0
+CRTSTUFF_T_CFLAGS_S = -O0
+
+crtinitS.o: $(srcdir)/config/ia64/vms-crtinit.S
+ $(gcc_compile) -c -x assembler-with-cpp $<
+
LIB2ADDEH += $(srcdir)/config/ia64/fde-vms.c
diff --git a/libgcc/config/ia64/unwind-ia64.c b/libgcc/config/ia64/unwind-ia64.c
index e9ddfca8864..ca5c2670bf2 100644
--- a/libgcc/config/ia64/unwind-ia64.c
+++ b/libgcc/config/ia64/unwind-ia64.c
@@ -31,6 +31,7 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "unwind.h"
#include "unwind-ia64.h"
#include "unwind-compat.h"
diff --git a/libgcc/config/ia64/vms-crtinit.S b/libgcc/config/ia64/vms-crtinit.S
new file mode 100644
index 00000000000..322b2927347
--- /dev/null
+++ b/libgcc/config/ia64/vms-crtinit.S
@@ -0,0 +1,24 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+ .global LIB$INITIALIZE#
diff --git a/libgcc/config/iq2000/lib2funcs.c b/libgcc/config/iq2000/lib2funcs.c
new file mode 100644
index 00000000000..d53786c8c7d
--- /dev/null
+++ b/libgcc/config/iq2000/lib2funcs.c
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+
+USItype
+__mulsi3 (USItype a, USItype b)
+{
+ USItype c = 0;
+
+ while (a != 0)
+ {
+ if (a & 1)
+ c += b;
+ a >>= 1;
+ b <<= 1;
+ }
+
+ return c;
+}
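+
+/* For example, __mulsi3 (5, 3) iterates:
+     a = 5 (odd)  -> c += 3   (c = 3),  b becomes 6
+     a = 2 (even) ->                    b becomes 12
+     a = 1 (odd)  -> c += 12  (c = 15)
+   and returns 15, the expected product. */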
diff --git a/libgcc/config/iq2000/t-iq2000 b/libgcc/config/iq2000/t-iq2000
new file mode 100644
index 00000000000..18fd53c37d6
--- /dev/null
+++ b/libgcc/config/iq2000/t-iq2000
@@ -0,0 +1,5 @@
+LIB2ADD = $(srcdir)/udivmod.c \
+ $(srcdir)/divmod.c \
+ $(srcdir)/udivmodsi4.c \
+ $(srcdir)/config/iq2000/lib2funcs.c
+
diff --git a/libgcc/config/libbid/bid_gcc_intrinsics.h b/libgcc/config/libbid/bid_gcc_intrinsics.h
index 3f3e1ef1706..70529618c11 100644
--- a/libgcc/config/libbid/bid_gcc_intrinsics.h
+++ b/libgcc/config/libbid/bid_gcc_intrinsics.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc.
+/* Copyright (C) 2007, 2009, 2010, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -29,6 +29,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tconfig.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
diff --git a/libgcc/config/libgcc-glibc.ver b/libgcc/config/libgcc-glibc.ver
new file mode 100644
index 00000000000..7824ad5a268
--- /dev/null
+++ b/libgcc/config/libgcc-glibc.ver
@@ -0,0 +1,55 @@
+# Copyright (C) 2000, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
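+#
+# The %exclude block below removes these symbols from the version tags
+# the generic map files would otherwise assign them, so that the
+# explicit GLIBC_2.0 assignments that follow take effect instead.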
+
+%exclude {
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%inherit GCC_3.0 GLIBC_2.0
+GLIBC_2.0 {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
diff --git a/libgcc/config/libgcc-libsystem.ver b/libgcc/config/libgcc-libsystem.ver
new file mode 100644
index 00000000000..47631749dc2
--- /dev/null
+++ b/libgcc/config/libgcc-libsystem.ver
@@ -0,0 +1 @@
+_darwin10_Unwind_FindEnclosingFunction
diff --git a/libgcc/config/lm32/t-elf b/libgcc/config/lm32/t-elf
index 679f00711e7..f96c0988b07 100644
--- a/libgcc/config/lm32/t-elf
+++ b/libgcc/config/lm32/t-elf
@@ -1,12 +1,2 @@
-# Assemble startup files.
-
-$(T)crti.o: $(srcdir)/config/lm32/crti.S $(GCC_PASSES)
- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
- -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/lm32/crti.S
-
-$(T)crtn.o: $(srcdir)/config/lm32/crtn.S $(GCC_PASSES)
- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
- -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/lm32/crtn.S
-
CRTSTUFF_T_CFLAGS = -G 0 -msign-extend-enabled
-HOST_LIBGCC2_CFLAGS = -G 0 -msign-extend-enabled
+HOST_LIBGCC2_CFLAGS += -G 0 -msign-extend-enabled
diff --git a/libgcc/config/lm32/t-uclinux b/libgcc/config/lm32/t-uclinux
index d388f56c3a2..764243c8f88 100644
--- a/libgcc/config/lm32/t-uclinux
+++ b/libgcc/config/lm32/t-uclinux
@@ -1,2 +1,2 @@
-CRTSTUFF_T_CFLAGS = -fPIC -msign-extend-enabled
-HOST_LIBGCC2_CFLAGS = -fPIC -msign-extend-enabled
+CRTSTUFF_T_CFLAGS = $(PICFLAG) -msign-extend-enabled
+HOST_LIBGCC2_CFLAGS += -msign-extend-enabled
diff --git a/libgcc/config/m32c/lib1funcs.S b/libgcc/config/m32c/lib1funcs.S
new file mode 100644
index 00000000000..9b657787187
--- /dev/null
+++ b/libgcc/config/m32c/lib1funcs.S
@@ -0,0 +1,231 @@
+/* libgcc routines for R8C/M16C/M32C
+ Copyright (C) 2005, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if defined(__r8c_cpu__) || defined(__m16c_cpu__)
+#define A16
+#define A(n,w) n
+#define W w
+#else
+#define A24
+#define A(n,w) w
+#define W l
+#endif
+
+
+#ifdef L__m32c_memregs
+
+/* Warning: these memory locations are used as a register bank. They
+ *must* end up consecutive in any final executable, so you may *not*
+ use the otherwise obvious ".comm" directive to allocate space for
+ them. */
+
+ .bss
+ .global mem0
+mem0: .space 1
+ .global mem1
+mem1: .space 1
+ .global mem2
+mem2: .space 1
+ .global mem3
+mem3: .space 1
+ .global mem4
+mem4: .space 1
+ .global mem5
+mem5: .space 1
+ .global mem6
+mem6: .space 1
+ .global mem7
+mem7: .space 1
+ .global mem8
+mem8: .space 1
+ .global mem9
+mem9: .space 1
+ .global mem10
+mem10: .space 1
+ .global mem11
+mem11: .space 1
+ .global mem12
+mem12: .space 1
+ .global mem13
+mem13: .space 1
+ .global mem14
+mem14: .space 1
+ .global mem15
+mem15: .space 1
+
+#endif
+
+#ifdef L__m32c_eh_return
+ .text
+ .global __m32c_eh_return
+__m32c_eh_return:
+
+ /* At this point, r0 has the stack adjustment, r1r3 has the
+ address to return to. The stack looks like this:
+
+ old_ra
+ old_fp
+ <- unwound sp
+ ...
+ fb
+ through
+ r0
+ <- sp
+
+ What we need to do is restore all the registers, update the
+ stack, and return to the right place.
+ */
+
+ stc sp,a0
+
+ add.W A(#16,#24),a0
+ /* a0 points to the current stack, just above the register
+ save areas */
+
+ mov.w a0,a1
+ exts.w r0
+ sub.W A(r0,r2r0),a1
+ sub.W A(#3,#4),a1
+ /* a1 points to the new stack. */
+
+ /* This is for the "rts" below. */
+ mov.w r1,[a1]
+#ifdef A16
+ mov.w r2,r1
+ mov.b r1l,2[a1]
+#else
+ mov.w r2,2[a1]
+#endif
+
+ /* This is for the "popc sp" below. */
+ mov.W a1,[a0]
+
+ popm r0,r1,r2,r3,a0,a1,sb,fb
+ popc sp
+ rts
+#endif
+
+/* SImode arguments for SI foo(SI,SI) functions. */
+#ifdef A16
+#define SAL 5[fb]
+#define SAH 7[fb]
+#define SBL 9[fb]
+#define SBH 11[fb]
+#else
+#define SAL 8[fb]
+#define SAH 10[fb]
+#define SBL 12[fb]
+#define SBH 14[fb]
+#endif
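+
+/* So on A24 parts, SI foo(SI a, SI b) finds a's low/high words at
+   8[fb]/10[fb] and b's at 12[fb]/14[fb]; the A16 offsets are three
+   bytes lower, presumably matching the narrower return address and
+   frame data saved there. */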
+
+#ifdef L__m32c_mulsi3
+ .text
+ .global ___mulsi3
+___mulsi3:
+ enter #0
+ push.w r2
+ mov.w SAL,r0
+ mulu.w SBL,r0 /* writes to r2r0 */
+ mov.w r0,mem0
+ mov.w r2,mem2
+ mov.w SAL,r0
+ mulu.w SBH,r0 /* writes to r2r0 */
+ add.w r0,mem2
+ mov.w SAH,r0
+ mulu.w SBL,r0 /* writes to r2r0 */
+ add.w r0,mem2
+ pop.w r2
+ exitd
+#endif
+
+#ifdef L__m32c_cmpsi2
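+/* Three-way signed compare. Like libgcc's generic __cmpsi2, this
+   returns 0 for a < b, 1 for a == b, and 2 for a > b. */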
+ .text
+ .global ___cmpsi2
+___cmpsi2:
+ enter #0
+ cmp.w SBH,SAH
+ jgt cmpsi_gt
+ jlt cmpsi_lt
+ cmp.w SBL,SAL
+ jgt cmpsi_gt
+ jlt cmpsi_lt
+ mov.w #1,r0
+ exitd
+cmpsi_gt:
+ mov.w #2,r0
+ exitd
+cmpsi_lt:
+ mov.w #0,r0
+ exitd
+#endif
+
+#ifdef L__m32c_ucmpsi2
+ .text
+ .global ___ucmpsi2
+___ucmpsi2:
+ enter #0
+ cmp.w SBH,SAH
+ jgtu cmpsi_gt
+ jltu cmpsi_lt
+ cmp.w SBL,SAL
+ jgtu cmpsi_gt
+ jltu cmpsi_lt
+ mov.w #1,r0
+ exitd
+cmpsi_gt:
+ mov.w #2,r0
+ exitd
+cmpsi_lt:
+ mov.w #0,r0
+ exitd
+#endif
+
+#ifdef L__m32c_jsri16
+ .text
+#ifdef A16
+ .global m32c_jsri16
+m32c_jsri16:
+ add.w #-1, sp
+
+ /* Read the address (16 bits) and return address (24 bits) off
+ the stack. */
+ mov.w 4[sp], r0
+ mov.w 1[sp], r3
+ mov.b 3[sp], a0 /* This zero-extends, so the high byte has
+ zero in it. */
+
+ /* Write the return address, then new address, to the stack. */
+ mov.w a0, 1[sp] /* Just to get the zero in 2[sp]. */
+ mov.w r0, 0[sp]
+ mov.w r3, 3[sp]
+ mov.b a0, 5[sp]
+
+ /* This "returns" to the target address, leaving the pending
+ return address on the stack. */
+ rts
+#endif
+
+#endif
diff --git a/libgcc/config/m32c/lib2funcs.c b/libgcc/config/m32c/lib2funcs.c
new file mode 100644
index 00000000000..274affc4ab0
--- /dev/null
+++ b/libgcc/config/m32c/lib2funcs.c
@@ -0,0 +1,134 @@
+/* libgcc routines for R8C/M16C/M32C
+ Copyright (C) 2005, 2009
+ Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+typedef int sint32_type __attribute__ ((mode (SI)));
+typedef unsigned int uint32_type __attribute__ ((mode (SI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+uint32_type udivmodsi4 (uint32_type, uint32_type, word_type);
+sint32_type __divsi3 (sint32_type, sint32_type);
+sint32_type __modsi3 (sint32_type, sint32_type);
+
+uint32_type
+udivmodsi4 (uint32_type num, uint32_type den, word_type modwanted)
+{
+ uint32_type bit = 1;
+ uint32_type res = 0;
+
+ while (den < num && bit && !(den & (1L << 31)))
+ {
+ den <<= 1;
+ bit <<= 1;
+ }
+ while (bit)
+ {
+ if (num >= den)
+ {
+ num -= den;
+ res |= bit;
+ }
+ bit >>= 1;
+ den >>= 1;
+ }
+ if (modwanted)
+ return num;
+ return res;
+}
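+
+/* Worked example: udivmodsi4 (13, 4, 0) first scales den up to 16
+   (bit = 4), then peels off quotient bits:
+     bit = 4, den = 16: 13 < 16, skip
+     bit = 2, den =  8: 13 >= 8 -> num = 5, res = 2
+     bit = 1, den =  4:  5 >= 4 -> num = 1, res = 3
+   returning 3; with modwanted set it would return the remainder 1. */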
+
+sint32_type
+__divsi3 (sint32_type a, sint32_type b)
+{
+ word_type neg = 0;
+ sint32_type res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+sint32_type
+__modsi3 (sint32_type a, sint32_type b)
+{
+ word_type neg = 0;
+ sint32_type res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+/* See the comment by the definition of LIBGCC2_UNITS_PER_WORD in
+ m32c.h for why we are creating extra versions of some of the
+ functions defined in libgcc2.c. */
+
+#define LIBGCC2_UNITS_PER_WORD 2
+
+#define L_clzsi2
+#define L_ctzsi2
+#define L_ffssi2
+#define L_paritysi2
+#define L_popcountsi2
+
+#include "libgcc2.c"
+
+uint32_type
+__udivsi3 (uint32_type a, uint32_type b)
+{
+ return udivmodsi4 (a, b, 0);
+}
+
+uint32_type
+__umodsi3 (uint32_type a, uint32_type b)
+{
+ return udivmodsi4 (a, b, 1);
+}
diff --git a/libgcc/config/m32c/t-m32c b/libgcc/config/m32c/t-m32c
new file mode 100644
index 00000000000..dac99ec652f
--- /dev/null
+++ b/libgcc/config/m32c/t-m32c
@@ -0,0 +1,13 @@
+LIB1ASMSRC = m32c/lib1funcs.S
+
+LIB1ASMFUNCS = \
+ __m32c_memregs \
+ __m32c_eh_return \
+ __m32c_mulsi3 \
+ __m32c_cmpsi2 \
+ __m32c_ucmpsi2 \
+ __m32c_jsri16
+
+LIB2ADD = $(srcdir)/config/m32c/lib2funcs.c \
+ $(srcdir)/config/m32c/trapv.c
+
diff --git a/libgcc/config/m32c/trapv.c b/libgcc/config/m32c/trapv.c
new file mode 100644
index 00000000000..0c8c174ef81
--- /dev/null
+++ b/libgcc/config/m32c/trapv.c
@@ -0,0 +1,43 @@
+/* 16-bit trapping arithmetic routines for R8C/M16C/M32C
+ Copyright (C) 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* See the comment by the definition of LIBGCC2_UNITS_PER_WORD in
+ m32c.h for why we are creating extra versions of some of the
+ functions defined in libgcc2.c.
+
+ Note - this file is separate from lib2funcs.c so that the following
+ functions will appear in their own object file. This is necessary
+ because they call abort(), which is defined in the C library, whereas
+ the functions in lib2funcs.c are completely self-sufficient. */
+
+#define LIBGCC2_UNITS_PER_WORD 2
+
+#define L_mulvsi3
+#define L_negvsi2
+#define L_addvsi3
+#define L_subvsi3
+
+#include "libgcc2.c"
diff --git a/libgcc/config/m32r/initfini.c b/libgcc/config/m32r/initfini.c
new file mode 100644
index 00000000000..56332459223
--- /dev/null
+++ b/libgcc/config/m32r/initfini.c
@@ -0,0 +1,168 @@
+/* .init/.fini section handling + C++ global constructor/destructor handling.
+ This file is based on crtstuff.c, sol2-crti.S, sol2-crtn.S.
+
+ Copyright (C) 1996, 1997, 1998, 2006, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Declare a pointer to void function type. */
+typedef void (*func_ptr) (void);
+
+#ifdef CRT_INIT
+
+/* NOTE: In order to be able to support SVR4 shared libraries, we arrange
+ to have one set of symbols { __CTOR_LIST__, __DTOR_LIST__, __CTOR_END__,
+ __DTOR_END__ } per root executable and also one set of these symbols
+ per shared library. So in any given whole process image, we may have
+ multiple definitions of each of these symbols. In order to prevent
+ these definitions from conflicting with one another, and in order to
+ ensure that the proper lists are used for the initialization/finalization
+ of each individual shared library (respectively), we give these symbols
+ only internal (i.e. `static') linkage, and we also make it a point to
+ refer to only the __CTOR_END__ symbol in crtfini.o and the __DTOR_LIST__
+ symbol in crtinit.o, where they are defined. */
+
+static func_ptr __CTOR_LIST__[1]
+ __attribute__ ((used, section (".ctors")))
+ = { (func_ptr) (-1) };
+
+static func_ptr __DTOR_LIST__[1]
+ __attribute__ ((used, section (".dtors")))
+ = { (func_ptr) (-1) };
+
+/* Run all the global destructors on exit from the program. */
+
+/* Some systems place the number of pointers in the first word of the
+ table. On SVR4 however, that word is -1. In all cases, the table is
+ null-terminated. On SVR4, we start from the beginning of the list and
+ invoke each per-compilation-unit destructor routine in order
+ until we find that null.
+
+ Note that this function MUST be static. There will be one of these
+ functions in each root executable and one in each shared library, but
+ although they all have the same code, each one is unique in that it
+ refers to one particular associated `__DTOR_LIST__' which belongs to the
+ same particular root executable or shared library file. */
+
+static void __do_global_dtors (void)
+asm ("__do_global_dtors") __attribute__ ((used, section (".text")));
+
+static void
+__do_global_dtors (void)
+{
+ func_ptr *p;
+
+ for (p = __DTOR_LIST__ + 1; *p; p++)
+ (*p) ();
+}
+
+/* .init section start.
+ This must appear at the start of the .init section. */
+
+asm ("\n\
+ .section .init,\"ax\",@progbits\n\
+ .balign 4\n\
+ .global __init\n\
+__init:\n\
+ push fp\n\
+ push lr\n\
+ mv fp,sp\n\
+ seth r0, #shigh(__fini)\n\
+ add3 r0, r0, #low(__fini)\n\
+ bl atexit\n\
+ .fillinsn\n\
+");
+
+/* .fini section start.
+ This must appear at the start of the .fini section. */
+
+asm ("\n\
+ .section .fini,\"ax\",@progbits\n\
+ .balign 4\n\
+ .global __fini\n\
+__fini:\n\
+ push fp\n\
+ push lr\n\
+ mv fp,sp\n\
+ bl __do_global_dtors\n\
+ .fillinsn\n\
+");
+
+#endif /* CRT_INIT */
+
+#ifdef CRT_FINI
+
+/* Put a word containing zero at the end of each of our two lists of function
+ addresses. Note that the words defined here go into the .ctors and .dtors
+ sections of the crtend.o file, and since that file is always linked in
+ last, these words naturally end up at the very ends of the two lists
+ contained in these two sections. */
+
+static func_ptr __CTOR_END__[1]
+ __attribute__ ((used, section (".ctors")))
+ = { (func_ptr) 0 };
+
+static func_ptr __DTOR_END__[1]
+ __attribute__ ((used, section (".dtors")))
+ = { (func_ptr) 0 };
+
+/* Run all global constructors for the program.
+ Note that they are run in reverse order. */
+
+static void __do_global_ctors (void)
+asm ("__do_global_ctors") __attribute__ ((used, section (".text")));
+
+static void
+__do_global_ctors (void)
+{
+ func_ptr *p;
+
+ for (p = __CTOR_END__ - 1; *p != (func_ptr) -1; p--)
+ (*p) ();
+}
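+
+/* After linking, each list is bracketed like
+     .ctors: { (func_ptr) -1, ctor_1, ..., ctor_n, (func_ptr) 0 }
+   so the walk above starts at __CTOR_END__ - 1, just before the
+   terminating zero word, and runs backwards until it reaches the -1
+   marker contributed by the CRT_INIT object. */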
+
+/* .init section end.
+ This must live at the end of the .init section. */
+
+asm ("\n\
+ .section .init,\"ax\",@progbits\n\
+ bl __do_global_ctors\n\
+ mv sp,fp\n\
+ pop lr\n\
+ pop fp\n\
+ jmp lr\n\
+ .fillinsn\n\
+");
+
+/* .fini section end.
+ This must live at the end of the .fini section. */
+
+asm ("\n\
+ .section .fini,\"ax\",@progbits\n\
+ mv sp,fp\n\
+ pop lr\n\
+ pop fp\n\
+ jmp lr\n\
+ .fillinsn\n\
+");
+
+#endif /* CRT_FINI */
diff --git a/libgcc/config/m32r/libgcc-glibc.ver b/libgcc/config/m32r/libgcc-glibc.ver
new file mode 100644
index 00000000000..0e1304b2a3a
--- /dev/null
+++ b/libgcc/config/m32r/libgcc-glibc.ver
@@ -0,0 +1,48 @@
+# Copyright (C) 2004, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+# Note that we cannot use the default libgcc-glibc.ver file on m32r,
+# because GLIBC_2.0 does not exist on this architecture, as the first
+# ever glibc release on the platform was GLIBC_2.3.
+
+%exclude {
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%inherit GCC_3.0 GLIBC_2.3
+GLIBC_2.3 {
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
diff --git a/libgcc/config/m32r/t-linux b/libgcc/config/m32r/t-linux
new file mode 100644
index 00000000000..5223b731ff7
--- /dev/null
+++ b/libgcc/config/m32r/t-linux
@@ -0,0 +1,5 @@
+# Turn off the SDA while compiling libgcc2. There are no headers for it
+# and we want maximal upward compatibility here.
+HOST_LIBGCC2_CFLAGS += -G 0
+
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/m32r/libgcc-glibc.ver
diff --git a/libgcc/config/m32r/t-m32r b/libgcc/config/m32r/t-m32r
new file mode 100644
index 00000000000..861044b15b9
--- /dev/null
+++ b/libgcc/config/m32r/t-m32r
@@ -0,0 +1,23 @@
+# Turn off the SDA while compiling libgcc2. There are no headers for it
+# and we want maximal upward compatibility here.
+HOST_LIBGCC2_CFLAGS += -G 0
+
+# We need to use -fpic when we are using gcc to compile the routines in
+# initfini.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fpic when compiling the
+# routines in initfini.c.
+# -fpic currently isn't supported for the m32r.
+# FIXME: No longer true. Empty CRTSTUFF_T_CFLAGS is the default.
+CRTSTUFF_T_CFLAGS =
+
+# .init/.fini section routines
+crtinit.o: $(srcdir)/config/m32r/initfini.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -DCRT_INIT \
+ -finhibit-size-directive -fno-inline-functions -g0 \
+ -mmodel=medium -c $<
+
+crtfini.o: $(srcdir)/config/m32r/initfini.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -DCRT_FINI \
+ -finhibit-size-directive -fno-inline-functions -g0 \
+ -mmodel=medium -c $<
diff --git a/libgcc/config/m68k/crti.S b/libgcc/config/m68k/crti.S
new file mode 100644
index 00000000000..12fb59f4130
--- /dev/null
+++ b/libgcc/config/m68k/crti.S
@@ -0,0 +1,44 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 1999, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
+
+ .ident "GNU C crti.o"
+
+ .section .init
+ .globl _init
+ .type _init,@function
+_init:
+ linkw %fp,#0
+
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+_fini:
+ linkw %fp,#0
diff --git a/libgcc/config/m68k/crtn.S b/libgcc/config/m68k/crtn.S
new file mode 100644
index 00000000000..b7d70f02ed5
--- /dev/null
+++ b/libgcc/config/m68k/crtn.S
@@ -0,0 +1,40 @@
+/* Specialized code needed to support construction and destruction of
+ file-scope objects in C++ and Java code, and to support exception handling.
+ Copyright (C) 1999, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .ident "GNU C crtn.o"
+
+ .section .init
+ unlk %fp
+ rts
+
+ .section .fini
+ unlk %fp
+ rts
diff --git a/libgcc/config/m68k/fpgnulib.c b/libgcc/config/m68k/fpgnulib.c
new file mode 100644
index 00000000000..fe41edf26aa
--- /dev/null
+++ b/libgcc/config/m68k/fpgnulib.c
@@ -0,0 +1,595 @@
+/* This is a stripped down version of floatlib.c. It supplies only those
+ functions which exist in libgcc, but for which there are no assembly
+ language versions in m68k/lb1sf68.S.
+
+ It also includes simplistic support for extended floats (by working in
+ double precision). You must compile this file again with -DEXTFLOAT
+ to get this support. */
+
+/*
+** gnulib support for software floating point.
+** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
+** Permission is granted to do *anything* you want with this file,
+** commercial or otherwise, provided this message remains intact. So there!
+** I would appreciate receiving any updates/patches/changes that anyone
+** makes, and am willing to be the repository for said changes (am I
+** making a big mistake?).
+**
+** Pat Wood
+** Pipeline Associates, Inc.
+** pipeline!phw@motown.com or
+** sun!pipeline!phw or
+** uunet!motown!pipeline!phw
+**
+** 05/01/91 -- V1.0 -- first release to gcc mailing lists
+** 05/04/91 -- V1.1 -- added float and double prototypes and return values
+** -- fixed problems with adding and subtracting zero
+** -- fixed rounding in truncdfsf2
+** -- fixed SWAP define and tested on 386
+*/
+
+/*
+** The following are routines that replace the gnulib soft floating point
+** routines that are called automatically when -msoft-float is selected.
+** They support single and double precision IEEE formats, with provisions
+** for byte-swapped machines (tested on 386). Some of the double-precision
+** routines work at full precision, but most of the hard ones simply punt
+** and call the single precision routines, producing a loss of accuracy.
+** long long support is not assumed or included.
+** Overall accuracy is close to IEEE (actually 68882) for single-precision
+** arithmetic. I think there may still be a 1 in 1000 chance of a bit
+** being rounded the wrong way during a multiply. I'm not fussy enough to
+** bother with it, but if anyone is, knock yourself out.
+**
+** Efficiency has only been addressed where it was obvious that something
+** would make a big difference. Anyone who wants to do this right for
+** best speed should go in and rewrite in assembler.
+**
+** I have tested this only on a 68030 workstation and 386/ix integrated
+** in with -msoft-float.
+*/
+
+/* the following deal with IEEE single-precision numbers */
+#define EXCESS 126L
+#define SIGNBIT 0x80000000L
+#define HIDDEN (1L << 23L)
+#define SIGN(fp) ((fp) & SIGNBIT)
+#define EXP(fp) (((fp) >> 23L) & 0xFF)
+#define MANT(fp) (((fp) & 0x7FFFFFL) | HIDDEN)
+#define PACK(s,e,m) ((s) | ((e) << 23L) | (m))
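+
+/* For example, 1.0f is 0x3F800000: SIGN is 0, EXP is 127 (EXCESS + 1),
+   and MANT is just HIDDEN since no fraction bits are set. Going the
+   other way, PACK (0, 127L, 0) rebuilds it, since the hidden bit is
+   implied rather than stored. */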
+
+/* the following deal with IEEE double-precision numbers */
+#define EXCESSD 1022L
+#define HIDDEND (1L << 20L)
+#define EXPDBITS 11
+#define EXPDMASK 0x7FFL
+#define EXPD(fp) (((fp.l.upper) >> 20L) & 0x7FFL)
+#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
+ (fp.l.lower >> 22))
+#define MANTDMASK 0xFFFFFL /* mask of upper part */
+
+/* the following deal with IEEE extended-precision numbers */
+#define EXCESSX 16382L
+#define HIDDENX (1L << 31L)
+#define EXPXBITS 15
+#define EXPXMASK 0x7FFF
+#define EXPX(fp) (((fp.l.upper) >> 16) & EXPXMASK)
+#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
+
+union double_long
+{
+ double d;
+ struct {
+ long upper;
+ unsigned long lower;
+ } l;
+};
+
+union float_long {
+ float f;
+ long l;
+};
+
+union long_double_long
+{
+ long double ld;
+ struct
+ {
+ long upper;
+ unsigned long middle;
+ unsigned long lower;
+ } l;
+};
+
+#ifndef EXTFLOAT
+
+int
+__unordsf2(float a, float b)
+{
+ union float_long fl;
+
+ fl.f = a;
+ if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
+ return 1;
+ fl.f = b;
+ if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
+ return 1;
+ return 0;
+}
+
+int
+__unorddf2(double a, double b)
+{
+ union double_long dl;
+
+ dl.d = a;
+ if (EXPD(dl) == EXPDMASK
+ && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
+ return 1;
+ dl.d = b;
+ if (EXPD(dl) == EXPDMASK
+ && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
+ return 1;
+ return 0;
+}
+
+/* convert unsigned int to double */
+double
+__floatunsidf (unsigned long a1)
+{
+ long exp = 32 + EXCESSD;
+ union double_long dl;
+
+ if (!a1)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return dl.d;
+ }
+
+ while (a1 < 0x2000000L)
+ {
+ a1 <<= 4;
+ exp -= 4;
+ }
+
+ while (a1 < 0x80000000L)
+ {
+ a1 <<= 1;
+ exp--;
+ }
+
+ /* pack up and go home */
+ dl.l.upper = exp << 20L;
+ dl.l.upper |= (a1 >> 11L) & ~HIDDEND;
+ dl.l.lower = a1 << 21L;
+
+ return dl.d;
+}
+
+/* convert int to double */
+double
+__floatsidf (long a1)
+{
+ long sign = 0, exp = 31 + EXCESSD;
+ union double_long dl;
+
+ if (!a1)
+ {
+ dl.l.upper = dl.l.lower = 0;
+ return dl.d;
+ }
+
+ if (a1 < 0)
+ {
+ sign = SIGNBIT;
+ a1 = (long)-(unsigned long)a1;
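+      /* If a1 is still negative, it was 0x80000000 (the most negative
+         long): its magnitude 2^31 is not representable in a long, so
+         build the exact double for -2^31 directly. */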
+ if (a1 < 0)
+ {
+ dl.l.upper = SIGNBIT | ((32 + EXCESSD) << 20L);
+ dl.l.lower = 0;
+ return dl.d;
+ }
+ }
+
+ while (a1 < 0x1000000L)
+ {
+ a1 <<= 4;
+ exp -= 4;
+ }
+
+ while (a1 < 0x40000000L)
+ {
+ a1 <<= 1;
+ exp--;
+ }
+
+ /* pack up and go home */
+ dl.l.upper = sign;
+ dl.l.upper |= exp << 20L;
+ dl.l.upper |= (a1 >> 10L) & ~HIDDEND;
+ dl.l.lower = a1 << 22L;
+
+ return dl.d;
+}
+
+/* convert unsigned int to float */
+float
+__floatunsisf (unsigned long l)
+{
+ double foo = __floatunsidf (l);
+ return foo;
+}
+
+/* convert int to float */
+float
+__floatsisf (long l)
+{
+ double foo = __floatsidf (l);
+ return foo;
+}
+
+/* convert float to double */
+double
+__extendsfdf2 (float a1)
+{
+ register union float_long fl1;
+ register union double_long dl;
+ register long exp;
+ register long mant;
+
+ fl1.f = a1;
+
+ dl.l.upper = SIGN (fl1.l);
+ if ((fl1.l & ~SIGNBIT) == 0)
+ {
+ dl.l.lower = 0;
+ return dl.d;
+ }
+
+ exp = EXP(fl1.l);
+ mant = MANT (fl1.l) & ~HIDDEN;
+ if (exp == 0)
+ {
+ /* Denormal. */
+ exp = 1;
+ while (!(mant & HIDDEN))
+ {
+ mant <<= 1;
+ exp--;
+ }
+ mant &= ~HIDDEN;
+ }
+ exp = exp - EXCESS + EXCESSD;
+ dl.l.upper |= exp << 20;
+ dl.l.upper |= mant >> 3;
+ dl.l.lower = mant << 29;
+
+ return dl.d;
+}
+
+/* convert double to float */
+float
+__truncdfsf2 (double a1)
+{
+ register long exp;
+ register long mant;
+ register union float_long fl;
+ register union double_long dl1;
+ int sticky;
+ int shift;
+
+ dl1.d = a1;
+
+ if ((dl1.l.upper & ~SIGNBIT) == 0 && !dl1.l.lower)
+ {
+ fl.l = SIGND(dl1);
+ return fl.f;
+ }
+
+ exp = EXPD (dl1) - EXCESSD + EXCESS;
+
+ sticky = dl1.l.lower & ((1 << 22) - 1);
+ mant = MANTD (dl1);
+ /* shift double mantissa 6 bits so we can round */
+ sticky |= mant & ((1 << 6) - 1);
+ mant >>= 6;
+
+ /* Check for underflow and denormals. */
+ if (exp <= 0)
+ {
+ if (exp < -24)
+ {
+ sticky |= mant;
+ mant = 0;
+ }
+ else
+ {
+ sticky |= mant & ((1 << (1 - exp)) - 1);
+ mant >>= 1 - exp;
+ }
+ exp = 0;
+ }
+
+ /* now round */
+ shift = 1;
+ if ((mant & 1) && (sticky || (mant & 2)))
+ {
+ int rounding = exp ? 2 : 1;
+
+ mant += 1;
+
+ /* did the round overflow? */
+ if (mant >= (HIDDEN << rounding))
+ {
+ exp++;
+ shift = rounding;
+ }
+ }
+ /* shift down */
+ mant >>= shift;
+
+ mant &= ~HIDDEN;
+
+ /* pack up and go home */
+ fl.l = PACK (SIGND (dl1), exp, mant);
+ return (fl.f);
+}
+
+/* convert double to int */
+long
+__fixdfsi (double a1)
+{
+ register union double_long dl1;
+ register long exp;
+ register long l;
+
+ dl1.d = a1;
+
+ if (!dl1.l.upper && !dl1.l.lower)
+ return 0;
+
+ exp = EXPD (dl1) - EXCESSD - 31;
+ l = MANTD (dl1);
+
+ if (exp > 0)
+ {
+ /* Return largest integer. */
+ return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
+ }
+
+ if (exp <= -32)
+ return 0;
+
+ /* shift down until exp = 0 */
+ if (exp < 0)
+ l >>= -exp;
+
+ return (SIGND (dl1) ? -l : l);
+}
+
+/* convert float to int */
+long
+__fixsfsi (float a1)
+{
+ double foo = a1;
+ return __fixdfsi (foo);
+}
+
+#else /* EXTFLOAT */
+
+/* We do not need these routines for coldfire, as it has no extended
+ float format. */
+#if !defined (__mcoldfire__)
+
+/* Primitive extended precision floating point support.
+
+ We assume all numbers are normalized, don't do any rounding, etc. */
+
+/* Prototypes for the above in case we use them. */
+double __floatunsidf (unsigned long);
+double __floatsidf (long);
+float __floatsisf (long);
+double __extendsfdf2 (float);
+float __truncdfsf2 (double);
+long __fixdfsi (double);
+long __fixsfsi (float);
+
+int
+__unordxf2(long double a, long double b)
+{
+ union long_double_long ldl;
+
+ ldl.ld = a;
+ if (EXPX(ldl) == EXPXMASK
+ && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
+ return 1;
+ ldl.ld = b;
+ if (EXPX(ldl) == EXPXMASK
+ && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
+ return 1;
+ return 0;
+}
+
+/* convert double to long double */
+long double
+__extenddfxf2 (double d)
+{
+ register union double_long dl;
+ register union long_double_long ldl;
+ register long exp;
+
+ dl.d = d;
+ /*printf ("dfxf in: %g\n", d);*/
+
+ ldl.l.upper = SIGND (dl);
+ if ((dl.l.upper & ~SIGNBIT) == 0 && !dl.l.lower)
+ {
+ ldl.l.middle = 0;
+ ldl.l.lower = 0;
+ return ldl.ld;
+ }
+
+ exp = EXPD (dl) - EXCESSD + EXCESSX;
+ ldl.l.upper |= exp << 16;
+ ldl.l.middle = HIDDENX;
+ /* 31-20: # mantissa bits in ldl.l.middle - # mantissa bits in dl.l.upper */
+ ldl.l.middle |= (dl.l.upper & MANTDMASK) << (31 - 20);
+ /* 1+20: explicit-integer-bit + # mantissa bits in dl.l.upper */
+ ldl.l.middle |= dl.l.lower >> (1 + 20);
+ /* 32 - 21: # bits of dl.l.lower in ldl.l.middle */
+ ldl.l.lower = dl.l.lower << (32 - 21);
+
+ /*printf ("dfxf out: %s\n", dumpxf (ldl.ld));*/
+ return ldl.ld;
+}
+
+/* convert long double to double */
+double
+__truncxfdf2 (long double ld)
+{
+ register long exp;
+ register union double_long dl;
+ register union long_double_long ldl;
+
+ ldl.ld = ld;
+ /*printf ("xfdf in: %s\n", dumpxf (ld));*/
+
+ dl.l.upper = SIGNX (ldl);
+ if ((ldl.l.upper & ~SIGNBIT) == 0 && !ldl.l.middle && !ldl.l.lower)
+ {
+ dl.l.lower = 0;
+ return dl.d;
+ }
+
+ exp = EXPX (ldl) - EXCESSX + EXCESSD;
+ /* ??? quick and dirty: keep `exp' sane */
+ if (exp >= EXPDMASK)
+ exp = EXPDMASK - 1;
+ dl.l.upper |= exp << (32 - (EXPDBITS + 1));
+ /* +1-1: add one for sign bit, but take one off for explicit-integer-bit */
+ dl.l.upper |= (ldl.l.middle & MANTXMASK) >> (EXPDBITS + 1 - 1);
+ dl.l.lower = (ldl.l.middle & MANTXMASK) << (32 - (EXPDBITS + 1 - 1));
+ dl.l.lower |= ldl.l.lower >> (EXPDBITS + 1 - 1);
+
+ /*printf ("xfdf out: %g\n", dl.d);*/
+ return dl.d;
+}
+
+/* convert a float to a long double */
+long double
+__extendsfxf2 (float f)
+{
+ long double foo = __extenddfxf2 (__extendsfdf2 (f));
+ return foo;
+}
+
+/* convert a long double to a float */
+float
+__truncxfsf2 (long double ld)
+{
+ float foo = __truncdfsf2 (__truncxfdf2 (ld));
+ return foo;
+}
+
+/* convert an int to a long double */
+long double
+__floatsixf (long l)
+{
+ double foo = __floatsidf (l);
+ return foo;
+}
+
+/* convert an unsigned int to a long double */
+long double
+__floatunsixf (unsigned long l)
+{
+ double foo = __floatunsidf (l);
+ return foo;
+}
+
+/* convert a long double to an int */
+long
+__fixxfsi (long double ld)
+{
+ long foo = __fixdfsi ((double) ld);
+ return foo;
+}
+
+/* The remaining routines provide crude math support by working in double precision. */
+
+long double
+__addxf3 (long double x1, long double x2)
+{
+ return (double) x1 + (double) x2;
+}
+
+long double
+__subxf3 (long double x1, long double x2)
+{
+ return (double) x1 - (double) x2;
+}
+
+long double
+__mulxf3 (long double x1, long double x2)
+{
+ return (double) x1 * (double) x2;
+}
+
+long double
+__divxf3 (long double x1, long double x2)
+{
+ return (double) x1 / (double) x2;
+}
+
+long double
+__negxf2 (long double x1)
+{
+ return - (double) x1;
+}
+
+long
+__cmpxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__eqxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__nexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__ltxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__lexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__gtxf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+long
+__gexf2 (long double x1, long double x2)
+{
+ return __cmpdf2 ((double) x1, (double) x2);
+}
+
+#endif /* !__mcoldfire__ */
+#endif /* EXTFLOAT */
diff --git a/libgcc/config/m68k/lb1sf68.S b/libgcc/config/m68k/lb1sf68.S
new file mode 100644
index 00000000000..0339a092c4f
--- /dev/null
+++ b/libgcc/config/m68k/lb1sf68.S
@@ -0,0 +1,4116 @@
+/* libgcc routines for 68000 w/o floating-point hardware.
+ Copyright (C) 1994, 1996, 1997, 1998, 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Use this one for any 680x0; assumes no floating point hardware.
+ The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
+ Some of this code comes from MINIX, via the folks at ericsson.
+ D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
+*/
+
+/* These are predefined by new versions of GNU cpp. */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+#ifndef __IMMEDIATE_PREFIX__
+#define __IMMEDIATE_PREFIX__ #
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
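+
+/* For example, on a target whose C symbols carry a leading underscore,
+   SYM (divsi3) expands to _divsi3; on targets with an empty
+   __USER_LABEL_PREFIX__ it is plain divsi3. */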
+
+/* Note that X is a function. */
+
+#ifdef __ELF__
+#define FUNC(x) .type SYM(x),function
+#else
+/* The .proc pseudo-op is accepted, but ignored, by GAS. We could just
+ define this to the empty string for non-ELF systems, but defining it
+ to .proc means that the information is available to the assembler if
+ the need arises. */
+#define FUNC(x) .proc
+#endif
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/* Use the right prefix for immediate values. */
+
+#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
+
+#define d0 REG (d0)
+#define d1 REG (d1)
+#define d2 REG (d2)
+#define d3 REG (d3)
+#define d4 REG (d4)
+#define d5 REG (d5)
+#define d6 REG (d6)
+#define d7 REG (d7)
+#define a0 REG (a0)
+#define a1 REG (a1)
+#define a2 REG (a2)
+#define a3 REG (a3)
+#define a4 REG (a4)
+#define a5 REG (a5)
+#define a6 REG (a6)
+#define fp REG (fp)
+#define sp REG (sp)
+#define pc REG (pc)
+
+/* Provide a few macros to allow for PIC code support.
+ * With PIC, data is stored A5 relative so we've got to take a bit of special
+ * care to ensure that all loads of global data are via A5. PIC also requires
+ * jumps and subroutine calls to be PC relative rather than absolute. We cheat
+ * a little on this and in the PIC case, we use short offset branches and
+ * hope that the final object code is within range (which it should be).
+ */
+#ifndef __PIC__
+
+ /* Non PIC (absolute/relocatable) versions */
+
+ .macro PICCALL addr
+ jbsr \addr
+ .endm
+
+ .macro PICJUMP addr
+ jmp \addr
+ .endm
+
+ .macro PICLEA sym, reg
+ lea \sym, \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ pea \sym
+ .endm
+
+#else /* __PIC__ */
+
+# if defined (__uClinux__)
+
+ /* Versions for uClinux */
+
+# if defined(__ID_SHARED_LIBRARY__)
+
+ /* -mid-shared-library versions */
+
+ .macro PICLEA sym, reg
+ movel a5@(_current_shared_library_a5_offset_), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel a5@(_current_shared_library_a5_offset_), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+ PICLEA \addr,a0
+ jsr a0@
+ .endm
+
+ .macro PICJUMP addr
+ PICLEA \addr,a0
+ jmp a0@
+ .endm
+
+# else /* !__ID_SHARED_LIBRARY__ */
+
+ /* Versions for -msep-data */
+
+ .macro PICLEA sym, reg
+ movel \sym@GOT(a5), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel \sym@GOT(a5), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ jbsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+
+# else /* !__uClinux__ */
+
+ /* Versions for Linux */
+
+ .macro PICLEA sym, reg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
+ lea (-6, pc, \reg), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
+ lea (-6, pc, \areg), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ jbsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+#endif /* __PIC__ */
+
+
+#ifdef L_floatex
+
+| This is an attempt at decent floating point (single, double and
+| extended double) code for the GNU C compiler. It should be easy to
+| adapt to other compilers (but beware of the local labels!).
+
+| Starting date: 21 October, 1990
+
+| It is convenient to introduce the notation (s,e,f) for a floating point
+| number, where s=sign, e=exponent, f=fraction. We abbreviate 'floating
+| point number' to fpn, regardless of the precision.
+| Let MAX_EXP be in each case the maximum biased exponent (255 for floats,
+| 2047 for doubles and 32767 for long doubles). We then have the following
+| different cases:
+| 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
+| (-1)^s x 1.f x 2^(e-bias-1).
+| 2. Denormalized fpns have e=0. They correspond to numbers of the form
+| (-1)^s x 0.f x 2^(-bias).
+| 3. +/-INFINITY have e=MAX_EXP, f=0.
+| 4. Quiet NaNs (Not a Number) have all bits set.
+| 5. Signaling NaNs have s=0, e=MAX_EXP, f=1.
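
As a concrete reference for the five cases, here is a small C sketch (an
editorial illustration, not part of the patch; the helper name is invented)
that decodes an IEEE double into the (s,e,f) triple and classifies it:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode an IEEE double into the (s,e,f) triple used above and report
   which of the five cases it falls into.  */
static void classify (double x)
{
  uint64_t bits;
  memcpy (&bits, &x, sizeof bits);                 /* assumes IEEE 754 */
  unsigned s = (unsigned) (bits >> 63);
  unsigned e = (unsigned) ((bits >> 52) & 0x7ff);  /* MAX_EXP == 0x7ff */
  uint64_t f = bits & 0x000fffffffffffffULL;       /* 52 fraction bits */

  if (e == 0x7ff)
    printf (f ? "s=%u NaN\n" : "s=%u INFINITY\n", s);
  else if (e == 0)
    printf ("s=%u denormalized (or zero), f=%#llx\n", s,
            (unsigned long long) f);
  else
    printf ("s=%u normalized, e=%u, f=%#llx\n", s, e,
            (unsigned long long) f);
}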
+
+|=============================================================================
+| exceptions
+|=============================================================================
+
+| This is the floating point condition code register (_fpCCR):
+|
+| struct {
+| short _exception_bits;
+| short _trap_enable_bits;
+| short _sticky_bits;
+| short _rounding_mode;
+| short _format;
+| short _last_operation;
+| union {
+| float sf;
+| double df;
+| } _operand1;
+| union {
+| float sf;
+| double df;
+| } _operand2;
+| } _fpCCR;
+
+ .data
+ .even
+
+ .globl SYM (_fpCCR)
+
+SYM (_fpCCR):
+__exception_bits:
+ .word 0
+__trap_enable_bits:
+ .word 0
+__sticky_bits:
+ .word 0
+__rounding_mode:
+ .word ROUND_TO_NEAREST
+__format:
+ .word NIL
+__last_operation:
+ .word NOOP
+__operand1:
+ .long 0
+ .long 0
+__operand2:
+ .long 0
+ .long 0
+
+| Offsets:
+EBITS = __exception_bits - SYM (_fpCCR)
+TRAPE = __trap_enable_bits - SYM (_fpCCR)
+STICK = __sticky_bits - SYM (_fpCCR)
+ROUND = __rounding_mode - SYM (_fpCCR)
+FORMT = __format - SYM (_fpCCR)
+LASTO = __last_operation - SYM (_fpCCR)
+OPER1 = __operand1 - SYM (_fpCCR)
+OPER2 = __operand2 - SYM (_fpCCR)
+
+| The following exception types are supported:
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+| The allowed rounding modes are:
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| The allowed values of format are:
+NIL = 0
+SINGLE_FLOAT = 1
+DOUBLE_FLOAT = 2
+LONG_FLOAT = 3
+
+| The allowed values for the last operation are:
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
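
Since _fpCCR is an exported data symbol, client C code can inspect or reset
it directly (the text below notes it is globally visible). A minimal sketch,
assuming the word layout just defined; the C field names are invented for the
illustration, only the offsets are fixed by the assembly:

#include <stdint.h>

/* C-side view of the _fpCCR block; the two operand unions are modelled
   as pairs of 32-bit longs.  */
struct fpccr
{
  int16_t exception_bits;    /* offset  0 (EBITS) */
  int16_t trap_enable_bits;  /* offset  2 (TRAPE) */
  int16_t sticky_bits;       /* offset  4 (STICK) */
  int16_t rounding_mode;     /* offset  6 (ROUND) */
  int16_t format;            /* offset  8 (FORMT) */
  int16_t last_operation;    /* offset 10 (LASTO) */
  uint32_t operand1[2];      /* offset 12 (OPER1) */
  uint32_t operand2[2];      /* offset 20 (OPER2) */
};

extern struct fpccr _fpCCR;

/* Example: trap on divide-by-zero, round towards zero, forget history.  */
static void setup_fp_env (void)
{
  _fpCCR.trap_enable_bits |= 0x0008;  /* DIVIDE_BY_ZERO */
  _fpCCR.rounding_mode = 1;           /* ROUND_TO_ZERO */
  _fpCCR.sticky_bits = 0;             /* what __clear_sticky_bit does */
}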
+
+|=============================================================================
+| __clear_sticky_bits
+|=============================================================================
+
+| The sticky bits are normally not cleared (thus the name), whereas the
+| exception type and exception value reflect the last computation.
+| This routine is provided to clear them (you can also write to _fpCCR,
+| since it is globally visible).
+
+ .globl SYM (__clear_sticky_bit)
+
+ .text
+ .even
+
+| void __clear_sticky_bit(void);
+SYM (__clear_sticky_bit):
+ PICLEA SYM (_fpCCR),a0
+#ifndef __mcoldfire__
+ movew IMM (0),a0@(STICK)
+#else
+ clr.w a0@(STICK)
+#endif
+ rts
+
+|=============================================================================
+| $_exception_handler
+|=============================================================================
+
+ .globl $_exception_handler
+
+ .text
+ .even
+
+| This is the common exit point if an exception occurs.
+| NOTE: it is NOT callable from C!
+| It expects the exception type in d7, the format (SINGLE_FLOAT,
+| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
+| It sets the corresponding exception and sticky bits, and the format.
+| Depending on the format it fills the corresponding slots for the
+| operands which produced the exception (all this information is provided
+| so that if you write your own exception handlers you have enough
+| information to deal with the problem).
+| It then checks whether the corresponding exception is trap-enabled,
+| in which case it pushes the address of _fpCCR and traps through
+| trap FPTRAP (15 for the moment). A C model of this flow is sketched
+| after the L_floatex block below.
+
+FPTRAP = 15
+
+$_exception_handler:
+ PICLEA SYM (_fpCCR),a0
+ movew d7,a0@(EBITS) | set __exception_bits
+#ifndef __mcoldfire__
+ orw d7,a0@(STICK) | and __sticky_bits
+#else
+ movew a0@(STICK),d4
+ orl d7,d4
+ movew d4,a0@(STICK)
+#endif
+ movew d6,a0@(FORMT) | and __format
+ movew d5,a0@(LASTO) | and __last_operation
+
+| Now put the operands in place:
+#ifndef __mcoldfire__
+ cmpw IMM (SINGLE_FLOAT),d6
+#else
+ cmpl IMM (SINGLE_FLOAT),d6
+#endif
+ beq 1f
+ movel a6@(8),a0@(OPER1)
+ movel a6@(12),a0@(OPER1+4)
+ movel a6@(16),a0@(OPER2)
+ movel a6@(20),a0@(OPER2+4)
+ bra 2f
+1: movel a6@(8),a0@(OPER1)
+ movel a6@(12),a0@(OPER2)
+2:
+| And check whether the exception is trap-enabled:
+#ifndef __mcoldfire__
+ andw a0@(TRAPE),d7 | is exception trap-enabled?
+#else
+ clrl d6
+ movew a0@(TRAPE),d6
+ andl d6,d7
+#endif
+ beq 1f | no, exit
+ PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
+ trap IMM (FPTRAP) | and trap
+#ifndef __mcoldfire__
+1: moveml sp@+,d2-d7 | restore data registers
+#else
+1: moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+#endif /* L_floatex */
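
For readability, a C model of what $_exception_handler does (a sketch only;
the real routine takes its arguments in registers and is not callable from C,
and the trap mechanism is OS-specific). It reuses the struct fpccr sketch
from earlier; raise_fptrap is a hypothetical stand-in for trap IMM (FPTRAP):

/* Hypothetical hook standing in for "trap IMM (15)".  */
extern void raise_fptrap (struct fpccr *ccr);

/* Mirror of $_exception_handler's bookkeeping: record the exception,
   accumulate it into the sticky bits, remember format, operation and
   the offending operands, then trap only if that bit is trap-enabled.  */
static void fp_exception (int16_t exc, int16_t format, int16_t operation,
                          const uint32_t op1[2], const uint32_t op2[2])
{
  _fpCCR.exception_bits = exc;
  _fpCCR.sticky_bits |= exc;
  _fpCCR.format = format;
  _fpCCR.last_operation = operation;
  _fpCCR.operand1[0] = op1[0];
  _fpCCR.operand2[0] = op2[0];
  if (format != 1)                    /* not SINGLE_FLOAT: two longs each */
    {
      _fpCCR.operand1[1] = op1[1];
      _fpCCR.operand2[1] = op2[1];
    }
  if (exc & _fpCCR.trap_enable_bits)
    raise_fptrap (&_fpCCR);
}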
+
+#ifdef L_mulsi3
+ .text
+ FUNC(__mulsi3)
+ .globl SYM (__mulsi3)
+SYM (__mulsi3):
+ movew sp@(4), d0 /* x0 -> d0 */
+ muluw sp@(10), d0 /* x0*y1 */
+ movew sp@(6), d1 /* x1 -> d1 */
+ muluw sp@(8), d1 /* x1*y0 */
+#ifndef __mcoldfire__
+ addw d1, d0
+#else
+ addl d1, d0
+#endif
+ swap d0
+ clrw d0
+ movew sp@(6), d1 /* x1 -> d1 */
+ muluw sp@(10), d1 /* x1*y1 */
+ addl d1, d0
+
+ rts
+#endif /* L_mulsi3 */
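
__mulsi3 is the textbook split into 16-bit halves, which is all the 68000's
muluw can do; with x = xh*2^16 + xl and y = yh*2^16 + yl, the xh*yh term
falls entirely outside the low 32 bits. An equivalent C sketch (the function
name is illustrative):

#include <stdint.h>

/* 32x32 -> low 32 bits using only 16x16 -> 32 multiplies:
   x*y mod 2^32 == ((xh*yl + xl*yh) << 16) + xl*yl.  */
static uint32_t mulsi3 (uint32_t x, uint32_t y)
{
  uint32_t xh = x >> 16, xl = x & 0xffff;
  uint32_t yh = y >> 16, yl = y & 0xffff;
  return ((xh * yl + xl * yh) << 16) + xl * yl;
}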
+
+#ifdef L_udivsi3
+ .text
+ FUNC(__udivsi3)
+ .globl SYM (__udivsi3)
+SYM (__udivsi3):
+#ifndef __mcoldfire__
+ movel d2, sp@-
+ movel sp@(12), d1 /* d1 = divisor */
+ movel sp@(8), d0 /* d0 = dividend */
+
+ cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
+ jcc L3 /* then try next algorithm */
+ movel d0, d2
+ clrw d2
+ swap d2
+ divu d1, d2 /* high quotient in lower word */
+ movew d2, d0 /* save high quotient */
+ swap d0
+ movew sp@(10), d2 /* get low dividend + high remainder */
+ divu d1, d2 /* low quotient */
+ movew d2, d0
+ jra L6
+
+L3: movel d1, d2 /* use d2 as divisor backup */
+L4: lsrl IMM (1), d1 /* shift divisor */
+ lsrl IMM (1), d0 /* shift dividend */
+ cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
+ jcc L4
+ divu d1, d0 /* now we have 16-bit divisor */
+ andl IMM (0xffff), d0 /* keep the quotient, discard the remainder */
+
+/* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
+ the operand ranges, this might give a 33-bit product. If this product is
+ greater than the dividend, the tentative quotient was too large. */
+ movel d2, d1
+ mulu d0, d1 /* low part, 32 bits */
+ swap d2
+ mulu d0, d2 /* high part, at most 17 bits */
+ swap d2 /* align high part with low part */
+ tstw d2 /* high part 17 bits? */
+ jne L5 /* if 17 bits, quotient was too large */
+ addl d2, d1 /* add parts */
+ jcs L5 /* if sum is 33 bits, quotient was too large */
+ cmpl sp@(8), d1 /* compare the sum with the dividend */
+ jls L6 /* if sum <= dividend, quotient was correct */
+L5: subql IMM (1), d0 /* adjust quotient */
+
+L6: movel sp@+, d2
+ rts
+
+#else /* __mcoldfire__ */
+
+/* ColdFire implementation of non-restoring division algorithm from
+ Hennessy & Patterson, Appendix A. */
+ link a6,IMM (-12)
+ moveml d2-d4,sp@
+ movel a6@(8),d0
+ movel a6@(12),d1
+ clrl d2 | clear p
+ moveq IMM (31),d4
+L1: addl d0,d0 | shift reg pair (p,a) one bit left
+ addxl d2,d2
+ movl d2,d3 | subtract b from p, store in tmp.
+ subl d1,d3
+ jcs L2 | if no carry,
+ bset IMM (0),d0 | set the low order bit of a to 1,
+ movl d3,d2 | and store tmp in p.
+L2: subql IMM (1),d4
+ jcc L1
+ moveml sp@,d2-d4 | restore data registers
+ unlk a6 | and return
+ rts
+#endif /* __mcoldfire__ */
+
+#endif /* L_udivsi3 */
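
The 68000 path of __udivsi3 in C, for reference: small divisors take the
two-step 16-bit divu route, large divisors are shifted down to 16 bits to get
a tentative quotient that can only be one too large, which is then corrected.
A sketch; the uint64_t product check stands in for the 17-bit/carry tests in
the assembly, and the function name is illustrative:

#include <stdint.h>

static uint32_t udivsi3 (uint32_t a, uint32_t b)
{
  if (b < 0x10000)
    return a / b;                 /* two 16-bit divu steps in the asm */

  uint32_t a1 = a, b1 = b;
  while (b1 >= 0x10000)           /* shift both until divisor fits 16 bits */
    {
      a1 >>= 1;
      b1 >>= 1;
    }
  uint32_t q = a1 / b1;           /* tentative quotient, at most 1 too big */
  if ((uint64_t) q * b > a)       /* a 33-bit compare in the assembly */
    q--;
  return q;
}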
+
+#ifdef L_divsi3
+ .text
+ FUNC(__divsi3)
+ .globl SYM (__divsi3)
+SYM (__divsi3):
+ movel d2, sp@-
+
+ moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
+ movel sp@(12), d1 /* d1 = divisor */
+ jpl L1
+ negl d1
+#ifndef __mcoldfire__
+ negb d2 /* change sign because divisor <0 */
+#else
+ negl d2 /* change sign because divisor <0 */
+#endif
+L1: movel sp@(8), d0 /* d0 = dividend */
+ jpl L2
+ negl d0
+#ifndef __mcoldfire__
+ negb d2
+#else
+ negl d2
+#endif
+
+L2: movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
+ addql IMM (8), sp
+
+ tstb d2
+ jpl L3
+ negl d0
+
+L3: movel sp@+, d2
+ rts
+#endif /* L_divsi3 */
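
In C, __divsi3's strategy looks like this (a sketch reusing the udivsi3 model
above; negation is done in unsigned arithmetic so INT_MIN behaves like the
two's-complement negl):

#include <stdint.h>

static int32_t divsi3 (int32_t a, int32_t b)
{
  int negative = (a < 0) != (b < 0);          /* the d2 sign flag */
  uint32_t ua = a < 0 ? 0u - (uint32_t) a : (uint32_t) a;
  uint32_t ub = b < 0 ? 0u - (uint32_t) b : (uint32_t) b;
  uint32_t q = udivsi3 (ua, ub);              /* divide the magnitudes */
  return (int32_t) (negative ? 0u - q : q);   /* restore the sign */
}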
+
+#ifdef L_umodsi3
+ .text
+ FUNC(__umodsi3)
+ .globl SYM (__umodsi3)
+SYM (__umodsi3):
+ movel sp@(8), d1 /* d1 = divisor */
+ movel sp@(4), d0 /* d0 = dividend */
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__udivsi3)
+ addql IMM (8), sp
+ movel sp@(8), d1 /* d1 = divisor */
+#ifndef __mcoldfire__
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
+ addql IMM (8), sp
+#else
+ mulsl d1,d0
+#endif
+ movel sp@(4), d1 /* d1 = dividend */
+ subl d0, d1 /* d1 = a - (a/b)*b */
+ movel d1, d0
+ rts
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+ .text
+ FUNC(__modsi3)
+ .globl SYM (__modsi3)
+SYM (__modsi3):
+ movel sp@(8), d1 /* d1 = divisor */
+ movel sp@(4), d0 /* d0 = dividend */
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__divsi3)
+ addql IMM (8), sp
+ movel sp@(8), d1 /* d1 = divisor */
+#ifndef __mcoldfire__
+ movel d1, sp@-
+ movel d0, sp@-
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
+ addql IMM (8), sp
+#else
+ mulsl d1,d0
+#endif
+ movel sp@(4), d1 /* d1 = dividend */
+ subl d0, d1 /* d1 = a - (a/b)*b */
+ movel d1, d0
+ rts
+#endif /* L_modsi3 */
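
Both remainder routines derive the result from the quotient instead of
dividing twice; the identity, in a C sketch (unsigned case, reusing the
udivsi3 model above):

#include <stdint.h>

/* a % b == a - (a / b) * b, exactly as the assembly computes it.  */
static uint32_t umodsi3 (uint32_t a, uint32_t b)
{
  return a - udivsi3 (a, b) * b;
}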
+
+
+#ifdef L_double
+
+ .globl SYM (_fpCCR)
+ .globl $_exception_handler
+
+QUIET_NaN = 0xffffffff
+
+D_MAX_EXP = 0x07ff
+D_BIAS = 1022
+DBL_MAX_EXP = D_MAX_EXP - D_BIAS
+DBL_MIN_EXP = 1 - D_BIAS
+DBL_MANT_DIG = 53
+
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+DOUBLE_FLOAT = 2
+
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
+
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| Entry points:
+
+ .globl SYM (__adddf3)
+ .globl SYM (__subdf3)
+ .globl SYM (__muldf3)
+ .globl SYM (__divdf3)
+ .globl SYM (__negdf2)
+ .globl SYM (__cmpdf2)
+ .globl SYM (__cmpdf2_internal)
+ .hidden SYM (__cmpdf2_internal)
+
+ .text
+ .even
+
+| These are common routines to return and signal exceptions.
+
+Ld$den:
+| Return and signal a denormalized number
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$infty:
+Ld$overflow:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (0x7ff00000),d0
+ movel IMM (0),d1
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+OVERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$underflow:
+| Return 0 and set the exception flags
+ movel IMM (0),d0
+ movel d0,d1
+ movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$inop:
+| Return a quiet NaN and set the exception flags
+ movel IMM (QUIET_NaN),d0
+ movel d0,d1
+ movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Ld$div$0:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (0x7ff00000),d0
+ movel IMM (0),d1
+ orl d7,d0
+ movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+|=============================================================================
+|=============================================================================
+| double precision routines
+|=============================================================================
+|=============================================================================
+
+| A double precision floating point number (double) has the format:
+|
+| struct _double {
+| unsigned int sign : 1; /* sign bit */
+| unsigned int exponent : 11; /* exponent, biased by 1023 */
+| unsigned int fraction : 52; /* fraction */
+| } double;
+|
+| Thus sizeof(double) = 8 (64 bits).
+|
+| All the routines are callable from C programs, and return the result
+| in the register pair d0-d1. They also preserve all registers except
+| d0-d1 and a0-a1.
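
Concretely, d0 carries the sign, the 11-bit exponent and the top 20 fraction
bits, and d1 the low 32 fraction bits. A C sketch of that split (assuming
IEEE doubles, as on the m68k; the helper name is illustrative):

#include <stdint.h>
#include <string.h>

/* Split a double into the d0/d1 register pair used by these routines.  */
static void split_double (double x, uint32_t *d0, uint32_t *d1)
{
  uint64_t bits;
  memcpy (&bits, &x, sizeof bits);
  *d0 = (uint32_t) (bits >> 32);   /* sign | exponent | fraction[51:32] */
  *d1 = (uint32_t) bits;           /* fraction[31:0] */
  /* exponent == (*d0 >> 20) & 0x7ff; high fraction == *d0 & 0xfffff.  */
}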
+
+|=============================================================================
+| __subdf3
+|=============================================================================
+
+| double __subdf3(double, double);
+ FUNC(__subdf3)
+SYM (__subdf3):
+ bchg IMM (31),sp@(12) | change sign of second operand
+ | and fall through, so we always add
+|=============================================================================
+| __adddf3
+|=============================================================================
+
+| double __adddf3(double, double);
+ FUNC(__adddf3)
+SYM (__adddf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0) | everything will be done in registers
+ moveml d2-d7,sp@- | save all data registers (but d0-d1)
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | get second operand
+ movel a6@(20),d3 |
+
+ movel d0,d7 | get d0's sign bit in d7 '
+ addl d1,d1 | check and clear sign bit of a, and gain one
+ addxl d0,d0 | bit of extra precision
+ beq Ladddf$b | if zero return second operand
+
+ movel d2,d6 | save sign in d6
+ addl d3,d3 | get rid of sign bit and gain one bit of
+ addxl d2,d2 | extra precision
+ beq Ladddf$a | if zero return first operand
+
+ andl IMM (0x80000000),d7 | isolate a's sign bit '
+ swap d6 | and also b's sign bit '
+#ifndef __mcoldfire__
+ andw IMM (0x8000),d6 |
+ orw d6,d7 | and combine them into d7, so that a's sign '
+ | bit is in the high word and b's is in the '
+ | low word, so d6 is free to be used
+#else
+ andl IMM (0x8000),d6
+ orl d6,d7
+#endif
+ movel d7,a0 | now save d7 into a0, so d7 is free to
+ | be used also
+
+| Get the exponents and check for denormalized and/or infinity.
+
+ movel IMM (0x001fffff),d6 | mask for the fraction
+ movel IMM (0x00200000),d7 | mask to put hidden bit back
+
+ movel d0,d4 |
+ andl d6,d0 | get fraction in d0
+ notl d6 | make d6 into mask for the exponent
+ andl d6,d4 | get exponent in d4
+ beq Ladddf$a$den | branch if a is denormalized
+ cmpl d6,d4 | check for INFINITY or NaN
+ beq Ladddf$nf |
+ orl d7,d0 | and put hidden bit back
+Ladddf$1:
+ swap d4 | shift right exponent so that it starts
+#ifndef __mcoldfire__
+ lsrw IMM (5),d4 | in bit 0 and not bit 20
+#else
+ lsrl IMM (5),d4 | in bit 0 and not bit 20
+#endif
+| Now we have a's exponent in d4 and fraction in d0-d1 '
+ movel d2,d5 | save b to get exponent
+ andl d6,d5 | get exponent in d5
+ beq Ladddf$b$den | branch if b is denormalized
+ cmpl d6,d5 | check for INFINITY or NaN
+ beq Ladddf$nf
+ notl d6 | make d6 into mask for the fraction again
+ andl d6,d2 | and get fraction in d2
+ orl d7,d2 | and put hidden bit back
+Ladddf$2:
+ swap d5 | shift right exponent so that it starts
+#ifndef __mcoldfire__
+ lsrw IMM (5),d5 | in bit 0 and not bit 20
+#else
+ lsrl IMM (5),d5 | in bit 0 and not bit 20
+#endif
+
+| Now we have b's exponent in d5 and fraction in d2-d3. '
+
+| The situation now is as follows: the signs are combined in a0, the
+| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
+| and d5 (b). To do the rounding correctly we need to keep all the
+| bits until the end, so we need to use d0-d1-d2-d3 for the first number
+| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
+| exponents in a2-a3.
+
+#ifndef __mcoldfire__
+ moveml a2-a3,sp@- | save the address registers
+#else
+ movel a2,sp@-
+ movel a3,sp@-
+ movel a4,sp@-
+#endif
+
+ movel d4,a2 | save the exponents
+ movel d5,a3 |
+
+ movel IMM (0),d7 | and move the numbers around
+ movel d7,d6 |
+ movel d3,d5 |
+ movel d2,d4 |
+ movel d7,d3 |
+ movel d7,d2 |
+
+| Here we shift the numbers until the exponents are the same, and put
+| the largest exponent in a2.
+#ifndef __mcoldfire__
+ exg d4,a2 | get exponents back
+ exg d5,a3 |
+ cmpw d4,d5 | compare the exponents
+#else
+ movel d4,a4 | get exponents back
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+ cmpl d4,d5 | compare the exponents
+#endif
+ beq Ladddf$3 | if equal don't shift '
+ bhi 9f | branch if second exponent is higher
+
+| Here we have a's exponent larger than b's, so we have to shift b. We do
+| this by using as counter d2:
+1: movew d4,d2 | move largest exponent to d2
+#ifndef __mcoldfire__
+ subw d5,d2 | and subtract second exponent
+ exg d4,a2 | get back the longs we saved
+ exg d5,a3 |
+#else
+ subl d5,d2 | and subtract second exponent
+ movel d4,a4 | get back the longs we saved
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (DBL_MANT_DIG+2),d2
+#else
+ cmpl IMM (DBL_MANT_DIG+2),d2
+#endif
+ bge Ladddf$b$small
+#ifndef __mcoldfire__
+ cmpw IMM (32),d2 | if difference >= 32, shift by longs
+#else
+ cmpl IMM (32),d2 | if difference >= 32, shift by longs
+#endif
+ bge 5f
+2:
+#ifndef __mcoldfire__
+ cmpw IMM (16),d2 | if difference >= 16, shift by words
+#else
+ cmpl IMM (16),d2 | if difference >= 16, shift by words
+#endif
+ bge 6f
+ bra 3f | enter dbra loop
+
+4:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d4
+ roxrl IMM (1),d5
+ roxrl IMM (1),d6
+ roxrl IMM (1),d7
+#else
+ lsrl IMM (1),d7
+ btst IMM (0),d6
+ beq 10f
+ bset IMM (31),d7
+10: lsrl IMM (1),d6
+ btst IMM (0),d5
+ beq 11f
+ bset IMM (31),d6
+11: lsrl IMM (1),d5
+ btst IMM (0),d4
+ beq 12f
+ bset IMM (31),d5
+12: lsrl IMM (1),d4
+#endif
+3:
+#ifndef __mcoldfire__
+ dbra d2,4b
+#else
+ subql IMM (1),d2
+ bpl 4b
+#endif
+ movel IMM (0),d2
+ movel d2,d3
+ bra Ladddf$4
+5:
+ movel d6,d7
+ movel d5,d6
+ movel d4,d5
+ movel IMM (0),d4
+#ifndef __mcoldfire__
+ subw IMM (32),d2
+#else
+ subl IMM (32),d2
+#endif
+ bra 2b
+6:
+ movew d6,d7
+ swap d7
+ movew d5,d6
+ swap d6
+ movew d4,d5
+ swap d5
+ movew IMM (0),d4
+ swap d4
+#ifndef __mcoldfire__
+ subw IMM (16),d2
+#else
+ subl IMM (16),d2
+#endif
+ bra 3b
+
+9:
+#ifndef __mcoldfire__
+ exg d4,d5
+ movew d4,d6
+ subw d5,d6 | keep d5 (largest exponent) in d4
+ exg d4,a2
+ exg d5,a3
+#else
+ movel d5,d6
+ movel d4,d5
+ movel d6,d4
+ subl d5,d6
+ movel d4,a4
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (DBL_MANT_DIG+2),d6
+#else
+ cmpl IMM (DBL_MANT_DIG+2),d6
+#endif
+ bge Ladddf$a$small
+#ifndef __mcoldfire__
+ cmpw IMM (32),d6 | if difference >= 32, shift by longs
+#else
+ cmpl IMM (32),d6 | if difference >= 32, shift by longs
+#endif
+ bge 5f
+2:
+#ifndef __mcoldfire__
+ cmpw IMM (16),d6 | if difference >= 16, shift by words
+#else
+ cmpl IMM (16),d6 | if difference >= 16, shift by words
+#endif
+ bge 6f
+ bra 3f | enter dbra loop
+
+4:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+#endif
+3:
+#ifndef __mcoldfire__
+ dbra d6,4b
+#else
+ subql IMM (1),d6
+ bpl 4b
+#endif
+ movel IMM (0),d7
+ movel d7,d6
+ bra Ladddf$4
+5:
+ movel d2,d3
+ movel d1,d2
+ movel d0,d1
+ movel IMM (0),d0
+#ifndef __mcoldfire__
+ subw IMM (32),d6
+#else
+ subl IMM (32),d6
+#endif
+ bra 2b
+6:
+ movew d2,d3
+ swap d3
+ movew d1,d2
+ swap d2
+ movew d0,d1
+ swap d1
+ movew IMM (0),d0
+ swap d0
+#ifndef __mcoldfire__
+ subw IMM (16),d6
+#else
+ subl IMM (16),d6
+#endif
+ bra 3b
+Ladddf$3:
+#ifndef __mcoldfire__
+ exg d4,a2
+ exg d5,a3
+#else
+ movel d4,a4
+ movel a2,d4
+ movel a4,a2
+ movel d5,a4
+ movel a3,d5
+ movel a4,a3
+#endif
+Ladddf$4:
+| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
+| the signs in a0.
+
+| Here we have to decide whether to add or subtract the numbers:
+#ifndef __mcoldfire__
+ exg d7,a0 | get the signs
+ exg d6,a3 | a3 is free to be used
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ movel d7,d6 |
+ movew IMM (0),d7 | get a's sign in d7 '
+ swap d6 |
+ movew IMM (0),d6 | and b's sign in d6 '
+ eorl d7,d6 | compare the signs
+ bmi Lsubdf$0 | if the signs are different we have
+ | to subtract
+#ifndef __mcoldfire__
+ exg d7,a0 | else we add the numbers
+ exg d6,a3 |
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ addl d7,d3 |
+ addxl d6,d2 |
+ addxl d5,d1 |
+ addxl d4,d0 |
+
+ movel a2,d4 | return exponent to d4
+ movel a0,d7 |
+ andl IMM (0x80000000),d7 | d7 now has the sign
+
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| Since in the addition (but not in the subtraction!) we could have set
+| one more bit, we check for that here:
+ btst IMM (DBL_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+ lea pc@(Ladddf$5),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Ladddf$5:
+| Put back the exponent and check for overflow
+#ifndef __mcoldfire__
+ cmpw IMM (0x7ff),d4 | is the exponent big?
+#else
+ cmpl IMM (0x7ff),d4 | is the exponent big?
+#endif
+ bge 1f
+ bclr IMM (DBL_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | put exponent back into position
+#else
+ lsll IMM (4),d4 | put exponent back into position
+#endif
+ swap d0 |
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ bra Ladddf$ret
+1:
+ moveq IMM (ADD),d5
+ bra Ld$overflow
+
+Lsubdf$0:
+| Here we do the subtraction.
+#ifndef __mcoldfire__
+ exg d7,a0 | put sign back in a0
+ exg d6,a3 |
+#else
+ movel d7,a4
+ movel a0,d7
+ movel a4,a0
+ movel d6,a4
+ movel a3,d6
+ movel a4,a3
+#endif
+ subl d7,d3 |
+ subxl d6,d2 |
+ subxl d5,d1 |
+ subxl d4,d0 |
+ beq Ladddf$ret$1 | if zero just exit
+ bpl 1f | if positive skip the following
+ movel a0,d7 |
+ bchg IMM (31),d7 | change sign bit in d7
+ movel d7,a0 |
+ negl d3 |
+ negxl d2 |
+ negxl d1 | and negate result
+ negxl d0 |
+1:
+ movel a2,d4 | return exponent to d4
+ movel a0,d7
+ andl IMM (0x80000000),d7 | isolate sign bit
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3 |
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| Since in the addition (but not in the subtraction!) we could have set
+| one more bit, we check for that here:
+ btst IMM (DBL_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+ lea pc@(Lsubdf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lsubdf$1:
+| Put back the exponent and sign (we don't have overflow). '
+ bclr IMM (DBL_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | put exponent back into position
+#else
+ lsll IMM (4),d4 | put exponent back into position
+#endif
+ swap d0 |
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ bra Ladddf$ret
+
+| If one of the numbers was too small (difference of exponents >=
+| DBL_MANT_DIG+1) we return the other (and now we don't have to '
+| check for finiteness or zero).
+Ladddf$a$small:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+ movel a6@(16),d0
+ movel a6@(20),d1
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Ladddf$b$small:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+ movel a6@(8),d0
+ movel a6@(12),d1
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Ladddf$a$den:
+ movel d7,d4 | d7 contains 0x00200000
+ bra Ladddf$1
+
+Ladddf$b$den:
+ movel d7,d5 | d7 contains 0x00200000
+ notl d6
+ bra Ladddf$2
+
+Ladddf$b:
+| Return b (if a is zero)
+ movel d2,d0
+ movel d3,d1
+ bne 1f | Check if b is -0
+ cmpl IMM (0x80000000),d0
+ bne 1f
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Ladddf$ret
+Ladddf$a:
+ movel a6@(8),d0
+ movel a6@(12),d1
+1:
+ moveq IMM (ADD),d5
+| Check for NaN and +/-INFINITY.
+ movel d0,d7 |
+ andl IMM (0x80000000),d7 |
+ bclr IMM (31),d0 |
+ cmpl IMM (0x7ff00000),d0 |
+ bge 2f |
+ movel d0,d0 | check for zero, since we don't '
+ bne Ladddf$ret | want to return -0 by mistake
+ bclr IMM (31),d7 |
+ bra Ladddf$ret |
+2:
+ andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
+ orl d1,d0 |
+ bne Ld$inop |
+ bra Ld$infty |
+
+Ladddf$ret$1:
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3 | restore regs and exit
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+Ladddf$ret:
+| Normal exit.
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+ orl d7,d0 | put sign bit back
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Ladddf$ret$den:
+| Return a denormalized number.
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0 | shift right once more
+ roxrl IMM (1),d1 |
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+ bra Ladddf$ret
+
+Ladddf$nf:
+ moveq IMM (ADD),d5
+| This could be faster but it is not worth the effort, since it is not
+| executed very often. We sacrifice speed for clarity here.
+ movel a6@(8),d0 | get the numbers back (remember that we
+ movel a6@(12),d1 | did some processing already)
+ movel a6@(16),d2 |
+ movel a6@(20),d3 |
+ movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
+ movel d0,d7 | save sign bits
+ movel d2,d6 |
+ bclr IMM (31),d0 | clear sign bits
+ bclr IMM (31),d2 |
+| We know that one of them is either NaN or +/-INFINITY
+| Check for NaN (if either one is NaN return NaN)
+ cmpl d4,d0 | check first a (d0)
+ bhi Ld$inop | if d0 > 0x7ff00000, a is NaN
+ bne 2f | if d0 < 0x7ff00000, a is finite
+ tstl d1 | if d0 == 0x7ff00000 and d1 != 0,
+ bne Ld$inop | a is NaN
+2: cmpl d4,d2 | check now b (d2)
+ bhi Ld$inop |
+ bne 3f
+ tstl d3 |
+ bne Ld$inop |
+3:
+| Now comes the check for +/-INFINITY. We know that both are (maybe not
+| finite) numbers, but if both are infinite we have to check whether we
+| are adding or subtracting them.
+ eorl d7,d6 | to check sign bits
+ bmi 1f
+ andl IMM (0x80000000),d7 | get (common) sign bit
+ bra Ld$infty
+1:
+| We know one (or both) are infinite, so we test for equality between the
+| two numbers (if they are equal they must both be infinite, so we
+| return NaN).
+ cmpl d2,d0 | are both infinite?
+ bne 1f | if d0 <> d2 they are not equal
+ cmpl d3,d1 | if d0 == d2 test d3 and d1
+ beq Ld$inop | if equal return NaN
+1:
+ andl IMM (0x80000000),d7 | get a's sign bit '
+ cmpl d4,d0 | test now for infinity
+ beq Ld$infty | if a is INFINITY return with this sign
+ bchg IMM (31),d7 | else we know b is INFINITY and has
+ bra Ld$infty | the opposite sign
+
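
Stripped of the register juggling, the same-sign addition path above fits in
a few lines of C. A sketch under simplifying assumptions: finite nonzero
operands, mantissas carrying the hidden bit at bit 62 with guard bits below
it (roughly the role of the d0-d3 register group), and rounding done
separately, as in the code:

#include <stdint.h>

/* Align exponents, collecting shifted-out bits into a sticky bit so
   rounding still sees them, then add the magnitudes and renormalize.  */
static void add_magnitudes (uint64_t *ma, int *ea, uint64_t mb, int eb)
{
  if (*ea < eb)                       /* make a the larger-exponent operand */
    {
      uint64_t tm = *ma; *ma = mb; mb = tm;
      int te = *ea; *ea = eb; eb = te;
    }
  int d = *ea - eb;
  if (d > 63)                         /* the Ladddf$b$small case */
    mb = (mb != 0);
  else if (d)
    mb = (mb >> d) | ((mb << (64 - d)) != 0);   /* shift, keep sticky */

  uint64_t sum = *ma + mb;
  if (sum & (1ULL << 63))             /* the addition carried one bit: */
    {
      sum = (sum >> 1) | (sum & 1);   /* shift back, fold bit into sticky */
      (*ea)++;
    }
  *ma = sum;
}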
+|=============================================================================
+| __muldf3
+|=============================================================================
+
+| double __muldf3(double, double);
+ FUNC(__muldf3)
+SYM (__muldf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0-d1
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | and b into d2-d3
+ movel a6@(20),d3 |
+ movel d0,d7 | d7 will hold the sign of the product
+ eorl d2,d7 |
+ andl IMM (0x80000000),d7 |
+ movel d7,a0 | save sign bit into a0
+ movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
+ movel d7,d6 | another (mask for fraction)
+ notl d6 |
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d4 |
+ orl d1,d4 |
+ beq Lmuldf$a$0 | branch if a is zero
+ movel d0,d4 |
+ bclr IMM (31),d2 | get rid of b's sign bit '
+ movel d2,d5 |
+ orl d3,d5 |
+ beq Lmuldf$b$0 | branch if b is zero
+ movel d2,d5 |
+ cmpl d7,d0 | is a big?
+ bhi Lmuldf$inop | if a is NaN return NaN
+ beq Lmuldf$a$nf | we still have to check d1 and b ...
+ cmpl d7,d2 | now compare b with INFINITY
+ bhi Lmuldf$inop | is b NaN?
+ beq Lmuldf$b$nf | we still have to check d3 ...
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d4 and d5.
+ andl d7,d4 | isolate exponent in d4
+ beq Lmuldf$a$den | if exponent zero, have denormalized
+ andl d6,d0 | isolate fraction
+ orl IMM (0x00100000),d0 | and put hidden bit back
+ swap d4 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (4),d4 |
+#else
+ lsrl IMM (4),d4 |
+#endif
+Lmuldf$1:
+ andl d7,d5 |
+ beq Lmuldf$b$den |
+ andl d6,d2 |
+ orl IMM (0x00100000),d2 | and put hidden bit back
+ swap d5 |
+#ifndef __mcoldfire__
+ lsrw IMM (4),d5 |
+#else
+ lsrl IMM (4),d5 |
+#endif
+Lmuldf$2: |
+#ifndef __mcoldfire__
+ addw d5,d4 | add exponents
+ subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
+#else
+ addl d5,d4 | add exponents
+ subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
+#endif
+
+| We are now ready to do the multiplication. The situation is as follows:
+| both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
+| denormalized to start with!), which means that in the product bit 104
+| (which will correspond to bit 8 of the fourth long) is set.
+
+| Here we have to do the product.
+| To do it we have to juggle the registers back and forth, as there are not
+| enough to keep everything in them. So we use the address registers to keep
+| some intermediate data.
+
+#ifndef __mcoldfire__
+ moveml a2-a3,sp@- | save a2 and a3 for temporary use
+#else
+ movel a2,sp@-
+ movel a3,sp@-
+ movel a4,sp@-
+#endif
+ movel IMM (0),a2 | a2 is a null register
+ movel d4,a3 | and a3 will preserve the exponent
+
+| First, shift d2-d3 so bit 20 becomes bit 31:
+#ifndef __mcoldfire__
+ rorl IMM (5),d2 | rotate d2 5 places right
+ swap d2 | and swap it
+ rorl IMM (5),d3 | do the same thing with d3
+ swap d3 |
+ movew d3,d6 | get the rightmost 11 bits of d3
+ andw IMM (0x07ff),d6 |
+ orw d6,d2 | and put them into d2
+ andw IMM (0xf800),d3 | clear those bits in d3
+#else
+ moveq IMM (11),d7 | left shift d2 11 bits
+ lsll d7,d2
+ movel d3,d6 | get a copy of d3
+ lsll d7,d3 | left shift d3 11 bits
+ andl IMM (0xffe00000),d6 | get the top 11 bits of d3
+ moveq IMM (21),d7 | right shift them 21 bits
+ lsrl d7,d6
+ orl d6,d2 | stick them at the end of d2
+#endif
+
+ movel d2,d6 | move b into d6-d7
+ movel d3,d7 | move a into d4-d5
+ movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
+ movel d1,d5 |
+ movel IMM (0),d3 |
+ movel d3,d2 |
+ movel d3,d1 |
+ movel d3,d0 |
+
+| We use a1 as counter:
+ movel IMM (DBL_MANT_DIG-1),a1
+#ifndef __mcoldfire__
+ exg d7,a1
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+#endif
+
+1:
+#ifndef __mcoldfire__
+ exg d7,a1 | put counter back in a1
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+#endif
+ addl d3,d3 | shift sum once left
+ addxl d2,d2 |
+ addxl d1,d1 |
+ addxl d0,d0 |
+ addl d7,d7 |
+ addxl d6,d6 |
+ bcc 2f | if bit clear skip the following
+#ifndef __mcoldfire__
+ exg d7,a2 |
+#else
+ movel d7,a4
+ movel a2,d7
+ movel a4,a2
+#endif
+ addl d5,d3 | else add a to the sum
+ addxl d4,d2 |
+ addxl d7,d1 |
+ addxl d7,d0 |
+#ifndef __mcoldfire__
+ exg d7,a2 |
+#else
+ movel d7,a4
+ movel a2,d7
+ movel a4,a2
+#endif
+2:
+#ifndef __mcoldfire__
+ exg d7,a1 | put counter in d7
+ dbf d7,1b | decrement and branch
+#else
+ movel d7,a4
+ movel a1,d7
+ movel a4,a1
+ subql IMM (1),d7
+ bpl 1b
+#endif
+
+ movel a3,d4 | restore exponent
+#ifndef __mcoldfire__
+ moveml sp@+,a2-a3
+#else
+ movel sp@+,a4
+ movel sp@+,a3
+ movel sp@+,a2
+#endif
+
+| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
+| first thing to do now is to normalize it so bit 8 becomes bit
+| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
+ swap d0
+ swap d1
+ movew d1,d0
+ swap d2
+ movew d2,d1
+ swap d3
+ movew d3,d2
+ movew IMM (0),d3
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+#else
+ moveq IMM (29),d6
+ lsrl IMM (3),d3
+ movel d2,d7
+ lsll d6,d7
+ orl d7,d3
+ lsrl IMM (3),d2
+ movel d1,d7
+ lsll d6,d7
+ orl d7,d2
+ lsrl IMM (3),d1
+ movel d0,d7
+ lsll d6,d7
+ orl d7,d1
+ lsrl IMM (3),d0
+#endif
+
+| Now round, check for over- and underflow, and exit.
+ movel a0,d7 | get sign bit back into d7
+ moveq IMM (MULTIPLY),d5
+
+ btst IMM (DBL_MANT_DIG+1-32),d0
+ beq Lround$exit
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+ bra Lround$exit
+
+Lmuldf$inop:
+ moveq IMM (MULTIPLY),d5
+ bra Ld$inop
+
+Lmuldf$b$nf:
+ moveq IMM (MULTIPLY),d5
+ movel a0,d7 | get sign bit back into d7
+ tstl d3 | we know d2 == 0x7ff00000, so check d3
+ bne Ld$inop | if d3 <> 0 b is NaN
+ bra Ld$overflow | else we have overflow (since a is finite)
+
+Lmuldf$a$nf:
+ moveq IMM (MULTIPLY),d5
+ movel a0,d7 | get sign bit back into d7
+ tstl d1 | we know d0 == 0x7ff00000, so check d1
+ bne Ld$inop | if d1 <> 0 a is NaN
+ bra Ld$overflow | else signal overflow
+
+| If either number is zero return zero, unless the other is +/-INFINITY or
+| NaN, in which case we return NaN.
+Lmuldf$b$0:
+ moveq IMM (MULTIPLY),d5
+#ifndef __mcoldfire__
+ exg d2,d0 | put b (==0) into d0-d1
+ exg d3,d1 | and a (with sign bit cleared) into d2-d3
+ movel a0,d0 | set result sign
+#else
+ movel d0,d2 | put a into d2-d3
+ movel d1,d3
+ movel a0,d0 | put result zero into d0-d1
+ movq IMM(0),d1
+#endif
+ bra 1f
+Lmuldf$a$0:
+ movel a0,d0 | set result sign
+ movel a6@(16),d2 | put b into d2-d3 again
+ movel a6@(20),d3 |
+ bclr IMM (31),d2 | clear sign bit
+1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
+ bge Ld$inop | in case NaN or +/-INFINITY return NaN
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| hidden bit back into the fraction; instead we shift left until bit 21
+| (the hidden bit) is set, adjusting the exponent accordingly. We do this
+| to ensure that the product of the fractions is close to 1.
+Lmuldf$a$den:
+ movel IMM (1),d4
+ andl d6,d0
+1: addl d1,d1 | shift a left until bit 20 is set
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ subw IMM (1),d4 | and adjust exponent
+#else
+ subl IMM (1),d4 | and adjust exponent
+#endif
+ btst IMM (20),d0 |
+ bne Lmuldf$1 |
+ bra 1b
+
+Lmuldf$b$den:
+ movel IMM (1),d5
+ andl d6,d2
+1: addl d3,d3 | shift b left until bit 20 is set
+ addxl d2,d2 |
+#ifndef __mcoldfire__
+ subw IMM (1),d5 | and adjust exponent
+#else
+ subql IMM (1),d5 | and adjust exponent
+#endif
+ btst IMM (20),d2 |
+ bne Lmuldf$2 |
+ bra 1b
+
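
A C model of the shift-and-add loop described above: DBL_MANT_DIG iterations,
most significant multiplier bit first, accumulating into a hi/lo pair the way
d0-d3 hold the 128-bit product (a sketch; normalization and rounding happen
afterwards, as in the code):

#include <stdint.h>

/* Multiply two 53-bit mantissas (hidden bit at bit 52) into a 128-bit
   product held as two uint64_t halves.  */
static void mul_mantissas (uint64_t a, uint64_t b,
                           uint64_t *hi, uint64_t *lo)
{
  b <<= 11;                            /* move b's bit 52 up to bit 63 */
  *hi = *lo = 0;
  for (int i = 0; i < 53; i++)
    {
      *hi = (*hi << 1) | (*lo >> 63);  /* shift the sum once left */
      *lo <<= 1;
      if (b & (1ULL << 63))            /* next multiplier bit set? */
        {
          uint64_t t = *lo + a;        /* add a to the sum */
          *hi += (t < *lo);            /* propagate the carry */
          *lo = t;
        }
      b <<= 1;
    }
}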
+
+|=============================================================================
+| __divdf3
+|=============================================================================
+
+| double __divdf3(double, double);
+ FUNC(__divdf3)
+SYM (__divdf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0-d1
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | and b into d2-d3
+ movel a6@(20),d3 |
+ movel d0,d7 | d7 will hold the sign of the result
+ eorl d2,d7 |
+ andl IMM (0x80000000),d7
+ movel d7,a0 | save sign into a0
+ movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
+ movel d7,d6 | another (mask for fraction)
+ notl d6 |
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d4 |
+ orl d1,d4 |
+ beq Ldivdf$a$0 | branch if a is zero
+ movel d0,d4 |
+ bclr IMM (31),d2 | get rid of b's sign bit '
+ movel d2,d5 |
+ orl d3,d5 |
+ beq Ldivdf$b$0 | branch if b is zero
+ movel d2,d5
+ cmpl d7,d0 | is a big?
+ bhi Ldivdf$inop | if a is NaN return NaN
+ beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
+ cmpl d7,d2 | now compare b with INFINITY
+ bhi Ldivdf$inop | if b is NaN return NaN
+ beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d4 and d5 and normalize the numbers to
+| ensure that the ratio of the fractions is around 1. We do this by
+| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
+| set, even if they were denormalized to start with.
+| Thus, the result will satisfy: 2 > result > 1/2.
+ andl d7,d4 | and isolate exponent in d4
+ beq Ldivdf$a$den | if exponent is zero we have a denormalized
+ andl d6,d0 | and isolate fraction
+ orl IMM (0x00100000),d0 | and put hidden bit back
+ swap d4 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (4),d4 |
+#else
+ lsrl IMM (4),d4 |
+#endif
+Ldivdf$1: |
+ andl d7,d5 |
+ beq Ldivdf$b$den |
+ andl d6,d2 |
+ orl IMM (0x00100000),d2
+ swap d5 |
+#ifndef __mcoldfire__
+ lsrw IMM (4),d5 |
+#else
+ lsrl IMM (4),d5 |
+#endif
+Ldivdf$2: |
+#ifndef __mcoldfire__
+ subw d5,d4 | subtract exponents
+ addw IMM (D_BIAS),d4 | and add bias
+#else
+ subl d5,d4 | subtract exponents
+ addl IMM (D_BIAS),d4 | and add bias
+#endif
+
+| We are now ready to do the division. We have prepared things in such a way
+| that the ratio of the fractions will be less than 2 but greater than 1/2.
+| At this point the registers in use are:
+| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
+| DBL_MANT_DIG-1-32=1)
+| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
+| d4 holds the difference of the exponents, corrected by the bias
+| a0 holds the sign of the ratio
+
+| To do the rounding correctly we need to keep information about the
+| nonsignificant bits. One way to do this would be to do the division
+| using four registers; another is to use two registers (as originally
+| I did), but use a sticky bit to preserve information about the
+| fractional part. Note that we can keep that info in a1, which is not
+| used.
+ movel IMM (0),d6 | d6-d7 will hold the result
+ movel d6,d7 |
+ movel IMM (0),a1 | and a1 will hold the sticky bit
+
+ movel IMM (DBL_MANT_DIG-32+1),d5
+
+1: cmpl d0,d2 | is a < b?
+ bhi 3f | if b > a skip the following
+ beq 4f | if d0==d2 check d1 and d3
+2: subl d3,d1 |
+ subxl d2,d0 | a <-- a - b
+ bset d5,d6 | set the corresponding bit in d6
+3: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ bra 5f
+4: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 3b | if d3 > d1 skip the subtraction
+ bra 2b | else go do it
+5:
+| Here we have to start setting the bits in the second long.
+ movel IMM (31),d5 | again d5 is counter
+
+1: cmpl d0,d2 | is a < b?
+ bhi 3f | if b > a skip the following
+ beq 4f | if d0==d2 check d1 and d3
+2: subl d3,d1 |
+ subxl d2,d0 | a <-- a - b
+ bset d5,d7 | set the corresponding bit in d7
+3: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ bra 5f
+4: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 3b | if d3 > d1 skip the subtraction
+ bra 2b | else go do it
+5:
+| Now go ahead checking until we hit a one, which we store in d2.
+ movel IMM (DBL_MANT_DIG),d5
+1: cmpl d2,d0 | is a > b?
+ bhi 4f | if b < a, exit
+ beq 3f | if d0==d2 check d1 and d3
+2: addl d1,d1 | shift a by 1
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d5,1b | and branch back
+#else
+ subql IMM (1), d5
+ bpl 1b
+#endif
+ movel IMM (0),d2 | here no sticky bit was found
+ movel d2,d3
+ bra 5f
+3: cmpl d1,d3 | here d0==d2, so check d1 and d3
+ bhi 2b | if d3 > d1 go back
+4:
+| Here put the sticky bit in d2-d3 (in the position which actually corresponds
+| to it; if you don't do this the algorithm loses in some cases). '
+ movel IMM (0),d2
+ movel d2,d3
+#ifndef __mcoldfire__
+ subw IMM (DBL_MANT_DIG),d5
+ addw IMM (63),d5
+ cmpw IMM (31),d5
+#else
+ subl IMM (DBL_MANT_DIG),d5
+ addl IMM (63),d5
+ cmpl IMM (31),d5
+#endif
+ bhi 2f
+1: bset d5,d3
+ bra 5f
+#ifndef __mcoldfire__
+2: subw IMM (32),d5 | adjust the bit index for the high long
+#else
+2: subl IMM (32),d5 | adjust the bit index for the high long
+#endif
+ bset d5,d2
+5:
+| Finally we are finished! Move the quotient (built in d6-d7) to its
+| final destination in d0-d1:
+ movel d6,d0
+ movel d7,d1
+ movel IMM (0),d3
+
+| Here we have finished the division, with the result in d0-d1-d2-d3, with
+| 2^21 <= d0 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
+| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
+| not set:
+ btst IMM (DBL_MANT_DIG-32+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ roxrl IMM (1),d2
+ roxrl IMM (1),d3
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+| Now round, check for over- and underflow, and exit.
+ movel a0,d7 | restore sign bit to d7
+ moveq IMM (DIVIDE),d5
+ bra Lround$exit
+
+Ldivdf$inop:
+ moveq IMM (DIVIDE),d5
+ bra Ld$inop
+
+Ldivdf$a$0:
+| If a is zero check to see whether b is zero also. In that case return
+| NaN; then check if b is NaN, and return NaN also in that case. Else
+| return a properly signed zero.
+ moveq IMM (DIVIDE),d5
+ bclr IMM (31),d2 |
+ movel d2,d4 |
+ orl d3,d4 |
+ beq Ld$inop | if b is also zero return NaN
+ cmpl IMM (0x7ff00000),d2 | check for NaN
+ bhi Ld$inop |
+ blt 1f |
+ tstl d3 |
+ bne Ld$inop |
+1: movel a0,d0 | else return signed zero
+ moveq IMM(0),d1 |
+ PICLEA SYM (_fpCCR),a0 | clear exception flags
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7 |
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+Ldivdf$b$0:
+ moveq IMM (DIVIDE),d5
+| If we got here a is not zero. Check if a is NaN; in that case return NaN,
+| else return +/-INFINITY. Remember that a is in d0 with the sign bit
+| cleared already.
+ movel a0,d7 | put a's sign bit back in d7 '
+ cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
+ bhi Ld$inop | if larger it is NaN
+ tstl d1 |
+ bne Ld$inop |
+ bra Ld$div$0 | else signal DIVIDE_BY_ZERO
+
+Ldivdf$b$nf:
+ moveq IMM (DIVIDE),d5
+| If d2 == 0x7ff00000 we have to check d3.
+ tstl d3 |
+ bne Ld$inop | if d3 <> 0, b is NaN
+ bra Ld$underflow | else b is +/-INFINITY, so signal underflow
+
+Ldivdf$a$nf:
+ moveq IMM (DIVIDE),d5
+| If d0 == 0x7ff00000 we have to check d1.
+ tstl d1 |
+ bne Ld$inop | if d1 <> 0, a is NaN
+| If a is INFINITY we have to check b
+ cmpl d7,d2 | compare b with INFINITY
+ bge Ld$inop | if b is NaN or INFINITY return NaN
+ tstl d3 |
+ bne Ld$inop |
+ bra Ld$overflow | else return overflow
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| bit back into the fraction.
+Ldivdf$a$den:
+ movel IMM (1),d4
+ andl d6,d0
+1: addl d1,d1 | shift a left until bit 20 is set
+ addxl d0,d0
+#ifndef __mcoldfire__
+ subw IMM (1),d4 | and adjust exponent
+#else
+ subl IMM (1),d4 | and adjust exponent
+#endif
+ btst IMM (DBL_MANT_DIG-32-1),d0
+ bne Ldivdf$1
+ bra 1b
+
+Ldivdf$b$den:
+ movel IMM (1),d5
+ andl d6,d2
+1: addl d3,d3 | shift b left until bit 20 is set
+ addxl d2,d2
+#ifndef __mcoldfire__
+ subw IMM (1),d5 | and adjust exponent
+#else
+ subql IMM (1),d5 | and adjust exponent
+#endif
+ btst IMM (DBL_MANT_DIG-32-1),d2
+ bne Ldivdf$2
+ bra 1b
+
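
The three quotient loops above amount to one long division that produces
DBL_MANT_DIG+1 result bits plus the sticky bit discussed in the comments. In
C (a sketch, assuming both mantissas are normalized so the ratio lies in
(1/2, 2)):

#include <stdint.h>

/* Restoring division, one quotient bit per step; whatever remainder is
   left at the end becomes the sticky bit.  */
static uint64_t div_mantissas (uint64_t a, uint64_t b, int *sticky)
{
  uint64_t q = 0;
  for (int i = 0; i < 54; i++)   /* DBL_MANT_DIG + 1 bits */
    {
      q <<= 1;
      if (a >= b)
        {
          a -= b;                /* like "subl d3,d1 ; subxl d2,d0" */
          q |= 1;
        }
      a <<= 1;                   /* like "addl d1,d1 ; addxl d0,d0" */
    }
  *sticky = (a != 0);
  return q;
}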
+Lround$exit:
+| This is a common exit point for __muldf3 and __divdf3. When they enter
+| this point the sign of the result is in d7, the result in d0-d1, normalized
+| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
+
+| First check for underflow in the exponent:
+#ifndef __mcoldfire__
+ cmpw IMM (-DBL_MANT_DIG-1),d4
+#else
+ cmpl IMM (-DBL_MANT_DIG-1),d4
+#endif
+ blt Ld$underflow
+| It could happen that the exponent is less than 1, in which case the
+| number is denormalized. In this case we shift right and adjust the
+| exponent until it becomes 1 or the fraction is zero (in the latter case
+| we signal underflow and return zero).
+ movel d7,a0 |
+ movel IMM (0),d6 | use d6-d7 to collect bits flushed right
+ movel d6,d7 | use d6-d7 to collect bits flushed right
+#ifndef __mcoldfire__
+ cmpw IMM (1),d4 | if the exponent is less than 1 we
+#else
+ cmpl IMM (1),d4 | if the exponent is less than 1 we
+#endif
+ bge 2f | have to shift right (denormalize)
+1:
+#ifndef __mcoldfire__
+ addw IMM (1),d4 | adjust the exponent
+ lsrl IMM (1),d0 | shift right once
+ roxrl IMM (1),d1 |
+ roxrl IMM (1),d2 |
+ roxrl IMM (1),d3 |
+ roxrl IMM (1),d6 |
+ roxrl IMM (1),d7 |
+ cmpw IMM (1),d4 | is the exponent 1 already?
+#else
+ addl IMM (1),d4 | adjust the exponent
+ lsrl IMM (1),d7
+ btst IMM (0),d6
+ beq 13f
+ bset IMM (31),d7
+13: lsrl IMM (1),d6
+ btst IMM (0),d3
+ beq 14f
+ bset IMM (31),d6
+14: lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d2
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 12f
+ bset IMM (31),d1
+12: lsrl IMM (1),d0
+ cmpl IMM (1),d4 | is the exponent 1 already?
+#endif
+ beq 2f | if so, stop shifting
+ bra 1b | else loop back
+ bra Ld$underflow | safety check, shouldn't execute '
+2: orl d6,d2 | this is a trick so we don't lose '
+ orl d7,d3 | the bits which were flushed right
+ movel a0,d7 | get back sign bit into d7
+| Now call the rounding routine (which takes care of denormalized numbers):
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lround$0:
+| Here we have a correctly rounded result (either normalized or denormalized).
+
+| Here we should have either a normalized number or a denormalized one, and
+| the exponent is necessarily larger or equal to 1 (so we don't have to '
+| check again for underflow!). We have to check for overflow or for a
+| denormalized number (which also signals underflow).
+| Check for overflow (i.e., exponent >= 0x7ff).
+#ifndef __mcoldfire__
+ cmpw IMM (0x07ff),d4
+#else
+ cmpl IMM (0x07ff),d4
+#endif
+ bge Ld$overflow
+| Now check for a denormalized number (exponent==0):
+ movew d4,d4
+ beq Ld$den
+1:
+| Put back the exponents and sign and return.
+#ifndef __mcoldfire__
+ lslw IMM (4),d4 | exponent back to fourth byte
+#else
+ lsll IMM (4),d4 | exponent back to fourth byte
+#endif
+ bclr IMM (DBL_MANT_DIG-32-1),d0
+ swap d0 | and put back exponent
+#ifndef __mcoldfire__
+ orw d4,d0 |
+#else
+ orl d4,d0 |
+#endif
+ swap d0 |
+ orl d7,d0 | and sign also
+
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+|=============================================================================
+| __negdf2
+|=============================================================================
+
+| double __negdf2(double, double);
+ FUNC(__negdf2)
+SYM (__negdf2):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (NEGATE),d5
+ movel a6@(8),d0 | get number to negate in d0-d1
+ movel a6@(12),d1 |
+ bchg IMM (31),d0 | negate
+ movel d0,d2 | make a positive copy (for the tests)
+ bclr IMM (31),d2 |
+ movel d2,d4 | check for zero
+ orl d1,d4 |
+ beq 2f | if zero (either sign) return +zero
+ cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
+ blt 1f | if finite, return
+ bhi Ld$inop | if larger (fraction not zero) is NaN
+ tstl d1 | if d2 == 0x7ff00000 check d1
+ bne Ld$inop |
+ movel d0,d7 | else get sign and return INFINITY
+ andl IMM (0x80000000),d7
+ bra Ld$infty
+1: PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+2: bclr IMM (31),d0
+ bra 1b
+
+|=============================================================================
+| __cmpdf2
+|=============================================================================
+
+GREATER = 1
+LESS = -1
+EQUAL = 0
+
+| int __cmpdf2_internal(double, double, int);
+SYM (__cmpdf2_internal):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@- | save registers
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (COMPARE),d5
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 |
+ movel a6@(16),d2 | get second operand
+ movel a6@(20),d3 |
+| First check if a and/or b are (+/-) zero and in that case clear
+| the sign bit.
+ movel d0,d6 | copy signs into d6 (a) and d7(b)
+ bclr IMM (31),d0 | and clear signs in d0 and d2
+ movel d2,d7 |
+ bclr IMM (31),d2 |
+ cmpl IMM (0x7ff00000),d0 | check for a == NaN
+ bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
+ beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
+ movel d0,d4 | copy into d4 to test for zero
+ orl d1,d4 |
+ beq Lcmpdf$a$0 |
+Lcmpdf$0:
+ cmpl IMM (0x7ff00000),d2 | check for b == NaN
+ bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
+ beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
+ movel d2,d4 |
+ orl d3,d4 |
+ beq Lcmpdf$b$0 |
+Lcmpdf$1:
+| Check the signs
+ eorl d6,d7
+ bpl 1f
+| If the signs are not equal check if a >= 0
+ tstl d6
+ bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
+ bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
+1:
+| If the signs are equal check for < 0
+ tstl d6
+ bpl 1f
+| If both are negative exchange them
+#ifndef __mcoldfire__
+ exg d0,d2
+ exg d1,d3
+#else
+ movel d0,d7
+ movel d2,d0
+ movel d7,d2
+ movel d1,d7
+ movel d3,d1
+ movel d7,d3
+#endif
+1:
+| Now that they are positive we just compare them as longs (does this also
+| work for denormalized numbers?).
+ cmpl d0,d2
+ bhi Lcmpdf$b$gt$a | |b| > |a|
+ bne Lcmpdf$a$gt$b | |b| < |a|
+| If we got here d0 == d2, so we compare d1 and d3.
+ cmpl d1,d3
+ bhi Lcmpdf$b$gt$a | |b| > |a|
+ bne Lcmpdf$a$gt$b | |b| < |a|
+| If we got here a == b.
+ movel IMM (EQUAL),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpdf$a$gt$b:
+ movel IMM (GREATER),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpdf$b$gt$a:
+ movel IMM (LESS),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Lcmpdf$a$0:
+ bclr IMM (31),d6
+ bra Lcmpdf$0
+Lcmpdf$b$0:
+ bclr IMM (31),d7
+ bra Lcmpdf$1
+
+Lcmpdf$a$nf:
+ tstl d1
+ bne Ld$inop
+ bra Lcmpdf$0
+
+Lcmpdf$b$nf:
+ tstl d3
+ bne Ld$inop
+ bra Lcmpdf$1
+
+Lcmpd$inop:
+ movl a6@(24),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+| int __cmpdf2(double, double);
+ FUNC(__cmpdf2)
+SYM (__cmpdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+
+|=============================================================================
+| rounding routines
+|=============================================================================
+
+| The rounding routines expect the number to be normalized in registers
+| d0-d1-d2-d3, with the exponent in register d4. They assume that the
+| exponent is greater than or equal to 1. They return a properly normalized number
+| if possible, and a denormalized number otherwise. The exponent is returned
+| in d4.
+
+Lround$to$nearest:
+| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
+| Here we assume that the exponent is not too small (this should be checked
+| before entering the rounding routine), but the number could be denormalized.
+
+| Check for denormalized numbers:
+1: btst IMM (DBL_MANT_DIG-32),d0
+ bne 2f | if set the number is normalized
+| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
+| is one (remember that a denormalized number corresponds to an
+| exponent of -D_BIAS+1).
+#ifndef __mcoldfire__
+ cmpw IMM (1),d4 | remember that the exponent is at least one
+#else
+ cmpl IMM (1),d4 | remember that the exponent is at least one
+#endif
+ beq 2f | an exponent of one means denormalized
+ addl d3,d3 | else shift and adjust the exponent
+ addxl d2,d2 |
+ addxl d1,d1 |
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d4,1b |
+#else
+ subql IMM (1), d4
+ bpl 1b
+#endif
+2:
+| Now round: we do it as follows: after the shifting we can write the
+| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
+| If delta < 1, do nothing. If delta > 1, add 1 to f.
+| If delta == 1, we round so that the result (after shifting) is even
+| (round-to-nearest, ties-to-even).
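+| In C terms the rule is roughly (a minimal, hypothetical sketch with m the
+| mantissa, g the guard bit just below it, and s the sticky bits below that;
+| not the register-level code):
+|
+|   unsigned long long
+|   round_nearest_even (unsigned long long m, int g, int s)
+|   {
+|     if (g && (s || (m & 1)))   /* above halfway, or halfway and m odd */
+|       m += 1;                  /* round up, so ties land on an even m */
+|     return m;
+|   }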
+ btst IMM (0),d1 | is delta < 1?
+ beq 2f | if so, do not do anything
+ orl d2,d3 | is delta == 1?
+ bne 1f | if so round to even
+ movel d1,d3 |
+ andl IMM (2),d3 | bit 1 is the last significant bit
+ movel IMM (0),d2 |
+ addl d3,d1 |
+ addxl d2,d0 |
+ bra 2f |
+1: movel IMM (1),d3 | else add 1
+ movel IMM (0),d2 |
+ addl d3,d1 |
+ addxl d2,d0
+| Shift right once (because we used bit #DBL_MANT_DIG-32!).
+2:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+
+| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
+| 'fraction overflow' ...).
+ btst IMM (DBL_MANT_DIG-32),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d4
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addl IMM (1),d4
+#endif
+1:
+| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
+| have to put the exponent to zero and return a denormalized number.
+ btst IMM (DBL_MANT_DIG-32-1),d0
+ beq 1f
+ jmp a0@
+1: movel IMM (0),d4
+ jmp a0@
+
+Lround$to$zero:
+Lround$to$plus:
+Lround$to$minus:
+ jmp a0@
+#endif /* L_double */
+
+#ifdef L_float
+
+ .globl SYM (_fpCCR)
+ .globl $_exception_handler
+
+QUIET_NaN = 0xffffffff
+SIGNL_NaN = 0x7f800001
+INFINITY = 0x7f800000
+
+F_MAX_EXP = 0xff
+F_BIAS = 126
+FLT_MAX_EXP = F_MAX_EXP - F_BIAS
+FLT_MIN_EXP = 1 - F_BIAS
+FLT_MANT_DIG = 24
+
+INEXACT_RESULT = 0x0001
+UNDERFLOW = 0x0002
+OVERFLOW = 0x0004
+DIVIDE_BY_ZERO = 0x0008
+INVALID_OPERATION = 0x0010
+
+SINGLE_FLOAT = 1
+
+NOOP = 0
+ADD = 1
+MULTIPLY = 2
+DIVIDE = 3
+NEGATE = 4
+COMPARE = 5
+EXTENDSFDF = 6
+TRUNCDFSF = 7
+
+UNKNOWN = -1
+ROUND_TO_NEAREST = 0 | round result to nearest representable value
+ROUND_TO_ZERO = 1 | round result towards zero
+ROUND_TO_PLUS = 2 | round result towards plus infinity
+ROUND_TO_MINUS = 3 | round result towards minus infinity
+
+| Entry points:
+
+ .globl SYM (__addsf3)
+ .globl SYM (__subsf3)
+ .globl SYM (__mulsf3)
+ .globl SYM (__divsf3)
+ .globl SYM (__negsf2)
+ .globl SYM (__cmpsf2)
+ .globl SYM (__cmpsf2_internal)
+ .hidden SYM (__cmpsf2_internal)
+
+| These are common routines to return and signal exceptions.
+
+ .text
+ .even
+
+Lf$den:
+| Return and signal a denormalized number
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$infty:
+Lf$overflow:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (INFINITY),d0
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+OVERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$underflow:
+| Return 0 and set the exception flags
+ moveq IMM (0),d0
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$inop:
+| Return a quiet NaN and set the exception flags
+ movel IMM (QUIET_NaN),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+Lf$div$0:
+| Return a properly signed INFINITY and set the exception flags
+ movel IMM (INFINITY),d0
+ orl d7,d0
+ moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+|=============================================================================
+|=============================================================================
+| single precision routines
+|=============================================================================
+|=============================================================================
+
+| A single precision floating point number (float) has the format:
+|
+| struct _float {
+| unsigned int sign : 1; /* sign bit */
+| unsigned int exponent : 8; /* exponent, shifted by 126 */
+| unsigned int fraction : 23; /* fraction */
+| } float;
+|
+| Thus sizeof(float) = 4 (32 bits).
+|
+| All the routines are callable from C programs, and return the result
+| in the single register d0. They also preserve all registers except
+| d0-d1 and a0-a1.
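+| As a hedged illustration, the three fields can be extracted from the raw
+| 32-bit pattern like this (hypothetical helpers, not part of the library):
+|
+|   unsigned int sign (unsigned int f)     { return f >> 31; }
+|   unsigned int exponent (unsigned int f) { return (f >> 23) & 0xff; }
+|   unsigned int fraction (unsigned int f) { return f & 0x007fffff; }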
+
+|=============================================================================
+| __subsf3
+|=============================================================================
+
+| float __subsf3(float, float);
+ FUNC(__subsf3)
+SYM (__subsf3):
+ bchg IMM (31),sp@(8) | change sign of second operand
+ | and fall through
+|=============================================================================
+| __addsf3
+|=============================================================================
+
+| float __addsf3(float, float);
+ FUNC(__addsf3)
+SYM (__addsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0) | everything will be done in registers
+ moveml d2-d7,sp@- | save all data registers but d0-d1
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 | get second operand
+ movel d0,a0 | get d0's sign bit '
+ addl d0,d0 | check and clear sign bit of a
+ beq Laddsf$b | if zero return second operand
+ movel d1,a1 | save b's sign bit '
+ addl d1,d1 | get rid of sign bit
+ beq Laddsf$a | if zero return first operand
+
+| Get the exponents and check for denormalized and/or infinity.
+
+ movel IMM (0x00ffffff),d4 | mask to get fraction
+ movel IMM (0x01000000),d5 | mask to put hidden bit back
+
+ movel d0,d6 | save a to get exponent
+ andl d4,d0 | get fraction in d0
+ notl d4 | make d4 into a mask for the exponent
+ andl d4,d6 | get exponent in d6
+ beq Laddsf$a$den | branch if a is denormalized
+ cmpl d4,d6 | check for INFINITY or NaN
+ beq Laddsf$nf
+ swap d6 | put exponent into first word
+ orl d5,d0 | and put hidden bit back
+Laddsf$1:
+| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
+ movel d1,d7 | get exponent in d7
+ andl d4,d7 |
+ beq Laddsf$b$den | branch if b is denormalized
+ cmpl d4,d7 | check for INFINITY or NaN
+ beq Laddsf$nf
+ swap d7 | put exponent into first word
+ notl d4 | make d4 into a mask for the fraction
+ andl d4,d1 | get fraction in d1
+ orl d5,d1 | and put hidden bit back
+Laddsf$2:
+| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
+
+| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
+| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
+| bit).
+
+ movel d1,d2 | move b to d2, since we want to use
+ | two registers to do the sum
+ movel IMM (0),d1 | and clear the new ones
+ movel d1,d3 |
+
+| Here we shift the numbers in registers d0 and d1 so the exponents are the
+| same, and put the largest exponent in d6. Note that we are using two
+| registers for each number (see the discussion by D. Knuth in "Seminumerical
+| Algorithms").
+#ifndef __mcoldfire__
+ cmpw d6,d7 | compare exponents
+#else
+ cmpl d6,d7 | compare exponents
+#endif
+ beq Laddsf$3 | if equal don't shift '
+ bhi 5f | branch if second exponent largest
+1:
+ subl d6,d7 | keep the largest exponent
+ negl d7
+#ifndef __mcoldfire__
+ lsrw IMM (8),d7 | put difference in lower byte
+#else
+ lsrl IMM (8),d7 | put difference in lower byte
+#endif
+| if difference is too large we don't shift (actually, we can just exit) '
+#ifndef __mcoldfire__
+ cmpw IMM (FLT_MANT_DIG+2),d7
+#else
+ cmpl IMM (FLT_MANT_DIG+2),d7
+#endif
+ bge Laddsf$b$small
+#ifndef __mcoldfire__
+ cmpw IMM (16),d7 | if difference >= 16 swap
+#else
+ cmpl IMM (16),d7 | if difference >= 16 swap
+#endif
+ bge 4f
+2:
+#ifndef __mcoldfire__
+ subw IMM (1),d7
+#else
+ subql IMM (1), d7
+#endif
+3:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d2 | shift right second operand
+ roxrl IMM (1),d3
+ dbra d7,3b
+#else
+ lsrl IMM (1),d3
+ btst IMM (0),d2
+ beq 10f
+ bset IMM (31),d3
+10: lsrl IMM (1),d2
+ subql IMM (1), d7
+ bpl 3b
+#endif
+ bra Laddsf$3
+4:
+ movew d2,d3
+ swap d3
+ movew d3,d2
+ swap d2
+#ifndef __mcoldfire__
+ subw IMM (16),d7
+#else
+ subl IMM (16),d7
+#endif
+ bne 2b | if still more bits, go back to normal case
+ bra Laddsf$3
+5:
+#ifndef __mcoldfire__
+ exg d6,d7 | exchange the exponents
+#else
+ eorl d6,d7
+ eorl d7,d6
+ eorl d6,d7
+#endif
+ subl d6,d7 | keep the largest exponent
+ negl d7 |
+#ifndef __mcoldfire__
+ lsrw IMM (8),d7 | put difference in lower byte
+#else
+ lsrl IMM (8),d7 | put difference in lower byte
+#endif
+| if difference is too large we don't shift (and exit!) '
+#ifndef __mcoldfire__
+ cmpw IMM (FLT_MANT_DIG+2),d7
+#else
+ cmpl IMM (FLT_MANT_DIG+2),d7
+#endif
+ bge Laddsf$a$small
+#ifndef __mcoldfire__
+ cmpw IMM (16),d7 | if difference >= 16 swap
+#else
+ cmpl IMM (16),d7 | if difference >= 16 swap
+#endif
+ bge 8f
+6:
+#ifndef __mcoldfire__
+ subw IMM (1),d7
+#else
+ subl IMM (1),d7
+#endif
+7:
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0 | shift right first operand
+ roxrl IMM (1),d1
+ dbra d7,7b
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ subql IMM (1),d7
+ bpl 7b
+#endif
+ bra Laddsf$3
+8:
+ movew d0,d1
+ swap d1
+ movew d1,d0
+ swap d0
+#ifndef __mcoldfire__
+ subw IMM (16),d7
+#else
+ subl IMM (16),d7
+#endif
+ bne 6b | if still more bits, go back to normal case
+ | otherwise we fall through
+
+| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
+| signs are stored in a0 and a1).
+
+Laddsf$3:
+| Here we have to decide whether to add or subtract the numbers
+#ifndef __mcoldfire__
+ exg d6,a0 | get signs back
+ exg d7,a1 | and save the exponents
+#else
+ movel d6,d4
+ movel a0,d6
+ movel d4,a0
+ movel d7,d4
+ movel a1,d7
+ movel d4,a1
+#endif
+ eorl d6,d7 | combine sign bits
+ bmi Lsubsf$0 | if negative a and b have opposite
+ | sign so we actually subtract the
+ | numbers
+
+| Here we have both positive or both negative
+#ifndef __mcoldfire__
+ exg d6,a0 | now we have the exponent in d6
+#else
+ movel d6,d4
+ movel a0,d6
+ movel d4,a0
+#endif
+ movel a0,d7 | and sign in d7
+ andl IMM (0x80000000),d7
+| Here we do the addition.
+ addl d3,d1
+ addxl d2,d0
+| Note: now we have d2, d3, d4 and d5 to play with!
+
+| Put the exponent, in the first byte, in d2, to use the "standard" rounding
+| routines:
+ movel d6,d2
+#ifndef __mcoldfire__
+ lsrw IMM (8),d2
+#else
+ lsrl IMM (8),d2
+#endif
+
+| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
+| the case of denormalized numbers in the rounding routine itself).
+| As in the addition (not in the subtraction!) we could have set
+| one more bit we check this:
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq 1f
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+#endif
+ addl IMM (1),d2
+1:
+ lea pc@(Laddsf$4),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Laddsf$4:
+| Put back the exponent, but check for overflow.
+#ifndef __mcoldfire__
+ cmpw IMM (0xff),d2
+#else
+ cmpl IMM (0xff),d2
+#endif
+ bhi 1f
+ bclr IMM (FLT_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (7),d2
+#else
+ lsll IMM (7),d2
+#endif
+ swap d2
+ orl d2,d0
+ bra Laddsf$ret
+1:
+ moveq IMM (ADD),d5
+ bra Lf$overflow
+
+Lsubsf$0:
+| We are here if a > 0 and b < 0 (sign bits cleared).
+| Here we do the subtraction.
+ movel d6,d7 | put sign in d7
+ andl IMM (0x80000000),d7
+
+ subl d3,d1 | result in d0-d1
+ subxl d2,d0 |
+ beq Laddsf$ret | if zero just exit
+ bpl 1f | if positive skip the following
+ bchg IMM (31),d7 | change sign bit in d7
+ negl d1
+ negxl d0
+1:
+#ifndef __mcoldfire__
+ exg d2,a0 | now we have the exponent in d2
+ lsrw IMM (8),d2 | put it in the first byte
+#else
+ movel d2,d4
+ movel a0,d2
+ movel d4,a0
+ lsrl IMM (8),d2 | put it in the first byte
+#endif
+
+| Now d0-d1 is positive and the sign bit is in d7.
+
+| Note that we do not have to normalize, since in the subtraction bit
+| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
+| the rounding routines themselves.
+ lea pc@(Lsubsf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lsubsf$1:
+| Put back the exponent (we can't have overflow!). '
+ bclr IMM (FLT_MANT_DIG-1),d0
+#ifndef __mcoldfire__
+ lslw IMM (7),d2
+#else
+ lsll IMM (7),d2
+#endif
+ swap d2
+ orl d2,d0
+ bra Laddsf$ret
+
+| If one of the numbers was too small (difference of exponents >=
+| FLT_MANT_DIG+2) we return the other (and now we don't have to '
+| check for finiteness or zero).
+Laddsf$a$small:
+ movel a6@(12),d0
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Laddsf$b$small:
+ movel a6@(8),d0
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+| If the numbers are denormalized remember to put exponent equal to 1.
+
+Laddsf$a$den:
+ movel d5,d6 | d5 contains 0x01000000
+ swap d6
+ bra Laddsf$1
+
+Laddsf$b$den:
+ movel d5,d7
+ swap d7
+ notl d4 | make d4 into a mask for the fraction
+ | (this was not executed after the jump)
+ bra Laddsf$2
+
+| The rest is mainly code for the different results which can be
+| returned (checking always for +/-INFINITY and NaN).
+
+Laddsf$b:
+| Return b (if a is zero).
+ movel a6@(12),d0
+ cmpl IMM (0x80000000),d0 | Check if b is -0
+ bne 1f
+ movel a0,d7
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Laddsf$ret
+Laddsf$a:
+| Return a (if b is zero).
+ movel a6@(8),d0
+1:
+ moveq IMM (ADD),d5
+| We have to check for NaN and +/-infty.
+ movel d0,d7
+ andl IMM (0x80000000),d7 | put sign in d7
+ bclr IMM (31),d0 | clear sign
+ cmpl IMM (INFINITY),d0 | check for infty or NaN
+ bge 2f
+ movel d0,d0 | check for zero (we do this because we don't '
+ bne Laddsf$ret | want to return -0 by mistake
+ bclr IMM (31),d7 | if zero be sure to clear sign
+ bra Laddsf$ret | if everything OK just return
+2:
+| The value to be returned is either +/-infty or NaN
+ andl IMM (0x007fffff),d0 | check for NaN
+ bne Lf$inop | if mantissa not zero is NaN
+ bra Lf$infty
+
+Laddsf$ret:
+| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
+| We have to clear the exception flags (just the exception type).
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+ orl d7,d0 | put sign bit
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | restore data registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 | and return
+ rts
+
+Laddsf$ret$den:
+| Return a denormalized number (for addition we don't signal underflow) '
+ lsrl IMM (1),d0 | remember to shift right back once
+ bra Laddsf$ret | and return
+
+| Note: when adding two floats of the same sign if either one is
+| NaN we return NaN without regard to whether the other is finite or
+| not. When subtracting them (i.e., when adding two numbers of
+| opposite signs) things are more complicated: if both are INFINITY
+| we return NaN, if only one is INFINITY and the other is NaN we return
+| NaN, but if it is finite we return INFINITY with the corresponding sign.
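+| As a compact C restatement of these rules (a hypothetical sketch using the
+| <math.h> classifiers NAN, isnan, isinf and signbit, not the actual code
+| path):
+|
+|   if (isnan (a) || isnan (b))
+|     return NAN;
+|   if (isinf (a) && isinf (b))
+|     return signbit (a) == signbit (b) ? a : NAN;
+|   return isinf (a) ? a : b;    /* exactly one of them is infinite */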
+
+Laddsf$nf:
+ moveq IMM (ADD),d5
+| This could be faster but it is not worth the effort, since it is not
+| executed very often. We sacrifice speed for clarity here.
+ movel a6@(8),d0 | get the numbers back (remember that we
+ movel a6@(12),d1 | did some processing already)
+ movel IMM (INFINITY),d4 | useful constant (INFINITY)
+ movel d0,d2 | save sign bits
+ movel d1,d3
+ bclr IMM (31),d0 | clear sign bits
+ bclr IMM (31),d1
+| We know that one of them is either NaN or +/-INFINITY.
+| Check for NaN (if either one is NaN return NaN)
+ cmpl d4,d0 | check first a (d0)
+ bhi Lf$inop
+ cmpl d4,d1 | check now b (d1)
+ bhi Lf$inop
+| Now comes the check for +/-INFINITY. We know that both are (maybe not
+| finite) numbers, but we still have to check whether both are infinite
+| and whether we are adding or subtracting them.
+ eorl d3,d2 | to check sign bits
+ bmi 1f
+ movel d0,d7
+ andl IMM (0x80000000),d7 | get (common) sign bit
+ bra Lf$infty
+1:
+| We know one (or both) are infinite, so we test for equality between the
+| two numbers (if they are equal they have to be infinite both, so we
+| return NaN).
+ cmpl d1,d0 | are both infinite?
+ beq Lf$inop | if so return NaN
+
+ movel d0,d7
+ andl IMM (0x80000000),d7 | get a's sign bit '
+ cmpl d4,d0 | test now for infinity
+ beq Lf$infty | if a is INFINITY return with this sign
+ bchg IMM (31),d7 | else we know b is INFINITY and has
+ bra Lf$infty | the opposite sign
+
+|=============================================================================
+| __mulsf3
+|=============================================================================
+
+| float __mulsf3(float, float);
+ FUNC(__mulsf3)
+SYM (__mulsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0
+ movel a6@(12),d1 | and b into d1
+ movel d0,d7 | d7 will hold the sign of the product
+ eorl d1,d7 |
+ andl IMM (0x80000000),d7
+ movel IMM (INFINITY),d6 | useful constant (+INFINITY)
+ movel d6,d5 | another (mask for fraction)
+ notl d5 |
+ movel IMM (0x00800000),d4 | this is to put hidden bit back
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d2 |
+ beq Lmulsf$a$0 | branch if a is zero
+ bclr IMM (31),d1 | get rid of b's sign bit '
+ movel d1,d3 |
+ beq Lmulsf$b$0 | branch if b is zero
+ cmpl d6,d0 | is a big?
+ bhi Lmulsf$inop | if a is NaN return NaN
+ beq Lmulsf$inf | if a is INFINITY we have to check b
+ cmpl d6,d1 | now compare b with INFINITY
+ bhi Lmulsf$inop | is b NaN?
+ beq Lmulsf$overflow | is b INFINITY?
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d2 and d3.
+ andl d6,d2 | and isolate exponent in d2
+ beq Lmulsf$a$den | if exponent is zero we have a denormalized
+ andl d5,d0 | and isolate fraction
+ orl d4,d0 | and put hidden bit back
+ swap d2 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (7),d2 |
+#else
+ lsrl IMM (7),d2 |
+#endif
+Lmulsf$1: | number
+ andl d6,d3 |
+ beq Lmulsf$b$den |
+ andl d5,d1 |
+ orl d4,d1 |
+ swap d3 |
+#ifndef __mcoldfire__
+ lsrw IMM (7),d3 |
+#else
+ lsrl IMM (7),d3 |
+#endif
+Lmulsf$2: |
+#ifndef __mcoldfire__
+ addw d3,d2 | add exponents
+ subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
+#else
+ addl d3,d2 | add exponents
+ subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
+#endif
+
+| We are now ready to do the multiplication. The situation is as follows:
+| both a and b have bit FLT_MANT_DIG-1 set (even if they were
+| denormalized to start with!), which means that in the product
+| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
+| high long) is set.
+
+| To do the multiplication let us move the numbers around a little ...
+ movel d1,d6 | second operand in d6
+ movel d0,d5 | first operand in d4-d5
+ movel IMM (0),d4
+ movel d4,d1 | the sums will go in d0-d1
+ movel d4,d0
+
+| now bit FLT_MANT_DIG-1 becomes bit 31:
+ lsll IMM (31-FLT_MANT_DIG+1),d6
+
+| Start the loop (we loop #FLT_MANT_DIG times):
+ moveq IMM (FLT_MANT_DIG-1),d3
+1: addl d1,d1 | shift sum
+ addxl d0,d0
+ lsll IMM (1),d6 | get bit bn
+ bcc 2f | if not set skip sum
+ addl d5,d1 | add a
+ addxl d4,d0
+2:
+#ifndef __mcoldfire__
+ dbf d3,1b | loop back
+#else
+ subql IMM (1),d3
+ bpl 1b
+#endif
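+
+| A minimal C sketch of the loop above (illustrative only): a classic
+| shift-and-add multiply of two 24-bit fractions into a 48-bit product.
+|
+|   unsigned long long
+|   mul_frac (unsigned int a, unsigned int b)  /* both < 2^24, bit 23 set */
+|   {
+|     unsigned long long sum = 0;
+|     for (int i = 23; i >= 0; i--)
+|       {
+|         sum <<= 1;                /* shift the partial sum */
+|         if (b & (1u << i))        /* bits of b, high to low */
+|           sum += a;               /* add a when the bit is set */
+|       }
+|     return sum;
+|   }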
+
+| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
+| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
+| FLT_MANT_DIG is set (to do the rounding).
+#ifndef __mcoldfire__
+ rorl IMM (6),d1
+ swap d1
+ movew d1,d3
+ andw IMM (0x03ff),d3
+ andw IMM (0xfd00),d1
+#else
+ movel d1,d3
+ lsll IMM (8),d1
+ addl d1,d1
+ addl d1,d1
+ moveq IMM (22),d5
+ lsrl d5,d3
+ orl d3,d1
+ andl IMM (0xfffffd00),d1
+#endif
+ lsll IMM (8),d0
+ addl d0,d0
+ addl d0,d0
+#ifndef __mcoldfire__
+ orw d3,d0
+#else
+ orl d3,d0
+#endif
+
+ moveq IMM (MULTIPLY),d5
+
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq Lround$exit
+#ifndef __mcoldfire__
+ lsrl IMM (1),d0
+ roxrl IMM (1),d1
+ addw IMM (1),d2
+#else
+ lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ addql IMM (1),d2
+#endif
+ bra Lround$exit
+
+Lmulsf$inop:
+ moveq IMM (MULTIPLY),d5
+ bra Lf$inop
+
+Lmulsf$overflow:
+ moveq IMM (MULTIPLY),d5
+ bra Lf$overflow
+
+Lmulsf$inf:
+ moveq IMM (MULTIPLY),d5
+| If either is NaN return NaN; else both are (maybe infinite) numbers, so
+| return INFINITY with the correct sign (which is in d7).
+ cmpl d6,d1 | is b NaN?
+ bhi Lf$inop | if so return NaN
+ bra Lf$overflow | else return +/-INFINITY
+
+| If either number is zero return zero, unless the other is +/-INFINITY,
+| or NaN, in which case we return NaN.
+Lmulsf$b$0:
+| Here d1 (==b) is zero.
+ movel a6@(8),d1 | get a again to check for non-finiteness
+ bra 1f
+Lmulsf$a$0:
+ movel a6@(12),d1 | get b again to check for non-finiteness
+1: bclr IMM (31),d1 | clear sign bit
+ cmpl IMM (INFINITY),d1 | and check for a large exponent
+ bge Lf$inop | if b is +/-INFINITY or NaN return NaN
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| hidden bit back into the fraction; instead we shift left until bit 23
+| (the hidden bit) is set, adjusting the exponent accordingly. We do this
+| to ensure that the product of the fractions is close to 1.
+Lmulsf$a$den:
+ movel IMM (1),d2
+ andl d5,d0
+1: addl d0,d0 | shift a left (until bit 23 is set)
+#ifndef __mcoldfire__
+ subw IMM (1),d2 | and adjust exponent
+#else
+ subql IMM (1),d2 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d0
+ bne Lmulsf$1 |
+ bra 1b | else loop back
+
+Lmulsf$b$den:
+ movel IMM (1),d3
+ andl d5,d1
+1: addl d1,d1 | shift b left until bit 23 is set
+#ifndef __mcoldfire__
+ subw IMM (1),d3 | and adjust exponent
+#else
+ subql IMM (1),d3 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d1
+ bne Lmulsf$2 |
+ bra 1b | else loop back
+
+|=============================================================================
+| __divsf3
+|=============================================================================
+
+| float __divsf3(float, float);
+ FUNC(__divsf3)
+SYM (__divsf3):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ movel a6@(8),d0 | get a into d0
+ movel a6@(12),d1 | and b into d1
+ movel d0,d7 | d7 will hold the sign of the result
+ eorl d1,d7 |
+ andl IMM (0x80000000),d7 |
+ movel IMM (INFINITY),d6 | useful constant (+INFINITY)
+ movel d6,d5 | another (mask for fraction)
+ notl d5 |
+ movel IMM (0x00800000),d4 | this is to put hidden bit back
+ bclr IMM (31),d0 | get rid of a's sign bit '
+ movel d0,d2 |
+ beq Ldivsf$a$0 | branch if a is zero
+ bclr IMM (31),d1 | get rid of b's sign bit '
+ movel d1,d3 |
+ beq Ldivsf$b$0 | branch if b is zero
+ cmpl d6,d0 | is a big?
+ bhi Ldivsf$inop | if a is NaN return NaN
+ beq Ldivsf$inf | if a is INFINITY we have to check b
+ cmpl d6,d1 | now compare b with INFINITY
+ bhi Ldivsf$inop | if b is NaN return NaN
+ beq Ldivsf$underflow
+| Here we have both numbers finite and nonzero (and with no sign bit).
+| Now we get the exponents into d2 and d3 and normalize the numbers to
+| ensure that the ratio of the fractions is close to 1. We do this by
+| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
+ andl d6,d2 | and isolate exponent in d2
+ beq Ldivsf$a$den | if exponent is zero we have a denormalized
+ andl d5,d0 | and isolate fraction
+ orl d4,d0 | and put hidden bit back
+ swap d2 | I like exponents in the first byte
+#ifndef __mcoldfire__
+ lsrw IMM (7),d2 |
+#else
+ lsrl IMM (7),d2 |
+#endif
+Ldivsf$1: |
+ andl d6,d3 |
+ beq Ldivsf$b$den |
+ andl d5,d1 |
+ orl d4,d1 |
+ swap d3 |
+#ifndef __mcoldfire__
+ lsrw IMM (7),d3 |
+#else
+ lsrl IMM (7),d3 |
+#endif
+Ldivsf$2: |
+#ifndef __mcoldfire__
+ subw d3,d2 | subtract exponents
+ addw IMM (F_BIAS),d2 | and add bias
+#else
+ subl d3,d2 | subtract exponents
+ addl IMM (F_BIAS),d2 | and add bias
+#endif
+
+| We are now ready to do the division. We have prepared things in such a way
+| that the ratio of the fractions will be less than 2 but greater than 1/2.
+| At this point the registers in use are:
+| d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
+| d1 holds b (second operand, bit FLT_MANT_DIG=1)
+| d2 holds the difference of the exponents, corrected by the bias
+| d7 holds the sign of the ratio
+| d4, d5, d6 hold some constants
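+| A rough C model of the quotient loop below (illustrative only): restoring
+| division, one bit per iteration, producing FLT_MANT_DIG+2 = 26 quotient
+| bits; the real code then runs further to compute a sticky bit.
+|
+|   unsigned int
+|   div_frac (unsigned int a, unsigned int b)  /* both < 2^24, bit 23 set */
+|   {
+|     unsigned int q = 0;
+|     for (int i = 0; i < 26; i++)
+|       {
+|         q <<= 1;
+|         if (a >= b)     /* next quotient bit is 1 */
+|           {
+|             q |= 1;
+|             a -= b;
+|           }
+|         a <<= 1;        /* bring down the next (zero) dividend bit */
+|       }
+|     return q;
+|   }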
+ movel d7,a0 | d6-d7 will hold the ratio of the fractions
+ movel IMM (0),d6 |
+ movel d6,d7
+
+ moveq IMM (FLT_MANT_DIG+1),d3
+1: cmpl d0,d1 | is a < b?
+ bhi 2f |
+ bset d3,d6 | set a bit in d6
+ subl d1,d0 | if a >= b a <-- a-b
+ beq 3f | if a is zero, exit
+2: addl d0,d0 | multiply a by 2
+#ifndef __mcoldfire__
+ dbra d3,1b
+#else
+ subql IMM (1),d3
+ bpl 1b
+#endif
+
+| Now we keep going to set the sticky bit ...
+ moveq IMM (FLT_MANT_DIG),d3
+1: cmpl d0,d1
+ ble 2f
+ addl d0,d0
+#ifndef __mcoldfire__
+ dbra d3,1b
+#else
+ subql IMM(1),d3
+ bpl 1b
+#endif
+ movel IMM (0),d1
+ bra 3f
+2: movel IMM (0),d1
+#ifndef __mcoldfire__
+ subw IMM (FLT_MANT_DIG),d3
+ addw IMM (31),d3
+#else
+ subl IMM (FLT_MANT_DIG),d3
+ addl IMM (31),d3
+#endif
+ bset d3,d1
+3:
+ movel d6,d0 | put the ratio in d0-d1
+ movel a0,d7 | get sign back
+
+| Because of the normalization we did before we are guaranteed that
+| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
+| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
+ btst IMM (FLT_MANT_DIG+1),d0
+ beq 1f | if it is not set, then bit 24 is set
+ lsrl IMM (1),d0 |
+#ifndef __mcoldfire__
+ addw IMM (1),d2 |
+#else
+ addl IMM (1),d2 |
+#endif
+1:
+| Now round, check for over- and underflow, and exit.
+ moveq IMM (DIVIDE),d5
+ bra Lround$exit
+
+Ldivsf$inop:
+ moveq IMM (DIVIDE),d5
+ bra Lf$inop
+
+Ldivsf$overflow:
+ moveq IMM (DIVIDE),d5
+ bra Lf$overflow
+
+Ldivsf$underflow:
+ moveq IMM (DIVIDE),d5
+ bra Lf$underflow
+
+Ldivsf$a$0:
+ moveq IMM (DIVIDE),d5
+| If a is zero check to see whether b is zero also. In that case return
+| NaN; then check if b is NaN, and return NaN also in that case. Else
+| return a properly signed zero.
+ andl IMM (0x7fffffff),d1 | clear sign bit and test b
+ beq Lf$inop | if b is also zero return NaN
+ cmpl IMM (INFINITY),d1 | check for NaN
+ bhi Lf$inop |
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
+ movew IMM (0),a0@ |
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 |
+#else
+ moveml sp@,d2-d7 |
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6 |
+ rts |
+
+Ldivsf$b$0:
+ moveq IMM (DIVIDE),d5
+| If we got here a is not zero. Check if a is NaN; in that case return NaN,
+| else return +/-INFINITY. Remember that a is in d0 with the sign bit
+| cleared already.
+ cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
+ bhi Lf$inop | if larger it is NaN
+ bra Lf$div$0 | else signal DIVIDE_BY_ZERO
+
+Ldivsf$inf:
+ moveq IMM (DIVIDE),d5
+| If a is INFINITY we have to check b
+ cmpl IMM (INFINITY),d1 | compare b with INFINITY
+ bge Lf$inop | if b is NaN or INFINITY return NaN
+ bra Lf$overflow | else return overflow
+
+| If a number is denormalized we put an exponent of 1 but do not put the
+| bit back into the fraction.
+Ldivsf$a$den:
+ movel IMM (1),d2
+ andl d5,d0
+1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
+#ifndef __mcoldfire__
+ subw IMM (1),d2 | and adjust exponent
+#else
+ subl IMM (1),d2 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d0
+ bne Ldivsf$1
+ bra 1b
+
+Ldivsf$b$den:
+ movel IMM (1),d3
+ andl d5,d1
+1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
+#ifndef __mcoldfire__
+ subw IMM (1),d3 | and adjust exponent
+#else
+ subl IMM (1),d3 | and adjust exponent
+#endif
+ btst IMM (FLT_MANT_DIG-1),d1
+ bne Ldivsf$2
+ bra 1b
+
+Lround$exit:
+| This is a common exit point for __mulsf3 and __divsf3.
+
+| First check for underflow in the exponent:
+#ifndef __mcoldfire__
+ cmpw IMM (-FLT_MANT_DIG-1),d2
+#else
+ cmpl IMM (-FLT_MANT_DIG-1),d2
+#endif
+ blt Lf$underflow
+| It could happen that the exponent is less than 1, in which case the
+| number is denormalized. In this case we shift right and adjust the
+| exponent until it becomes 1 or the fraction is zero (in the latter case
+| we signal underflow and return zero).
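+| A rough C model of this step (hypothetical; one 64-bit frac for d0-d1,
+| with 'lost' standing in for d6; the real code ORs the whole collector
+| word back into the low fraction word, which serves the same sticky
+| purpose):
+|
+|   unsigned long long lost = 0;
+|   while (exp < 1)
+|     {
+|       lost = (lost >> 1) | ((unsigned long long)(frac & 1) << 63);
+|       frac >>= 1;
+|       exp++;
+|     }
+|   frac |= (lost != 0);   /* keep a sticky record of the flushed bits */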
+ movel IMM (0),d6 | d6 is used temporarily
+#ifndef __mcoldfire__
+ cmpw IMM (1),d2 | if the exponent is less than 1 we
+#else
+ cmpl IMM (1),d2 | if the exponent is less than 1 we
+#endif
+ bge 2f | have to shift right (denormalize)
+1:
+#ifndef __mcoldfire__
+ addw IMM (1),d2 | adjust the exponent
+ lsrl IMM (1),d0 | shift right once
+ roxrl IMM (1),d1 |
+ roxrl IMM (1),d6 | d6 collect bits we would lose otherwise
+ cmpw IMM (1),d2 | is the exponent 1 already?
+#else
+ addql IMM (1),d2 | adjust the exponent
+ lsrl IMM (1),d6
+ btst IMM (0),d1
+ beq 11f
+ bset IMM (31),d6
+11: lsrl IMM (1),d1
+ btst IMM (0),d0
+ beq 10f
+ bset IMM (31),d1
+10: lsrl IMM (1),d0
+ cmpl IMM (1),d2 | is the exponent 1 already?
+#endif
+ beq 2f | if not loop back
+ bra 1b |
+ bra Lf$underflow | safety check, shouldn't execute '
+2: orl d6,d1 | this is a trick so we don't lose '
+ | the extra bits which were flushed right
+| Now call the rounding routine (which takes care of denormalized numbers):
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
+ clrl d6
+#endif
+ movew a1@(6),d6 | rounding mode in d6
+ beq Lround$to$nearest
+#ifndef __mcoldfire__
+ cmpw IMM (ROUND_TO_PLUS),d6
+#else
+ cmpl IMM (ROUND_TO_PLUS),d6
+#endif
+ bhi Lround$to$minus
+ blt Lround$to$zero
+ bra Lround$to$plus
+Lround$0:
+| Here we have a correctly rounded result (either normalized or denormalized).
+
+| Here we should have either a normalized number or a denormalized one, and
+| the exponent is necessarily greater than or equal to 1 (so we don't have to '
+| check again for underflow!). We have to check for overflow or for a
+| denormalized number (which also signals underflow).
+| Check for overflow (i.e., exponent >= 255).
+#ifndef __mcoldfire__
+ cmpw IMM (0x00ff),d2
+#else
+ cmpl IMM (0x00ff),d2
+#endif
+ bge Lf$overflow
+| Now check for a denormalized number (exponent==0).
+ movew d2,d2
+ beq Lf$den
+1:
+| Put back the exponents and sign and return.
+#ifndef __mcoldfire__
+ lslw IMM (7),d2 | exponent back to fourth byte
+#else
+ lsll IMM (7),d2 | exponent back to fourth byte
+#endif
+ bclr IMM (FLT_MANT_DIG-1),d0
+ swap d0 | and put back exponent
+#ifndef __mcoldfire__
+ orw d2,d0 |
+#else
+ orl d2,d0
+#endif
+ swap d0 |
+ orl d7,d0 | and sign also
+
+ PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+|=============================================================================
+| __negsf2
+|=============================================================================
+
+| This is trivial and could be shorter if we didn't bother checking for NaN '
+| and +/-INFINITY.
+
+| float __negsf2(float);
+ FUNC(__negsf2)
+SYM (__negsf2):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@-
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (NEGATE),d5
+ movel a6@(8),d0 | get number to negate in d0
+ bchg IMM (31),d0 | negate
+ movel d0,d1 | make a positive copy
+ bclr IMM (31),d1 |
+ tstl d1 | check for zero
+ beq 2f | if zero (either sign) return +zero
+ cmpl IMM (INFINITY),d1 | compare to +INFINITY
+ blt 1f |
+ bhi Lf$inop | if larger (fraction not zero) is NaN
+ movel d0,d7 | else get sign and return INFINITY
+ andl IMM (0x80000000),d7
+ bra Lf$infty
+1: PICLEA SYM (_fpCCR),a0
+ movew IMM (0),a0@
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+2: bclr IMM (31),d0
+ bra 1b
+
+|=============================================================================
+| __cmpsf2
+|=============================================================================
+
+GREATER = 1
+LESS = -1
+EQUAL = 0
+
+| int __cmpsf2_internal(float, float, int);
+SYM (__cmpsf2_internal):
+#ifndef __mcoldfire__
+ link a6,IMM (0)
+ moveml d2-d7,sp@- | save registers
+#else
+ link a6,IMM (-24)
+ moveml d2-d7,sp@
+#endif
+ moveq IMM (COMPARE),d5
+ movel a6@(8),d0 | get first operand
+ movel a6@(12),d1 | get second operand
+| Check if either is NaN, and in that case return garbage and signal
+| INVALID_OPERATION. Check also if either is zero, and clear the signs
+| if necessary.
+ movel d0,d6
+ andl IMM (0x7fffffff),d0
+ beq Lcmpsf$a$0
+ cmpl IMM (0x7f800000),d0
+ bhi Lcmpf$inop
+Lcmpsf$1:
+ movel d1,d7
+ andl IMM (0x7fffffff),d1
+ beq Lcmpsf$b$0
+ cmpl IMM (0x7f800000),d1
+ bhi Lcmpf$inop
+Lcmpsf$2:
+| Check the signs
+ eorl d6,d7
+ bpl 1f
+| If the signs are not equal check if a >= 0
+ tstl d6
+ bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
+ bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
+1:
+| If the signs are equal check for < 0
+ tstl d6
+ bpl 1f
+| If both are negative exchange them
+#ifndef __mcoldfire__
+ exg d0,d1
+#else
+ movel d0,d7
+ movel d1,d0
+ movel d7,d1
+#endif
+1:
+| Now that they are positive we just compare them as longs (this also works
+| for denormalized numbers: with the signs equal, the IEEE bit patterns
+| order correctly as unsigned integers).
+ cmpl d0,d1
+ bhi Lcmpsf$b$gt$a | |b| > |a|
+ bne Lcmpsf$a$gt$b | |b| < |a|
+| If we got here a == b.
+ movel IMM (EQUAL),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+#endif
+ unlk a6
+ rts
+Lcmpsf$a$gt$b:
+ movel IMM (GREATER),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+Lcmpsf$b$gt$a:
+ movel IMM (LESS),d0
+#ifndef __mcoldfire__
+ moveml sp@+,d2-d7 | put back the registers
+#else
+ moveml sp@,d2-d7
+ | XXX if frame pointer is ever removed, stack pointer must
+ | be adjusted here.
+#endif
+ unlk a6
+ rts
+
+Lcmpsf$a$0:
+ bclr IMM (31),d6
+ bra Lcmpsf$1
+Lcmpsf$b$0:
+ bclr IMM (31),d7
+ bra Lcmpsf$2
+
+Lcmpf$inop:
+ movl a6@(16),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+| int __cmpsf2(float, float);
+ FUNC(__cmpsf2)
+SYM (__cmpsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+
+|=============================================================================
+| rounding routines
+|=============================================================================
+
+| The rounding routines expect the number to be normalized in registers
+| d0-d1, with the exponent in register d2. They assume that the
+| exponent is greater than or equal to 1. They return a properly normalized number
+| if possible, and a denormalized number otherwise. The exponent is returned
+| in d2.
+
+Lround$to$nearest:
+| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
+| Here we assume that the exponent is not too small (this should be checked
+| before entering the rounding routine), but the number could be denormalized.
+
+| Check for denormalized numbers:
+1: btst IMM (FLT_MANT_DIG),d0
+ bne 2f | if set the number is normalized
+| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
+| is one (remember that a denormalized number corresponds to an
+| exponent of -F_BIAS+1).
+#ifndef __mcoldfire__
+ cmpw IMM (1),d2 | remember that the exponent is at least one
+#else
+ cmpl IMM (1),d2 | remember that the exponent is at least one
+#endif
+ beq 2f | an exponent of one means denormalized
+ addl d1,d1 | else shift and adjust the exponent
+ addxl d0,d0 |
+#ifndef __mcoldfire__
+ dbra d2,1b |
+#else
+ subql IMM (1),d2
+ bpl 1b
+#endif
+2:
+| Now round: we do it as follows: after the shifting we can write the
+| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
+| If delta < 1, do nothing. If delta > 1, add 1 to f.
+| If delta == 1, we round so that the result (after shifting) is even
+| (round-to-nearest, ties-to-even).
+ btst IMM (0),d0 | is delta < 1?
+ beq 2f | if so, do not do anything
+ tstl d1 | is delta == 1?
+ bne 1f | if so round to even
+ movel d0,d1 |
+ andl IMM (2),d1 | bit 1 is the last significant bit
+ addl d1,d0 |
+ bra 2f |
+1: movel IMM (1),d1 | else add 1
+ addl d1,d0 |
+| Shift right once (because we used bit #FLT_MANT_DIG!).
+2: lsrl IMM (1),d0
+| Now check again bit #FLT_MANT_DIG (rounding could have produced a
+| 'fraction overflow' ...).
+ btst IMM (FLT_MANT_DIG),d0
+ beq 1f
+ lsrl IMM (1),d0
+#ifndef __mcoldfire__
+ addw IMM (1),d2
+#else
+ addql IMM (1),d2
+#endif
+1:
+| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
+| have to put the exponent to zero and return a denormalized number.
+ btst IMM (FLT_MANT_DIG-1),d0
+ beq 1f
+ jmp a0@
+1: movel IMM (0),d2
+ jmp a0@
+
+Lround$to$zero:
+Lround$to$plus:
+Lround$to$minus:
+ jmp a0@
+#endif /* L_float */
+
+| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
+| __ledf2, __ltdf2 to all return the same value as a direct call to
+| __cmpdf2 would. In this implementation, each of these routines
+| simply calls __cmpdf2. It would be more efficient to give the
+| __cmpdf2 routine several names, but separating them out will make it
+| easier to write efficient versions of these routines someday.
+| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
+| The other routines return 1.
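+| In C terms each wrapper below amounts to (a sketch of the convention; the
+| third argument is the value returned when the operands are unordered):
+|
+|   int __eqdf2 (double a, double b) { return __cmpdf2_internal (a, b, 1); }
+|   int __gtdf2 (double a, double b) { return __cmpdf2_internal (a, b, -1); }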
+
+#ifdef L_eqdf2
+ .text
+ FUNC(__eqdf2)
+ .globl SYM (__eqdf2)
+SYM (__eqdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_eqdf2 */
+
+#ifdef L_nedf2
+ .text
+ FUNC(__nedf2)
+ .globl SYM (__nedf2)
+SYM (__nedf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_nedf2 */
+
+#ifdef L_gtdf2
+ .text
+ FUNC(__gtdf2)
+ .globl SYM (__gtdf2)
+SYM (__gtdf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_gtdf2 */
+
+#ifdef L_gedf2
+ .text
+ FUNC(__gedf2)
+ .globl SYM (__gedf2)
+SYM (__gedf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_gedf2 */
+
+#ifdef L_ltdf2
+ .text
+ FUNC(__ltdf2)
+ .globl SYM (__ltdf2)
+SYM (__ltdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_ltdf2 */
+
+#ifdef L_ledf2
+ .text
+ FUNC(__ledf2)
+ .globl SYM (__ledf2)
+SYM (__ledf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+#endif /* L_ledf2 */
+
+| The comments above about __eqdf2 et al. also apply to __eqsf2
+| et al., except that the latter call __cmpsf2 rather than __cmpdf2.
+
+#ifdef L_eqsf2
+ .text
+ FUNC(__eqsf2)
+ .globl SYM (__eqsf2)
+SYM (__eqsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_eqsf2 */
+
+#ifdef L_nesf2
+ .text
+ FUNC(__nesf2)
+ .globl SYM (__nesf2)
+SYM (__nesf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_nesf2 */
+
+#ifdef L_gtsf2
+ .text
+ FUNC(__gtsf2)
+ .globl SYM (__gtsf2)
+SYM (__gtsf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_gtsf2 */
+
+#ifdef L_gesf2
+ .text
+ FUNC(__gesf2)
+ .globl SYM (__gesf2)
+SYM (__gesf2):
+ link a6,IMM (0)
+ pea -1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_gesf2 */
+
+#ifdef L_ltsf2
+ .text
+ FUNC(__ltsf2)
+ .globl SYM (__ltsf2)
+SYM (__ltsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_ltsf2 */
+
+#ifdef L_lesf2
+ .text
+ FUNC(__lesf2)
+ .globl SYM (__lesf2)
+SYM (__lesf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+#endif /* L_lesf2 */
+
+#if defined (__ELF__) && defined (__linux__)
+ /* Make stack non-executable for ELF linux targets. */
+ .section .note.GNU-stack,"",@progbits
+#endif
diff --git a/libgcc/config/m68k/t-floatlib b/libgcc/config/m68k/t-floatlib
new file mode 100644
index 00000000000..1ee8782d9fd
--- /dev/null
+++ b/libgcc/config/m68k/t-floatlib
@@ -0,0 +1,11 @@
+LIB1ASMSRC = m68k/lb1sf68.S
+LIB1ASMFUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
+ _double _float _floatex \
+ _eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
+ _eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2
+
+LIB2ADD = $(srcdir)/config/m68k/fpgnulib.c xfgnulib.c
+
+xfgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
+ echo '#define EXTFLOAT' > xfgnulib.c
+ cat $< >> xfgnulib.c
diff --git a/libgcc/config/m68k/t-slibgcc-elf-ver b/libgcc/config/m68k/t-slibgcc-elf-ver
new file mode 100644
index 00000000000..6aac37cc08f
--- /dev/null
+++ b/libgcc/config/m68k/t-slibgcc-elf-ver
@@ -0,0 +1,3 @@
+# Bump the version number of the shared libgcc library
+
+SHLIB_SOVERSION = 2
diff --git a/libgcc/config/mcore/crti.S b/libgcc/config/mcore/crti.S
new file mode 100644
index 00000000000..467e944fb37
--- /dev/null
+++ b/libgcc/config/mcore/crti.S
@@ -0,0 +1,63 @@
+# crti.S for ELF based systems
+
+# Copyright (C) 1992, 1998, 1999, 2008, 2009, 2011
+# Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+ .section ".init"
+ .global _init
+ .type _init,@function
+ .align 4
+_init:
+ subi r0, 16
+ st.w r15, (r0, 12)
+
+ # These nops are here to align the end of this code with a 16 byte
+ # boundary. The linker will start inserting code into the .init
+ # section at such a boundary.
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+
+ .section ".fini"
+ .global _fini
+ .type _fini,@function
+ .align 4
+_fini:
+ subi r0, 16
+ st.w r15, (r0, 12)
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
diff --git a/libgcc/config/mcore/crtn.S b/libgcc/config/mcore/crtn.S
new file mode 100644
index 00000000000..85f75b1d42f
--- /dev/null
+++ b/libgcc/config/mcore/crtn.S
@@ -0,0 +1,45 @@
+# crtn.S for ELF based systems
+
+# Copyright (C) 1992, 1999, 2000, 2008, 2009, 2011
+# Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .section ".init"
+ .align 4
+
+ ldw r15,(r0, 12)
+ addi r0,16
+ jmp r15
+
+ .section ".fini"
+ .align 4
+
+ ldw r15, (r0, 12)
+ addi r0,16
+ jmp r15
+
+# Th-th-th-that is all folks!
+
diff --git a/libgcc/config/mcore/lib1funcs.S b/libgcc/config/mcore/lib1funcs.S
new file mode 100644
index 00000000000..701762f2a3c
--- /dev/null
+++ b/libgcc/config/mcore/lib1funcs.S
@@ -0,0 +1,303 @@
+/* libgcc routines for the MCore.
+ Copyright (C) 1993, 1999, 2000, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__, x)
+
+#ifdef __ELF__
+#define TYPE(x) .type SYM (x),@function
+#define SIZE(x) .size SYM (x), . - SYM (x)
+#else
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+.macro FUNC_START name
+ .text
+ .globl SYM (\name)
+ TYPE (\name)
+SYM (\name):
+.endm
+
+.macro FUNC_END name
+ SIZE (\name)
+.endm
+
+#ifdef L_udivsi3
+FUNC_START udiv32
+FUNC_START udivsi3
+
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+// appears to be wrong...
+// tested out incorrectly in our OS work...
+// mov r7,r3 // looking at divisor
+// ff1 r7 // I can move 32-r7 more bits to left.
+// addi r7,1 // ok, one short of that...
+// mov r1,r2
+// lsr r1,r7 // bits that came from low order...
+// rsubi r7,31 // r7 == "32-n" == LEFT distance
+// addi r7,1 // this is (32-n)
+// lsl r4,r7 // fixes the high 32 (quotient)
+// lsl r2,r7
+// cmpnei r4,0
+// bf 4f // the sentinel went away...
+
+ // run the remaining bits
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4: mov r2,r4 // return quotient
+ mov r3,r1 // and piggyback the remainder
+ jmp r15
+FUNC_END udiv32
+FUNC_END udivsi3
+#endif
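+
+// A hypothetical C model of the sentinel scheme used above (illustrative
+// only; __builtin_clz stands in for ff1, and n != 0, d != 0 are assumed --
+// the assembly handles those cases separately):
+//
+//   unsigned int udiv32_model (unsigned int n, unsigned int d)
+//   {
+//     unsigned long long rem = 0;       /* models the r1:r2 pair */
+//     unsigned int q = 1;               /* the sentinel bit */
+//     int skip = __builtin_clz (n);     /* skip high-order 0 bits */
+//     q <<= skip; n <<= skip;
+//     for (;;)
+//       {
+//         rem = (rem << 1) | (n >> 31); /* lslc r2 / addc r1 */
+//         n <<= 1;
+//         unsigned int bit = (rem >= d);
+//         if (bit) rem -= d;            /* restoring step */
+//         unsigned int out = q >> 31;   /* sentinel about to fall out? */
+//         q = (q << 1) | bit;
+//         if (out) return q;            /* all quotient bits produced */
+//       }
+//   }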
+
+#ifdef L_umodsi3
+FUNC_START urem32
+FUNC_START umodsi3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+ mov r2,r1 // return remainder
+ jmp r15
+FUNC_END urem32
+FUNC_END umodsi3
+#endif
+
+#ifdef L_divsi3
+FUNC_START div32
+FUNC_START divsi3
+ mov r5,r2 // calc sign of quotient
+ xor r5,r3
+ abs r2 // do unsigned divide
+ abs r3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+// tested out incorrectly in our OS work...
+// mov r7,r3 // looking at divisor
+// ff1 r7 // I can move 32-r7 more bits to left.
+// addi r7,1 // ok, one short of that...
+// mov r1,r2
+// lsr r1,r7 // bits that came from low order...
+// rsubi r7,31 // r7 == "32-n" == LEFT distance
+// addi r7,1 // this is (32-n)
+// lsl r4,r7 // fixes the high 32 (quotient)
+// lsl r2,r7
+// cmpnei r4,0
+// bf 4f // the sentinel went away...
+
+ // run the remaining bits
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4: mov r2,r4 // return quotient
+ mov r3,r1 // piggyback the remainder
+ btsti r5,31 // after adjusting for sign
+ bf 3f
+ rsubi r2,0
+ rsubi r3,0
+3: jmp r15
+FUNC_END div32
+FUNC_END divsi3
+#endif
+
+#ifdef L_modsi3
+FUNC_START rem32
+FUNC_START modsi3
+ mov r5,r2 // calc sign of remainder
+ abs r2 // do unsigned divide
+ abs r3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+ mov r2,r1 // return remainder
+ btsti r5,31 // after adjusting for sign
+ bf 3f
+ rsubi r2,0
+3: jmp r15
+FUNC_END rem32
+FUNC_END modsi3
+#endif
+
+
+/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
+   will behave as __cmpdf2. So, we stub the implementations to
+   jump straight to __cmpdf2 and __cmpsf2.
+
+   All of these short-circuit the return path so that __cmp{sd}f2
+   will return directly to the original caller. */
+
+.macro COMPARE_DF_JUMP name
+ .import SYM (cmpdf2)
+FUNC_START \name
+ jmpi SYM (cmpdf2)
+FUNC_END \name
+.endm
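+
+/* As a worked example, COMPARE_DF_JUMP eqdf2 expands to roughly:
+
+       .import __cmpdf2
+       .text
+       .globl __eqdf2
+   __eqdf2:
+       jmpi __cmpdf2
+
+   (plus the ELF .type/.size bookkeeping from TYPE and SIZE).  */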
+
+#ifdef L_eqdf2
+COMPARE_DF_JUMP eqdf2
+#endif /* L_eqdf2 */
+
+#ifdef L_nedf2
+COMPARE_DF_JUMP nedf2
+#endif /* L_nedf2 */
+
+#ifdef L_gtdf2
+COMPARE_DF_JUMP gtdf2
+#endif /* L_gtdf2 */
+
+#ifdef L_gedf2
+COMPARE_DF_JUMP gedf2
+#endif /* L_gedf2 */
+
+#ifdef L_ltdf2
+COMPARE_DF_JUMP ltdf2
+#endif /* L_ltdf2 */
+
+#ifdef L_ledf2
+COMPARE_DF_JUMP ledf2
+#endif /* L_ledf2 */
+
+/* SINGLE PRECISION FLOATING POINT STUBS */
+
+.macro COMPARE_SF_JUMP name
+ .import SYM (cmpsf2)
+FUNC_START \name
+ jmpi SYM (cmpsf2)
+FUNC_END \name
+.endm
+
+#ifdef L_eqsf2
+COMPARE_SF_JUMP eqsf2
+#endif /* L_eqsf2 */
+
+#ifdef L_nesf2
+COMPARE_SF_JUMP nesf2
+#endif /* L_nesf2 */
+
+#ifdef L_gtsf2
+COMPARE_SF_JUMP gtsf2
+#endif /* L_gtsf2 */
+
+#ifdef L_gesf2
+COMPARE_SF_JUMP gesf2
+#endif /* L_gesf2 */
+
+#ifdef L_ltsf2
+COMPARE_SF_JUMP ltsf2
+#endif /* L_ltsf2 */
+
+#ifdef L_lesf2
+COMPARE_SF_JUMP lesf2
+#endif /* L_lesf2 */
diff --git a/libgcc/config/mcore/t-mcore b/libgcc/config/mcore/t-mcore
new file mode 100644
index 00000000000..8268a17e7ad
--- /dev/null
+++ b/libgcc/config/mcore/t-mcore
@@ -0,0 +1,5 @@
+LIB1ASMSRC = mcore/lib1funcs.S
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# could use -msifilter to be safe from interrupt/jmp interactions and others.
+HOST_LIBGCC2_CFLAGS += -O3 -DNO_FLOATLIB_FIXUNSDFSI #-msifilter
diff --git a/libgcc/config/mep/lib1funcs.S b/libgcc/config/mep/lib1funcs.S
new file mode 100644
index 00000000000..0a18913f927
--- /dev/null
+++ b/libgcc/config/mep/lib1funcs.S
@@ -0,0 +1,125 @@
+/* libgcc routines for Toshiba Media Processor.
+ Copyright (C) 2001, 2002, 2005, 2009 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define SAVEALL \
+ add3 $sp, $sp, -16*4 ; \
+ sw $0, ($sp) ; \
+ sw $1, 4($sp) ; \
+ sw $2, 8($sp) ; \
+ sw $3, 12($sp) ; \
+ sw $4, 16($sp) ; \
+ sw $5, 20($sp) ; \
+ sw $6, 24($sp) ; \
+ sw $7, 28($sp) ; \
+ sw $8, 32($sp) ; \
+ sw $9, 36($sp) ; \
+ sw $10, 40($sp) ; \
+ sw $11, 44($sp) ; \
+ sw $12, 48($sp) ; \
+ sw $13, 52($sp) ; \
+ sw $14, 56($sp) ; \
+ ldc $5, $lp ; \
+ add $5, 3 ; \
+ mov $6, -4 ; \
+ and $5, $6
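+
+/* Note: the last four instructions above round the saved $lp up to a
+ 4-byte boundary in $5; the __mep_bb_* entry points below appear to
+ read their out-of-line argument words through $5 and step it past
+ them, so RESTOREALL's "stc $5, $lp" returns beyond that data.  */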
+
+#define RESTOREALL \
+ stc $5, $lp ; \
+ lw $14, 56($sp) ; \
+ lw $13, 52($sp) ; \
+ lw $12, 48($sp) ; \
+ lw $11, 44($sp) ; \
+ lw $10, 40($sp) ; \
+ lw $9, 36($sp) ; \
+ lw $8, 32($sp) ; \
+ lw $7, 28($sp) ; \
+ lw $6, 24($sp) ; \
+ lw $5, 20($sp) ; \
+ lw $4, 16($sp) ; \
+ lw $3, 12($sp) ; \
+ lw $2, 8($sp) ; \
+ lw $1, 4($sp) ; \
+ lw $0, ($sp) ; \
+ add3 $sp, $sp, 16*4 ; \
+ ret
+
+#ifdef L_mep_profile
+ .text
+ .global __mep_mcount
+__mep_mcount:
+ SAVEALL
+ ldc $1, $lp
+ mov $2, $0
+ bsr __mep_mcount_2
+ RESTOREALL
+#endif
+
+#ifdef L_mep_bb_init_trace
+ .text
+ .global __mep_bb_init_trace_func
+__mep_bb_init_trace_func:
+ SAVEALL
+ lw $1, ($5)
+ lw $2, 4($5)
+ add $5, 8
+ bsr __bb_init_trace_func
+ RESTOREALL
+#endif
+
+#ifdef L_mep_bb_init
+ .text
+ .global __mep_bb_init_func
+__mep_bb_init_func:
+ SAVEALL
+ lw $1, ($5)
+ add $5, 4
+ bsr __bb_init_func
+ RESTOREALL
+#endif
+
+#ifdef L_mep_bb_trace
+ .text
+ .global __mep_bb_trace_func
+__mep_bb_trace_func:
+ SAVEALL
+ movu $3, __bb
+ lw $1, ($5)
+ sw $1, ($3)
+ lw $2, 4($5)
+ sw $2, 4($3)
+ add $5, 8
+ bsr __bb_trace_func
+ RESTOREALL
+#endif
+
+#ifdef L_mep_bb_increment
+ .text
+ .global __mep_bb_increment_func
+__mep_bb_increment_func:
+ SAVEALL
+ lw $1, ($5)
+ lw $0, ($1)
+ add $0, 1
+ sw $0, ($1)
+ add $5, 4
+ RESTOREALL
+#endif
diff --git a/libgcc/config/mep/lib2funcs.c b/libgcc/config/mep/lib2funcs.c
new file mode 100644
index 00000000000..1dbf57d9535
--- /dev/null
+++ b/libgcc/config/mep/lib2funcs.c
@@ -0,0 +1,139 @@
+/* libgcc routines for MeP.
+ Copyright 2001, 2002, 2009 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+USItype
+__mulsi3 (USItype a, USItype b)
+{
+ USItype c = 0;
+
+ while (a != 0)
+ {
+ if (a & 1)
+ c += b;
+ a >>= 1;
+ b <<= 1;
+ }
+
+ return c;
+}
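+
+/* Worked example of the shift-and-add loop above (illustration only):
+ for a = 5 (binary 101) and b = 3,
+   bit 0 of a is set  -> c += 3   (c = 3;  b doubles to 6)
+   bit 1 is clear     -> skip     (b doubles to 12)
+   bit 2 is set       -> c += 12  (c = 15)
+ giving 5 * 3 = 15.  */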
+
+
+
+/* Shift-and-subtract division: returns NUM % DEN if MODWANTED is
+   nonzero, NUM / DEN otherwise.  */
+USItype
+udivmodsi4 (USItype num, USItype den, word_type modwanted)
+{
+  USItype bit = 1;
+  USItype res = 0;
+
+  /* Align DEN with NUM's most significant set bit, tracking the
+     corresponding quotient bit in BIT.  */
+  while (den < num && bit && !(den & (1L << 31)))
+    {
+      den <<= 1;
+      bit <<= 1;
+    }
+  /* Subtract the shifted divisor wherever it fits, accumulating
+     quotient bits on the way back down.  */
+  while (bit)
+    {
+      if (num >= den)
+        {
+          num -= den;
+          res |= bit;
+        }
+      bit >>= 1;
+      den >>= 1;
+    }
+  if (modwanted)
+    return num;
+  return res;
+}
+
+
+
+SItype
+__divsi3 (SItype a, SItype b)
+{
+ word_type neg = 0;
+ SItype res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+
+
+SItype
+__modsi3 (SItype a, SItype b)
+{
+ word_type neg = 0;
+ SItype res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+
+
+
+SItype
+__udivsi3 (SItype a, SItype b)
+{
+ return udivmodsi4 (a, b, 0);
+}
+
+
+
+SItype
+__umodsi3 (SItype a, SItype b)
+{
+ return udivmodsi4 (a, b, 1);
+}
diff --git a/libgcc/config/mep/t-mep b/libgcc/config/mep/t-mep
new file mode 100644
index 00000000000..fb3a0d60c4d
--- /dev/null
+++ b/libgcc/config/mep/t-mep
@@ -0,0 +1,16 @@
+# profiling support
+LIB1ASMSRC = mep/lib1funcs.S
+
+LIB1ASMFUNCS = _mep_profile \
+ _mep_bb_init_trace \
+ _mep_bb_init \
+ _mep_bb_trace \
+ _mep_bb_increment
+
+# multiply and divide routines
+LIB2ADD = \
+ $(srcdir)/config/mep/lib2funcs.c \
+ $(srcdir)/config/mep/tramp.c
+
+# Use -O0 instead of -O2 so we don't get complex relocations
+CRTSTUFF_CFLAGS += -O0
diff --git a/libgcc/config/mep/tramp.c b/libgcc/config/mep/tramp.c
new file mode 100644
index 00000000000..bf484ca4e95
--- /dev/null
+++ b/libgcc/config/mep/tramp.c
@@ -0,0 +1,103 @@
+/* Trampoline support for MeP
+ Copyright (C) 2004, 2007 Free Software Foundation, Inc.
+ Contributed by Red Hat Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ 7a0a ldc $10,$pc
+ c0ae000a lw $0,10($10)
+ caae000e lw $10,14($10)
+ 10ae jmp $10
+ 00000000 static chain
+ 00000000 function address
+*/
+
+static inline int
+cache_config_register(void) {
+ int rv;
+ asm ("ldc\t%0, $ccfg" : "=r" (rv));
+ return rv;
+}
+
+#define ICACHE_SIZE ((cache_config_register() >> 16) & 0x7f)
+#define DCACHE_SIZE (cache_config_register() & 0x7f)
+
+#define ICACHE_DATA_BASE 0x00300000
+#define ICACHE_TAG_BASE 0x00310000
+#define DCACHE_DATA_BASE 0x00320000
+#define DCACHE_TAG_BASE 0x00330000
+
+static inline void
+flush_dcache (int addr)
+{
+ asm volatile ("cache\t0, (%0)" : : "r" (addr));
+}
+
+void
+__mep_trampoline_helper (unsigned long *tramp,
+ int function_address,
+ int static_chain);
+
+void
+__mep_trampoline_helper (unsigned long *tramp,
+ int function_address,
+ int static_chain)
+{
+ int dsize, isize;
+
+#ifdef __LITTLE_ENDIAN__
+ tramp[0] = 0xc0ae7a0a;
+ tramp[1] = 0xcaae000a;
+ tramp[2] = 0x10ae000e;
+#else
+ tramp[0] = 0x7a0ac0ae;
+ tramp[1] = 0x000acaae;
+ tramp[2] = 0x000e10ae;
+#endif
+ tramp[3] = static_chain;
+ tramp[4] = function_address;
+
+ dsize = DCACHE_SIZE;
+ isize = ICACHE_SIZE;
+
+ if (dsize)
+ {
+ flush_dcache ((int)tramp);
+ flush_dcache ((int)tramp+16);
+ }
+
+ if (isize)
+ {
+ int imask = (isize * 1024) - 1;
+ int tmask = ~imask;
+ unsigned int i;
+ volatile unsigned int *tags;
+
+ imask &= 0xffe0;
+
+ for (i=(unsigned int)tramp; i<(unsigned int)tramp+20; i+=16)
+ {
+ tags = (unsigned int *)(ICACHE_TAG_BASE + (i & imask));
+ if ((*tags & tmask) == (i & tmask))
+ *tags &= ~1;
+ }
+ }
+}
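
As a hedged usage sketch of the helper above (the function and variable
names here are invented for illustration; GCC's trampoline expansion emits
the equivalent of this when the address of a nested function is taken):

    extern void __mep_trampoline_helper (unsigned long *, int, int);

    static int target_fn (void) { return 42; }   /* hypothetical target */

    void
    make_trampoline (unsigned long tramp[5], void *frame)
    {
      /* Fill in the 20-byte template shown at the top of the file;
         executing TRAMP then loads the static chain into $0 and jumps
         to target_fn.  */
      __mep_trampoline_helper (tramp, (int) target_fn, (int) frame);
    }
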
diff --git a/libgcc/config/microblaze/crti.S b/libgcc/config/microblaze/crti.S
new file mode 100644
index 00000000000..3944443b437
--- /dev/null
+++ b/libgcc/config/microblaze/crti.S
@@ -0,0 +1,39 @@
+/* crti.S for __init, __fini
+ This file supplies the prologue for the __init and __fini routines.
+
+ Copyright 2009, 2010 Free Software Foundation, Inc.
+
+ Contributed by Michael Eager <eager@eagercon.com>.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+ .section .init, "ax"
+ .global __init
+ .align 2
+__init:
+ addik r1, r1, -8
+ sw r15, r0, r1
+
+ .section .fini, "ax"
+ .global __fini
+ .align 2
+__fini:
+ addik r1, r1, -8
+ sw r15, r0, r1
diff --git a/libgcc/config/microblaze/crtn.S b/libgcc/config/microblaze/crtn.S
new file mode 100644
index 00000000000..7970dee1c93
--- /dev/null
+++ b/libgcc/config/microblaze/crtn.S
@@ -0,0 +1,35 @@
+/* crtn.S for __init, __fini
+ This file supplies the epilogue for the __init and __fini routines.
+
+ Copyright 2009, 2010 Free Software Foundation, Inc.
+
+ Contributed by Michael Eager <eager@eagercon.com>.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+ .section .init, "ax"
+ lw r15, r0, r1
+ rtsd r15, 8
+ addik r1, r1, 8
+
+ .section .fini, "ax"
+ lw r15, r0, r1
+ rtsd r15, 8
+ addik r1, r1, 8
diff --git a/libgcc/config/microblaze/divsi3.asm b/libgcc/config/microblaze/divsi3.S
index 7d888b32e8d..f3b7a198306 100644
--- a/libgcc/config/microblaze/divsi3.asm
+++ b/libgcc/config/microblaze/divsi3.S
@@ -1,6 +1,6 @@
###################################-
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# divsi3.asm
+# divsi3.S
#
# Divide operation for 32 bit integers.
# Input : Dividend in Reg r5
diff --git a/libgcc/config/microblaze/moddi3.asm b/libgcc/config/microblaze/moddi3.S
index 4923b45ffeb..3e8d94f70a9 100644
--- a/libgcc/config/microblaze/moddi3.asm
+++ b/libgcc/config/microblaze/moddi3.S
@@ -1,6 +1,6 @@
###################################
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# modsi3.asm
+# moddi3.S
#
# modulo operation for 64 bit integers.
#
diff --git a/libgcc/config/microblaze/modsi3.asm b/libgcc/config/microblaze/modsi3.S
index cae95c8bc63..4be6be42616 100644
--- a/libgcc/config/microblaze/modsi3.asm
+++ b/libgcc/config/microblaze/modsi3.S
@@ -1,6 +1,6 @@
###################################
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# modsi3.asm
+# modsi3.S
#
# modulo operation for 32 bit integers.
# Input : op1 in Reg r5
diff --git a/libgcc/config/microblaze/muldi3_hard.asm b/libgcc/config/microblaze/muldi3_hard.S
index 0499e2a550b..14cfff59772 100644
--- a/libgcc/config/microblaze/muldi3_hard.asm
+++ b/libgcc/config/microblaze/muldi3_hard.S
@@ -1,6 +1,6 @@
###################################-
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# muldi3_hard.asm
+# muldi3_hard.S
#
# Multiply operation for 64 bit integers, for devices with hard multiply
# Input : Operand1[H] in Reg r5
diff --git a/libgcc/config/microblaze/mulsi3.asm b/libgcc/config/microblaze/mulsi3.S
index 03fe0288df8..77d2daa9270 100644
--- a/libgcc/config/microblaze/mulsi3.asm
+++ b/libgcc/config/microblaze/mulsi3.S
@@ -1,6 +1,6 @@
###################################-*-asm*-
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# mulsi3.asm
+# mulsi3.S
#
# Multiply operation for 32 bit integers.
# Input : Operand1 in Reg r5
diff --git a/libgcc/config/microblaze/stack_overflow_exit.asm b/libgcc/config/microblaze/stack_overflow_exit.S
index 30b31f0a5ba..98182a2b361 100644
--- a/libgcc/config/microblaze/stack_overflow_exit.asm
+++ b/libgcc/config/microblaze/stack_overflow_exit.S
@@ -1,6 +1,6 @@
###################################-*-asm*-
#
-# Copyright 2009 Free Software Foundation, Inc.
+# Copyright 2009, 2011 Free Software Foundation, Inc.
#
#
# Contributed by Michael Eager <eager@eagercon.com>.
@@ -24,7 +24,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# stack_overflow_exit.asm
+# stack_overflow_exit.S
#
# Checks for stack overflows and sets the global variable
# stack_overflow_error with the value of current stack pointer
diff --git a/libgcc/config/microblaze/t-microblaze b/libgcc/config/microblaze/t-microblaze
index 85fc8d39d8a..3a9c7ff23fe 100644
--- a/libgcc/config/microblaze/t-microblaze
+++ b/libgcc/config/microblaze/t-microblaze
@@ -1,12 +1,10 @@
LIB2ADD += \
- $(srcdir)/config/microblaze/divsi3.asm \
- $(srcdir)/config/microblaze/moddi3.asm \
- $(srcdir)/config/microblaze/modsi3.asm \
- $(srcdir)/config/microblaze/muldi3_hard.asm \
- $(srcdir)/config/microblaze/mulsi3.asm \
- $(srcdir)/config/microblaze/stack_overflow_exit.asm \
- $(srcdir)/config/microblaze/udivsi3.asm \
- $(srcdir)/config/microblaze/umodsi3.asm \
+ $(srcdir)/config/microblaze/divsi3.S \
+ $(srcdir)/config/microblaze/moddi3.S \
+ $(srcdir)/config/microblaze/modsi3.S \
+ $(srcdir)/config/microblaze/muldi3_hard.S \
+ $(srcdir)/config/microblaze/mulsi3.S \
+ $(srcdir)/config/microblaze/stack_overflow_exit.S \
+ $(srcdir)/config/microblaze/udivsi3.S \
+ $(srcdir)/config/microblaze/umodsi3.S \
$(srcdir)/config/microblaze/divsi3_table.c
-
-MULTILIB_OPTIONS = mxl-barrel-shift mno-xl-soft-mul mxl-multiply-high
diff --git a/libgcc/config/microblaze/udivsi3.asm b/libgcc/config/microblaze/udivsi3.S
index 879cd349ca7..07a2d658092 100644
--- a/libgcc/config/microblaze/udivsi3.asm
+++ b/libgcc/config/microblaze/udivsi3.S
@@ -1,6 +1,6 @@
###################################-
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# udivsi3.asm
+# udivsi3.S
#
# Unsigned divide operation.
# Input : Divisor in Reg r5
diff --git a/libgcc/config/microblaze/umodsi3.asm b/libgcc/config/microblaze/umodsi3.S
index f7fd0087965..67de12c84ac 100644
--- a/libgcc/config/microblaze/umodsi3.asm
+++ b/libgcc/config/microblaze/umodsi3.S
@@ -1,6 +1,6 @@
###################################
#
-# Copyright 2009, 2010 Free Software Foundation, Inc.
+# Copyright 2009, 2010, 2011 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
@@ -23,7 +23,7 @@
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
-# umodsi3.asm
+# umodsi3.S
#
# Unsigned modulo operation for 32 bit integers.
# Input : op1 in Reg r5
diff --git a/libgcc/config/mips/crti.S b/libgcc/config/mips/crti.S
new file mode 100644
index 00000000000..ac04271c598
--- /dev/null
+++ b/libgcc/config/mips/crti.S
@@ -0,0 +1,49 @@
+/* Copyright (C) 2001, 2002 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack.
+ Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */
+
+ .section .init,"ax",@progbits
+ .globl _init
+ .type _init,@function
+_init:
+#ifdef __mips64
+ daddu $sp,$sp,-48
+ sd $31,40($sp)
+#else
+ addu $sp,$sp,-32
+ sw $31,20($sp)
+#endif
+
+ .section .fini,"ax",@progbits
+ .globl _fini
+ .type _fini,@function
+_fini:
+#ifdef __mips64
+ daddu $sp,$sp,-48
+ sd $31,40($sp)
+#else
+ addu $sp,$sp,-32
+ sw $31,20($sp)
+#endif
diff --git a/libgcc/config/mips/crtn.S b/libgcc/config/mips/crtn.S
new file mode 100644
index 00000000000..03a6b68c9cf
--- /dev/null
+++ b/libgcc/config/mips/crtn.S
@@ -0,0 +1,52 @@
+/* Copyright (C) 2001, 2002 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack.
+ Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */
+
+#ifdef __mips16
+#define RA $7
+#else
+#define RA $31
+#endif
+
+ .section .init,"ax",@progbits
+#ifdef __mips64
+ ld RA,40($sp)
+ daddu $sp,$sp,48
+#else
+ lw RA,20($sp)
+ addu $sp,$sp,32
+#endif
+ j RA
+
+ .section .fini,"ax",@progbits
+#ifdef __mips64
+ ld RA,40($sp)
+ daddu $sp,$sp,48
+#else
+ lw RA,20($sp)
+ addu $sp,$sp,32
+#endif
+ j RA
+
diff --git a/libgcc/config/mips/gthr-mipssde.h b/libgcc/config/mips/gthr-mipssde.h
new file mode 100644
index 00000000000..34f9b6cf54b
--- /dev/null
+++ b/libgcc/config/mips/gthr-mipssde.h
@@ -0,0 +1,230 @@
+/* MIPS SDE threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+/* Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Nigel Stephens <nigel@mips.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_MIPSSDE_H
+#define GCC_GTHR_MIPSSDE_H
+
+/* MIPS SDE threading API specific definitions.
+ Easy, since the interface is pretty much one-to-one. */
+
+#define __GTHREADS 1
+
+#include <sdethread.h>
+#include <unistd.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef __sdethread_key_t __gthread_key_t;
+typedef __sdethread_once_t __gthread_once_t;
+typedef __sdethread_mutex_t __gthread_mutex_t;
+
+typedef struct {
+ long depth;
+ __sdethread_t owner;
+ __sdethread_mutex_t actual;
+} __gthread_recursive_mutex_t;
+
+#define __GTHREAD_MUTEX_INIT __SDETHREAD_MUTEX_INITIALIZER("gthr")
+#define __GTHREAD_ONCE_INIT __SDETHREAD_ONCE_INIT
+static inline int
+__gthread_recursive_mutex_init_function(__gthread_recursive_mutex_t *__mutex);
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+# define __gthrw(name) \
+ static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name)));
+# define __gthrw_(name) __gthrw_ ## name
+#else
+# define __gthrw(name)
+# define __gthrw_(name) name
+#endif
+
+__gthrw(__sdethread_once)
+__gthrw(__sdethread_key_create)
+__gthrw(__sdethread_key_delete)
+__gthrw(__sdethread_getspecific)
+__gthrw(__sdethread_setspecific)
+
+__gthrw(__sdethread_self)
+
+__gthrw(__sdethread_mutex_lock)
+__gthrw(__sdethread_mutex_trylock)
+__gthrw(__sdethread_mutex_unlock)
+
+__gthrw(__sdethread_mutex_init)
+
+__gthrw(__sdethread_threading)
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
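+/* __sdethread_threading is referenced only weakly (via __gthrw above),
+ so its address is nonnull exactly when the SDE thread library has
+ been linked into the program.  */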
+static inline int
+__gthread_active_p (void)
+{
+ return !!(void *)&__sdethread_threading;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p (void)
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (__gthread_active_p ())
+ return __gthrw_(__sdethread_once) (__once, __func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ return __gthrw_(__sdethread_key_create) (__key, __dtor);
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ return __gthrw_(__sdethread_key_delete) (__key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ return __gthrw_(__sdethread_getspecific) (__key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ return __gthrw_(__sdethread_setspecific) (__key, __ptr);
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(__mutex))
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(__sdethread_mutex_lock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(__sdethread_mutex_trylock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(__sdethread_mutex_unlock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ __mutex->depth = 0;
+ __mutex->owner = __gthrw_(__sdethread_self) ();
+ return __gthrw_(__sdethread_mutex_init) (&__mutex->actual, NULL);
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ __sdethread_t __me = __gthrw_(__sdethread_self) ();
+
+ if (__mutex->owner != __me)
+ {
+ __gthrw_(__sdethread_mutex_lock) (&__mutex->actual);
+ __mutex->owner = __me;
+ }
+
+ __mutex->depth++;
+ }
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ __sdethread_t __me = __gthrw_(__sdethread_self) ();
+
+ if (__mutex->owner != __me)
+ {
+ if (__gthrw_(__sdethread_mutex_trylock) (&__mutex->actual))
+ return 1;
+ __mutex->owner = __me;
+ }
+
+ __mutex->depth++;
+ }
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ if (--__mutex->depth == 0)
+ {
+ __mutex->owner = (__sdethread_t) 0;
+ __gthrw_(__sdethread_mutex_unlock) (&__mutex->actual);
+ }
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! GCC_GTHR_MIPSSDE_H */
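
As a hedged illustration of how libgcc2 and libobjc consume this interface
(the identifiers below are invented for the example, and it assumes the
libgcc build environment in which this header is normally included):

    static __gthread_once_t once = __GTHREAD_ONCE_INIT;
    static __gthread_mutex_t lock = __GTHREAD_MUTEX_INIT;

    static void
    init_tables (void)
    {
      /* Runs at most once, even if several threads race here.  */
    }

    void
    enter (void)
    {
      __gthread_once (&once, init_tables);
      __gthread_mutex_lock (&lock);    /* no-op when single-threaded */
      /* ... critical section ... */
      __gthread_mutex_unlock (&lock);
    }
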
diff --git a/libgcc/config/mips/libgcc-mips16.ver b/libgcc/config/mips/libgcc-mips16.ver
new file mode 100644
index 00000000000..ddb23e7e750
--- /dev/null
+++ b/libgcc/config/mips/libgcc-mips16.ver
@@ -0,0 +1,86 @@
+# Copyright (C) 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_4.4.0 {
+ __mips16_addsf3
+ __mips16_subsf3
+ __mips16_mulsf3
+ __mips16_divsf3
+ __mips16_eqsf2
+ __mips16_nesf2
+ __mips16_gtsf2
+ __mips16_gesf2
+ __mips16_lesf2
+ __mips16_ltsf2
+ __mips16_floatsisf
+ __mips16_floatunsisf
+ __mips16_fix_truncsfsi
+ __mips16_adddf3
+ __mips16_subdf3
+ __mips16_muldf3
+ __mips16_divdf3
+ __mips16_extendsfdf2
+ __mips16_truncdfsf2
+ __mips16_eqdf2
+ __mips16_nedf2
+ __mips16_gtdf2
+ __mips16_gedf2
+ __mips16_ledf2
+ __mips16_ltdf2
+ __mips16_floatsidf
+ __mips16_floatunsidf
+ __mips16_fix_truncdfsi
+ __mips16_ret_sf
+ __mips16_ret_sc
+ __mips16_ret_df
+ __mips16_ret_dc
+ __mips16_call_stub_1
+ __mips16_call_stub_5
+ __mips16_call_stub_2
+ __mips16_call_stub_6
+ __mips16_call_stub_9
+ __mips16_call_stub_10
+ __mips16_call_stub_sf_0
+ __mips16_call_stub_sf_1
+ __mips16_call_stub_sf_5
+ __mips16_call_stub_sf_2
+ __mips16_call_stub_sf_6
+ __mips16_call_stub_sf_9
+ __mips16_call_stub_sf_10
+ __mips16_call_stub_sc_0
+ __mips16_call_stub_sc_1
+ __mips16_call_stub_sc_5
+ __mips16_call_stub_sc_2
+ __mips16_call_stub_sc_6
+ __mips16_call_stub_sc_9
+ __mips16_call_stub_sc_10
+ __mips16_call_stub_df_0
+ __mips16_call_stub_df_1
+ __mips16_call_stub_df_5
+ __mips16_call_stub_df_2
+ __mips16_call_stub_df_6
+ __mips16_call_stub_df_9
+ __mips16_call_stub_df_10
+ __mips16_call_stub_dc_0
+ __mips16_call_stub_dc_1
+ __mips16_call_stub_dc_5
+ __mips16_call_stub_dc_2
+ __mips16_call_stub_dc_6
+ __mips16_call_stub_dc_9
+ __mips16_call_stub_dc_10
+}
diff --git a/libgcc/config/mips/mips16.S b/libgcc/config/mips/mips16.S
new file mode 100644
index 00000000000..ec331b5f65e
--- /dev/null
+++ b/libgcc/config/mips/mips16.S
@@ -0,0 +1,712 @@
+/* mips16 floating point support code
+ Copyright (C) 1996, 1997, 1998, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+ Contributed by Cygnus Support
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file contains mips16 floating point support functions. These
+ functions are called by mips16 code to handle floating point when
+ -msoft-float is not used. They accept the arguments and return
+ values using the soft-float calling convention, but do the actual
+ operation using the hard floating point instructions. */
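+
+/* For instance, compiling
+     float f (float a, float b) { return a + b; }
+ as mips16 hard-float emits a call to __mips16_addsf3, which is
+ defined below via OPSF3 (__mips16_addsf3, add.s).  */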
+
+#if defined _MIPS_SIM && (_MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIO64)
+
+/* This file contains 32-bit assembly code. */
+ .set nomips16
+
+/* Start a function. */
+
+#define STARTFN(NAME) .globl NAME; .ent NAME; NAME:
+
+/* Finish a function. */
+
+#define ENDFN(NAME) .end NAME
+
+/* ARG1
+ The FPR that holds the first floating-point argument.
+
+ ARG2
+ The FPR that holds the second floating-point argument.
+
+ RET
+ The FPR that holds a floating-point return value. */
+
+#define RET $f0
+#define ARG1 $f12
+#ifdef __mips64
+#define ARG2 $f13
+#else
+#define ARG2 $f14
+#endif
+
+/* Set 64-bit register GPR so that its high 32 bits contain HIGH_FPR
+ and so that its low 32 bits contain LOW_FPR. */
+#define MERGE_GPRf(GPR, HIGH_FPR, LOW_FPR) \
+ .set noat; \
+ mfc1 $1, LOW_FPR; \
+ mfc1 GPR, HIGH_FPR; \
+ dsll $1, $1, 32; \
+ dsll GPR, GPR, 32; \
+ dsrl $1, $1, 32; \
+ or GPR, GPR, $1; \
+ .set at
+
+/* Move the high 32 bits of GPR to HIGH_FPR and the low 32 bits of
+ GPR to LOW_FPR. */
+#define MERGE_GPRt(GPR, HIGH_FPR, LOW_FPR) \
+ .set noat; \
+ dsrl $1, GPR, 32; \
+ mtc1 GPR, LOW_FPR; \
+ mtc1 $1, HIGH_FPR; \
+ .set at
+
+/* Jump to T, and use "OPCODE, OP2" to implement a delayed move. */
+#define DELAYt(T, OPCODE, OP2) \
+ .set noreorder; \
+ jr T; \
+ OPCODE, OP2; \
+ .set reorder
+
+/* Use "OPCODE. OP2" and jump to T. */
+#define DELAYf(T, OPCODE, OP2) OPCODE, OP2; jr T
+
+/* MOVE_SF_BYTE0(D)
+ Move the first single-precision floating-point argument between
+ GPRs and FPRs.
+
+ MOVE_SI_BYTE0(D)
+ Likewise the first single-precision integer argument.
+
+ MOVE_SF_BYTE4(D)
+ Move the second single-precision floating-point argument between
+ GPRs and FPRs, given that the first argument occupies 4 bytes.
+
+ MOVE_SF_BYTE8(D)
+ Move the second single-precision floating-point argument between
+ GPRs and FPRs, given that the first argument occupies 8 bytes.
+
+ MOVE_DF_BYTE0(D)
+ Move the first double-precision floating-point argument between
+ GPRs and FPRs.
+
+ MOVE_DF_BYTE8(D)
+ Likewise the second double-precision floating-point argument.
+
+ MOVE_SF_RET(D, T)
+ Likewise a single-precision floating-point return value,
+ then jump to T.
+
+ MOVE_SC_RET(D, T)
+ Likewise a complex single-precision floating-point return value.
+
+ MOVE_DF_RET(D, T)
+ Likewise a double-precision floating-point return value.
+
+ MOVE_DC_RET(D, T)
+ Likewise a complex double-precision floating-point return value.
+
+ MOVE_SI_RET(D, T)
+ Likewise a single-precision integer return value.
+
+ The D argument is "t" to move to FPRs and "f" to move from FPRs.
+ The return macros may assume that the target of the jump does not
+ use a floating-point register. */
+
+#define MOVE_SF_RET(D, T) DELAY##D (T, m##D##c1 $2,$f0)
+#define MOVE_SI_RET(D, T) DELAY##D (T, m##D##c1 $2,$f0)
+
+#if defined(__mips64) && defined(__MIPSEB__)
+#define MOVE_SC_RET(D, T) MERGE_GPR##D ($2, $f0, $f1); jr T
+#elif defined(__mips64)
+/* The high 32 bits of $2 correspond to the second word in memory;
+ i.e. the imaginary part. */
+#define MOVE_SC_RET(D, T) MERGE_GPR##D ($2, $f1, $f0); jr T
+#elif __mips_fpr == 64
+#define MOVE_SC_RET(D, T) m##D##c1 $2,$f0; DELAY##D (T, m##D##c1 $3,$f1)
+#else
+#define MOVE_SC_RET(D, T) m##D##c1 $2,$f0; DELAY##D (T, m##D##c1 $3,$f2)
+#endif
+
+#if defined(__mips64)
+#define MOVE_SF_BYTE0(D) m##D##c1 $4,$f12
+#define MOVE_SF_BYTE4(D) m##D##c1 $5,$f13
+#define MOVE_SF_BYTE8(D) m##D##c1 $5,$f13
+#else
+#define MOVE_SF_BYTE0(D) m##D##c1 $4,$f12
+#define MOVE_SF_BYTE4(D) m##D##c1 $5,$f14
+#define MOVE_SF_BYTE8(D) m##D##c1 $6,$f14
+#endif
+#define MOVE_SI_BYTE0(D) MOVE_SF_BYTE0(D)
+
+#if defined(__mips64)
+#define MOVE_DF_BYTE0(D) dm##D##c1 $4,$f12
+#define MOVE_DF_BYTE8(D) dm##D##c1 $5,$f13
+#define MOVE_DF_RET(D, T) DELAY##D (T, dm##D##c1 $2,$f0)
+#define MOVE_DC_RET(D, T) dm##D##c1 $3,$f1; MOVE_DF_RET (D, T)
+#elif __mips_fpr == 64 && defined(__MIPSEB__)
+#define MOVE_DF_BYTE0(D) m##D##c1 $5,$f12; m##D##hc1 $4,$f12
+#define MOVE_DF_BYTE8(D) m##D##c1 $7,$f14; m##D##hc1 $6,$f14
+#define MOVE_DF_RET(D, T) m##D##c1 $3,$f0; DELAY##D (T, m##D##hc1 $2,$f0)
+#define MOVE_DC_RET(D, T) m##D##c1 $5,$f1; m##D##hc1 $4,$f1; MOVE_DF_RET (D, T)
+#elif __mips_fpr == 64
+#define MOVE_DF_BYTE0(D) m##D##c1 $4,$f12; m##D##hc1 $5,$f12
+#define MOVE_DF_BYTE8(D) m##D##c1 $6,$f14; m##D##hc1 $7,$f14
+#define MOVE_DF_RET(D, T) m##D##c1 $2,$f0; DELAY##D (T, m##D##hc1 $3,$f0)
+#define MOVE_DC_RET(D, T) m##D##c1 $4,$f1; m##D##hc1 $5,$f1; MOVE_DF_RET (D, T)
+#elif defined(__MIPSEB__)
+/* FPRs are little-endian. */
+#define MOVE_DF_BYTE0(D) m##D##c1 $4,$f13; m##D##c1 $5,$f12
+#define MOVE_DF_BYTE8(D) m##D##c1 $6,$f15; m##D##c1 $7,$f14
+#define MOVE_DF_RET(D, T) m##D##c1 $2,$f1; DELAY##D (T, m##D##c1 $3,$f0)
+#define MOVE_DC_RET(D, T) m##D##c1 $4,$f3; m##D##c1 $5,$f2; MOVE_DF_RET (D, T)
+#else
+#define MOVE_DF_BYTE0(D) m##D##c1 $4,$f12; m##D##c1 $5,$f13
+#define MOVE_DF_BYTE8(D) m##D##c1 $6,$f14; m##D##c1 $7,$f15
+#define MOVE_DF_RET(D, T) m##D##c1 $2,$f0; DELAY##D (T, m##D##c1 $3,$f1)
+#define MOVE_DC_RET(D, T) m##D##c1 $4,$f2; m##D##c1 $5,$f3; MOVE_DF_RET (D, T)
+#endif
+
+/* Single-precision math. */
+
+/* Define a function NAME that loads two single-precision values,
+ performs FPU operation OPCODE on them, and returns the single-
+ precision result. */
+
+#define OPSF3(NAME, OPCODE) \
+STARTFN (NAME); \
+ MOVE_SF_BYTE0 (t); \
+ MOVE_SF_BYTE4 (t); \
+ OPCODE RET,ARG1,ARG2; \
+ MOVE_SF_RET (f, $31); \
+ ENDFN (NAME)
+
+#ifdef L_m16addsf3
+OPSF3 (__mips16_addsf3, add.s)
+#endif
+#ifdef L_m16subsf3
+OPSF3 (__mips16_subsf3, sub.s)
+#endif
+#ifdef L_m16mulsf3
+OPSF3 (__mips16_mulsf3, mul.s)
+#endif
+#ifdef L_m16divsf3
+OPSF3 (__mips16_divsf3, div.s)
+#endif
+
+/* Define a function NAME that loads a single-precision value,
+ performs FPU operation OPCODE on it, and returns the single-
+ precision result. */
+
+#define OPSF2(NAME, OPCODE) \
+STARTFN (NAME); \
+ MOVE_SF_BYTE0 (t); \
+ OPCODE RET,ARG1; \
+ MOVE_SF_RET (f, $31); \
+ ENDFN (NAME)
+
+#ifdef L_m16negsf2
+OPSF2 (__mips16_negsf2, neg.s)
+#endif
+#ifdef L_m16abssf2
+OPSF2 (__mips16_abssf2, abs.s)
+#endif
+
+/* Single-precision comparisons. */
+
+/* Define a function NAME that loads two single-precision values,
+ performs floating point comparison OPCODE, and returns TRUE or
+ FALSE depending on the result. */
+
+#define CMPSF(NAME, OPCODE, TRUE, FALSE) \
+STARTFN (NAME); \
+ MOVE_SF_BYTE0 (t); \
+ MOVE_SF_BYTE4 (t); \
+ OPCODE ARG1,ARG2; \
+ li $2,TRUE; \
+ bc1t 1f; \
+ li $2,FALSE; \
+1:; \
+ j $31; \
+ ENDFN (NAME)
+
+/* Like CMPSF, but reverse the comparison operands. */
+
+#define REVCMPSF(NAME, OPCODE, TRUE, FALSE) \
+STARTFN (NAME); \
+ MOVE_SF_BYTE0 (t); \
+ MOVE_SF_BYTE4 (t); \
+ OPCODE ARG2,ARG1; \
+ li $2,TRUE; \
+ bc1t 1f; \
+ li $2,FALSE; \
+1:; \
+ j $31; \
+ ENDFN (NAME)
+
+#ifdef L_m16eqsf2
+CMPSF (__mips16_eqsf2, c.eq.s, 0, 1)
+#endif
+#ifdef L_m16nesf2
+CMPSF (__mips16_nesf2, c.eq.s, 0, 1)
+#endif
+#ifdef L_m16gtsf2
+REVCMPSF (__mips16_gtsf2, c.lt.s, 1, 0)
+#endif
+#ifdef L_m16gesf2
+REVCMPSF (__mips16_gesf2, c.le.s, 0, -1)
+#endif
+#ifdef L_m16lesf2
+CMPSF (__mips16_lesf2, c.le.s, 0, 1)
+#endif
+#ifdef L_m16ltsf2
+CMPSF (__mips16_ltsf2, c.lt.s, -1, 0)
+#endif
+#ifdef L_m16unordsf2
+CMPSF (__mips16_unordsf2, c.un.s, 1, 0)
+#endif
+
+
+/* Single-precision conversions. */
+
+#ifdef L_m16fltsisf
+STARTFN (__mips16_floatsisf)
+ MOVE_SF_BYTE0 (t)
+ cvt.s.w RET,ARG1
+ MOVE_SF_RET (f, $31)
+ ENDFN (__mips16_floatsisf)
+#endif
+
+#ifdef L_m16fltunsisf
+STARTFN (__mips16_floatunsisf)
+ .set noreorder
+ bltz $4,1f
+ MOVE_SF_BYTE0 (t)
+ .set reorder
+ cvt.s.w RET,ARG1
+ MOVE_SF_RET (f, $31)
+1:
+ and $2,$4,1
+ srl $3,$4,1
+ or $2,$2,$3
+ mtc1 $2,RET
+ cvt.s.w RET,RET
+ add.s RET,RET,RET
+ MOVE_SF_RET (f, $31)
+ ENDFN (__mips16_floatunsisf)
+#endif
+
+#ifdef L_m16fix_truncsfsi
+STARTFN (__mips16_fix_truncsfsi)
+ MOVE_SF_BYTE0 (t)
+ trunc.w.s RET,ARG1,$4
+ MOVE_SI_RET (f, $31)
+ ENDFN (__mips16_fix_truncsfsi)
+#endif
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+
+/* Double-precision math. */
+
+/* Define a function NAME that loads two double-precision values,
+ performs FPU operation OPCODE on them, and returns the double-
+ precision result. */
+
+#define OPDF3(NAME, OPCODE) \
+STARTFN (NAME); \
+ MOVE_DF_BYTE0 (t); \
+ MOVE_DF_BYTE8 (t); \
+ OPCODE RET,ARG1,ARG2; \
+ MOVE_DF_RET (f, $31); \
+ ENDFN (NAME)
+
+#ifdef L_m16adddf3
+OPDF3 (__mips16_adddf3, add.d)
+#endif
+#ifdef L_m16subdf3
+OPDF3 (__mips16_subdf3, sub.d)
+#endif
+#ifdef L_m16muldf3
+OPDF3 (__mips16_muldf3, mul.d)
+#endif
+#ifdef L_m16divdf3
+OPDF3 (__mips16_divdf3, div.d)
+#endif
+
+/* Define a function NAME that loads a double-precision value,
+ performs FPU operation OPCODE on it, and returns the double-
+ precision result. */
+
+#define OPDF2(NAME, OPCODE) \
+STARTFN (NAME); \
+ MOVE_DF_BYTE0 (t); \
+ OPCODE RET,ARG1; \
+ MOVE_DF_RET (f, $31); \
+ ENDFN (NAME)
+
+#ifdef L_m16negdf2
+OPDF2 (__mips16_negdf2, neg.d)
+#endif
+#ifdef L_m16absdf2
+OPDF2 (__mips16_absdf2, abs.d)
+#endif
+
+/* Conversions between single and double precision. */
+
+#ifdef L_m16extsfdf2
+STARTFN (__mips16_extendsfdf2)
+ MOVE_SF_BYTE0 (t)
+ cvt.d.s RET,ARG1
+ MOVE_DF_RET (f, $31)
+ ENDFN (__mips16_extendsfdf2)
+#endif
+
+#ifdef L_m16trdfsf2
+STARTFN (__mips16_truncdfsf2)
+ MOVE_DF_BYTE0 (t)
+ cvt.s.d RET,ARG1
+ MOVE_SF_RET (f, $31)
+ ENDFN (__mips16_truncdfsf2)
+#endif
+
+/* Double-precision comparisons. */
+
+/* Define a function NAME that loads two double-precision values,
+ performs floating point comparison OPCODE, and returns TRUE or
+ FALSE depending on the result. */
+
+#define CMPDF(NAME, OPCODE, TRUE, FALSE) \
+STARTFN (NAME); \
+ MOVE_DF_BYTE0 (t); \
+ MOVE_DF_BYTE8 (t); \
+ OPCODE ARG1,ARG2; \
+ li $2,TRUE; \
+ bc1t 1f; \
+ li $2,FALSE; \
+1:; \
+ j $31; \
+ ENDFN (NAME)
+
+/* Like CMPDF, but reverse the comparison operands. */
+
+#define REVCMPDF(NAME, OPCODE, TRUE, FALSE) \
+STARTFN (NAME); \
+ MOVE_DF_BYTE0 (t); \
+ MOVE_DF_BYTE8 (t); \
+ OPCODE ARG2,ARG1; \
+ li $2,TRUE; \
+ bc1t 1f; \
+ li $2,FALSE; \
+1:; \
+ j $31; \
+ ENDFN (NAME)
+
+#ifdef L_m16eqdf2
+CMPDF (__mips16_eqdf2, c.eq.d, 0, 1)
+#endif
+#ifdef L_m16nedf2
+CMPDF (__mips16_nedf2, c.eq.d, 0, 1)
+#endif
+#ifdef L_m16gtdf2
+REVCMPDF (__mips16_gtdf2, c.lt.d, 1, 0)
+#endif
+#ifdef L_m16gedf2
+REVCMPDF (__mips16_gedf2, c.le.d, 0, -1)
+#endif
+#ifdef L_m16ledf2
+CMPDF (__mips16_ledf2, c.le.d, 0, 1)
+#endif
+#ifdef L_m16ltdf2
+CMPDF (__mips16_ltdf2, c.lt.d, -1, 0)
+#endif
+#ifdef L_m16unorddf2
+CMPDF (__mips16_unorddf2, c.un.d, 1, 0)
+#endif
+
+/* Double-precision conversions. */
+
+#ifdef L_m16fltsidf
+STARTFN (__mips16_floatsidf)
+ MOVE_SI_BYTE0 (t)
+ cvt.d.w RET,ARG1
+ MOVE_DF_RET (f, $31)
+ ENDFN (__mips16_floatsidf)
+#endif
+
+#ifdef L_m16fltunsidf
+STARTFN (__mips16_floatunsidf)
+ MOVE_SI_BYTE0 (t)
+ cvt.d.w RET,ARG1
+ bgez $4,1f
+ li.d ARG1, 4.294967296e+9
+ add.d RET, RET, ARG1
+1: MOVE_DF_RET (f, $31)
+ ENDFN (__mips16_floatunsidf)
+#endif
+
+#ifdef L_m16fix_truncdfsi
+STARTFN (__mips16_fix_truncdfsi)
+ MOVE_DF_BYTE0 (t)
+ trunc.w.d RET,ARG1,$4
+ MOVE_SI_RET (f, $31)
+ ENDFN (__mips16_fix_truncdfsi)
+#endif
+#endif /* !__mips_single_float */
+
+/* Define a function NAME that moves a return value of mode MODE from
+ FPRs to GPRs. */
+
+#define RET_FUNCTION(NAME, MODE) \
+STARTFN (NAME); \
+ MOVE_##MODE##_RET (t, $31); \
+ ENDFN (NAME)
+
+#ifdef L_m16retsf
+RET_FUNCTION (__mips16_ret_sf, SF)
+#endif
+
+#ifdef L_m16retsc
+RET_FUNCTION (__mips16_ret_sc, SC)
+#endif
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+#ifdef L_m16retdf
+RET_FUNCTION (__mips16_ret_df, DF)
+#endif
+
+#ifdef L_m16retdc
+RET_FUNCTION (__mips16_ret_dc, DC)
+#endif
+#endif /* !__mips_single_float */
+
+/* STUB_ARGS_X copies the arguments from GPRs to FPRs for argument
+ code X. X is calculated as ARG1 + ARG2 * 4, where ARG1 and ARG2
+ classify the first and second arguments as follows:
+
+ 1: a single-precision argument
+ 2: a double-precision argument
+ 0: no argument, or not one of the above. */
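+
+/* For example, a (float, double) argument list gives ARG1 = 1 and
+ ARG2 = 2, so X = 1 + 2 * 4 = 9 and the stub uses STUB_ARGS_9.  */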
+
+#define STUB_ARGS_0 /* () */
+#define STUB_ARGS_1 MOVE_SF_BYTE0 (t) /* (sf) */
+#define STUB_ARGS_5 MOVE_SF_BYTE0 (t); MOVE_SF_BYTE4 (t) /* (sf, sf) */
+#define STUB_ARGS_9 MOVE_SF_BYTE0 (t); MOVE_DF_BYTE8 (t) /* (sf, df) */
+#define STUB_ARGS_2 MOVE_DF_BYTE0 (t) /* (df) */
+#define STUB_ARGS_6 MOVE_DF_BYTE0 (t); MOVE_SF_BYTE8 (t) /* (df, sf) */
+#define STUB_ARGS_10 MOVE_DF_BYTE0 (t); MOVE_DF_BYTE8 (t) /* (df, df) */
+
+/* These functions are used by 16-bit code when calling via a function
+ pointer. They must copy the floating point arguments from the GPRs
+ to FPRs and then call function $2. */
+
+#define CALL_STUB_NO_RET(NAME, CODE) \
+STARTFN (NAME); \
+ STUB_ARGS_##CODE; \
+ .set noreorder; \
+ jr $2; \
+ move $25,$2; \
+ .set reorder; \
+ ENDFN (NAME)
+
+#ifdef L_m16stub1
+CALL_STUB_NO_RET (__mips16_call_stub_1, 1)
+#endif
+
+#ifdef L_m16stub5
+CALL_STUB_NO_RET (__mips16_call_stub_5, 5)
+#endif
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+
+#ifdef L_m16stub2
+CALL_STUB_NO_RET (__mips16_call_stub_2, 2)
+#endif
+
+#ifdef L_m16stub6
+CALL_STUB_NO_RET (__mips16_call_stub_6, 6)
+#endif
+
+#ifdef L_m16stub9
+CALL_STUB_NO_RET (__mips16_call_stub_9, 9)
+#endif
+
+#ifdef L_m16stub10
+CALL_STUB_NO_RET (__mips16_call_stub_10, 10)
+#endif
+#endif /* !__mips_single_float */
+
+/* Now we have the same set of functions, except that this time the
+ function being called returns an SFmode, SCmode, DFmode or DCmode
+ value; we need to instantiate a set for each case. The calling
+ function will arrange to preserve $18, so these functions are free
+ to use it to hold the return address.
+
+ Note that we do not know whether the function we are calling is 16
+ bit or 32 bit. However, it does not matter, because 16-bit
+ functions always return floating point values in both the gp and
+ the fp regs. It would be possible to check whether the function
+ being called is 16 bits, in which case the copy is unnecessary;
+ however, it's faster to always do the copy. */
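+
+/* For instance, a mips16 call through
+     double (*fp) (double);
+ loads fp into $2 and calls __mips16_call_stub_df_2 (argument
+ code 2: a single double-precision argument).  */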
+
+#define CALL_STUB_RET(NAME, CODE, MODE) \
+STARTFN (NAME); \
+ move $18,$31; \
+ STUB_ARGS_##CODE; \
+ .set noreorder; \
+ jalr $2; \
+ move $25,$2; \
+ .set reorder; \
+ MOVE_##MODE##_RET (f, $18); \
+ ENDFN (NAME)
+
+/* First, instantiate the single-float set. */
+
+#ifdef L_m16stubsf0
+CALL_STUB_RET (__mips16_call_stub_sf_0, 0, SF)
+#endif
+
+#ifdef L_m16stubsf1
+CALL_STUB_RET (__mips16_call_stub_sf_1, 1, SF)
+#endif
+
+#ifdef L_m16stubsf5
+CALL_STUB_RET (__mips16_call_stub_sf_5, 5, SF)
+#endif
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+#ifdef L_m16stubsf2
+CALL_STUB_RET (__mips16_call_stub_sf_2, 2, SF)
+#endif
+
+#ifdef L_m16stubsf6
+CALL_STUB_RET (__mips16_call_stub_sf_6, 6, SF)
+#endif
+
+#ifdef L_m16stubsf9
+CALL_STUB_RET (__mips16_call_stub_sf_9, 9, SF)
+#endif
+
+#ifdef L_m16stubsf10
+CALL_STUB_RET (__mips16_call_stub_sf_10, 10, SF)
+#endif
+#endif /* !__mips_single_float */
+
+
+/* Now we have the same set of functions again, except that this time
+ the function being called returns a DFmode value. */
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+#ifdef L_m16stubdf0
+CALL_STUB_RET (__mips16_call_stub_df_0, 0, DF)
+#endif
+
+#ifdef L_m16stubdf1
+CALL_STUB_RET (__mips16_call_stub_df_1, 1, DF)
+#endif
+
+#ifdef L_m16stubdf5
+CALL_STUB_RET (__mips16_call_stub_df_5, 5, DF)
+#endif
+
+#ifdef L_m16stubdf2
+CALL_STUB_RET (__mips16_call_stub_df_2, 2, DF)
+#endif
+
+#ifdef L_m16stubdf6
+CALL_STUB_RET (__mips16_call_stub_df_6, 6, DF)
+#endif
+
+#ifdef L_m16stubdf9
+CALL_STUB_RET (__mips16_call_stub_df_9, 9, DF)
+#endif
+
+#ifdef L_m16stubdf10
+CALL_STUB_RET (__mips16_call_stub_df_10, 10, DF)
+#endif
+#endif /* !__mips_single_float */
+
+
+/* Ho hum. Here we have the same set of functions again, this time
+ for when the function being called returns an SCmode value. */
+
+#ifdef L_m16stubsc0
+CALL_STUB_RET (__mips16_call_stub_sc_0, 0, SC)
+#endif
+
+#ifdef L_m16stubsc1
+CALL_STUB_RET (__mips16_call_stub_sc_1, 1, SC)
+#endif
+
+#ifdef L_m16stubsc5
+CALL_STUB_RET (__mips16_call_stub_sc_5, 5, SC)
+#endif
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+#ifdef L_m16stubsc2
+CALL_STUB_RET (__mips16_call_stub_sc_2, 2, SC)
+#endif
+
+#ifdef L_m16stubsc6
+CALL_STUB_RET (__mips16_call_stub_sc_6, 6, SC)
+#endif
+
+#ifdef L_m16stubsc9
+CALL_STUB_RET (__mips16_call_stub_sc_9, 9, SC)
+#endif
+
+#ifdef L_m16stubsc10
+CALL_STUB_RET (__mips16_call_stub_sc_10, 10, SC)
+#endif
+#endif /* !__mips_single_float */
+
+
+/* Finally, another set of functions for DCmode. */
+
+#if !defined(__mips_single_float) && !defined(__SINGLE_FLOAT)
+#ifdef L_m16stubdc0
+CALL_STUB_RET (__mips16_call_stub_dc_0, 0, DC)
+#endif
+
+#ifdef L_m16stubdc1
+CALL_STUB_RET (__mips16_call_stub_dc_1, 1, DC)
+#endif
+
+#ifdef L_m16stubdc5
+CALL_STUB_RET (__mips16_call_stub_dc_5, 5, DC)
+#endif
+
+#ifdef L_m16stubdc2
+CALL_STUB_RET (__mips16_call_stub_dc_2, 2, DC)
+#endif
+
+#ifdef L_m16stubdc6
+CALL_STUB_RET (__mips16_call_stub_dc_6, 6, DC)
+#endif
+
+#ifdef L_m16stubdc9
+CALL_STUB_RET (__mips16_call_stub_dc_9, 9, DC)
+#endif
+
+#ifdef L_m16stubdc10
+CALL_STUB_RET (__mips16_call_stub_dc_10, 10, DC)
+#endif
+#endif /* !__mips_single_float */
+#endif
diff --git a/libgcc/config/mips/t-crtstuff b/libgcc/config/mips/t-crtstuff
new file mode 100644
index 00000000000..d8ab43be2ae
--- /dev/null
+++ b/libgcc/config/mips/t-crtstuff
@@ -0,0 +1,2 @@
+# Don't let CTOR_LIST end up in sdata section.
+CRTSTUFF_T_CFLAGS = -G 0
diff --git a/libgcc/config/mips/t-elf b/libgcc/config/mips/t-elf
new file mode 100644
index 00000000000..651f10a5303
--- /dev/null
+++ b/libgcc/config/mips/t-elf
@@ -0,0 +1,3 @@
+# We must build libgcc2.a with -G 0, in case the user wants to link
+# without the $gp register.
+HOST_LIBGCC2_CFLAGS += -G 0
diff --git a/libgcc/config/mips/t-mips b/libgcc/config/mips/t-mips
index b7d13b3ddb3..719c062ef0d 100644
--- a/libgcc/config/mips/t-mips
+++ b/libgcc/config/mips/t-mips
@@ -1,3 +1,5 @@
+LIB2_SIDITI_CONV_FUNCS = yes
+
FPBIT = true
FPBIT_CFLAGS = -DQUIET_NAN_NEGATED
DPBIT = true
diff --git a/libgcc/config/mips/t-mips16 b/libgcc/config/mips/t-mips16
index 94fa0e94275..5553ed76e2d 100644
--- a/libgcc/config/mips/t-mips16
+++ b/libgcc/config/mips/t-mips16
@@ -1,2 +1,45 @@
+# Copyright (C) 2007, 2008, 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = mips/mips16.S
+LIB1ASMFUNCS = _m16addsf3 _m16subsf3 _m16mulsf3 _m16divsf3 \
+ _m16eqsf2 _m16nesf2 _m16gtsf2 _m16gesf2 _m16lesf2 _m16ltsf2 \
+ _m16unordsf2 \
+ _m16fltsisf _m16fix_truncsfsi _m16fltunsisf \
+ _m16adddf3 _m16subdf3 _m16muldf3 _m16divdf3 \
+ _m16extsfdf2 _m16trdfsf2 \
+ _m16eqdf2 _m16nedf2 _m16gtdf2 _m16gedf2 _m16ledf2 _m16ltdf2 \
+ _m16unorddf2 \
+ _m16fltsidf _m16fix_truncdfsi _m16fltunsidf \
+ _m16retsf _m16retdf \
+ _m16retsc _m16retdc \
+ _m16stub1 _m16stub2 _m16stub5 _m16stub6 _m16stub9 _m16stub10 \
+ _m16stubsf0 _m16stubsf1 _m16stubsf2 _m16stubsf5 _m16stubsf6 \
+ _m16stubsf9 _m16stubsf10 \
+ _m16stubdf0 _m16stubdf1 _m16stubdf2 _m16stubdf5 _m16stubdf6 \
+ _m16stubdf9 _m16stubdf10 \
+ _m16stubsc0 _m16stubsc1 _m16stubsc2 _m16stubsc5 _m16stubsc6 \
+ _m16stubsc9 _m16stubsc10 \
+ _m16stubdc0 _m16stubdc1 _m16stubdc2 _m16stubdc5 _m16stubdc6 \
+ _m16stubdc9 _m16stubdc10
+
SYNC = yes
SYNC_CFLAGS = -mno-mips16
+
+# Version these symbols if building libgcc.so.
+SHLIB_MAPFILES += $(srcdir)/config/mips/libgcc-mips16.ver
diff --git a/libgcc/config/mips/t-slibgcc-irix b/libgcc/config/mips/t-slibgcc-irix
index 6e0ac365437..47b2e0eec21 100644
--- a/libgcc/config/mips/t-slibgcc-irix
+++ b/libgcc/config/mips/t-slibgcc-irix
@@ -3,5 +3,5 @@
SHLIB_LDFLAGS = -Wl,-soname,$(SHLIB_SONAME) \
-Wl,-exports_file,$(SHLIB_MAP)
-SHLIB_MKMAP = $(gcc_srcdir)/mkmap-flat.awk
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MAPFILES = libgcc-std.ver
diff --git a/libgcc/config/mips/t-vr b/libgcc/config/mips/t-vr
new file mode 100644
index 00000000000..601fbdece1a
--- /dev/null
+++ b/libgcc/config/mips/t-vr
@@ -0,0 +1,2 @@
+LIB2ADD_ST = $(srcdir)/config/mips/mips16.S \
+ $(srcdir)/config/mips/vr4120-div.S
diff --git a/libgcc/config/mips/vr4120-div.S b/libgcc/config/mips/vr4120-div.S
new file mode 100644
index 00000000000..79ede3de955
--- /dev/null
+++ b/libgcc/config/mips/vr4120-div.S
@@ -0,0 +1,74 @@
+/* Support file for -mfix-vr4120.
+ Copyright (C) 2002, 2004, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file contains functions which implement divsi3 and modsi3 for
+ -mfix-vr4120. div and ddiv do not give the correct result when one
+ of the operands is negative. */
+
+ .set nomips16
+
+#define DIV \
+ xor $3,$4,$5 /* t = x ^ y */ ; \
+ li $2,0x80000000; \
+ .set noreorder; \
+ bgez $4,1f /* x >= 0 */; \
+ and $3,$3,$2 /* t = (x ^ y) & 0x80000000 in delay slot */ ;\
+ .set reorder; \
+ subu $4,$0,$4 /* x = -x */ ; \
+1:; \
+ .set noreorder; \
+ bgez $5,2f /* y >= 0 */ ; \
+ nop; \
+ subu $5,$0,$5 /* y = -y */ ; \
+ .set reorder; \
+2:; \
+ divu $0,$4,$5; /* we use divu because of INT_MIN */ \
+ .set noreorder; \
+ bne $5,$0,3f; \
+ nop; \
+ break 7 /* division by zero */ ; \
+3:; \
+ .set reorder; \
+ mflo $2 /* r = x / y */ ; \
+ .set noreorder; \
+ beq $3,$0,4f /* t == 0 */ ; \
+ nop; \
+ subu $2,$0,$2 /* r = -r */ ; \
+ .set reorder; \
+4:
+
+ .globl __vr4120_divsi3
+ .ent __vr4120_divsi3
+__vr4120_divsi3:
+ DIV
+ j $31
+ .end __vr4120_divsi3
+
+ .globl __vr4120_modsi3
+ .ent __vr4120_modsi3
+__vr4120_modsi3:
+ move $6,$4 # x1 = x
+ move $7,$5 # y1 = y
+ DIV
+ mult $2,$7 # r = r * y1
+ mflo $2
+ .set noreorder
+ j $31
+ subu $2,$6,$2 # r = x1 - r in delay slot
+ .end __vr4120_modsi3
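
A minimal C model of the DIV macro above, as a hedged illustration (the
function name is invented; nothing like it exists in libgcc):

    /* Divide on magnitudes with an unsigned divide -- the only case the
       VR4120 handles correctly -- then patch the sign of the quotient.
       Y must be nonzero; the macro raises "break 7" otherwise.  */
    static int
    vr4120_div_model (int x, int y)
    {
      unsigned int t = (x ^ y) & 0x80000000u;          /* quotient sign */
      unsigned int ux = x < 0 ? -(unsigned int) x : (unsigned int) x;
      unsigned int uy = y < 0 ? -(unsigned int) y : (unsigned int) y;
      unsigned int r = ux / uy;                        /* divu */
      return t ? -(int) r : (int) r;
    }
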
diff --git a/libgcc/config/mmix/crti.S b/libgcc/config/mmix/crti.S
new file mode 100644
index 00000000000..69858046975
--- /dev/null
+++ b/libgcc/config/mmix/crti.S
@@ -0,0 +1,116 @@
+/* Copyright (C) 2001, 2002, 2011 Free Software Foundation, Inc.
+ Contributed by Hans-Peter Nilsson <hp@bitrange.com>
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+% This is the crt0 equivalent for mmix-knuth-mmixware: it sets things up
+% for compiler-generated assembly code, bridges the gap between the
+% simulator's entry point and main, and shuts things down on the way
+% back. There's an actual crt0.o elsewhere, but that's a dummy.
+
+% This file and the GCC output are supposed to be *reasonably*
+% mmixal-compatible to enable people to re-use output with Knuth's mmixal.
+% However, forward references are used more freely: we are using the
+% binutils tools. Users of mmixal beware; you will sometimes have to
+% re-order things or use temporary variables.
+
+% Users of mmixal will want to set up 8H and 9H to be .text and .data
+% respectively, so the compiler can switch between them pretending they're
+% segments.
+
+% This little treasure is here so the 32 lowest address bits of user data
+% will not be zero. Because of truncation, that would cause testcase
+% gcc.c-torture/execute/980701-1.c to incorrectly fail.
+
+ .data ! mmixal:= 8H LOC Data_Segment
+ .p2align 3
+ LOC @+(8-@)@7
+ OCTA 2009
+
+ .text ! mmixal:= 9H LOC 8B; LOC #100
+ .global Main
+
+% The __Stack_start symbol is provided by the link script.
+stackpp OCTA __Stack_start
+
+% "Main" is the magic symbol the simulator jumps to. We want to go
+% on to "main".
+% We need to set rG explicitly to avoid hard-to-debug situations.
+Main SETL $255,32
+ PUT rG,$255
+
+% Initialize the stack pointer. It is supposedly made a global
+% zero-initialized (allowed to change) register in crtn.S; we use the
+% explicit number.
+ GETA $255,stackpp
+ LDOU $254,$255,0
+
+% Make sure we get more than one mem, to simplify counting cycles.
+ LDBU $255,$1,0
+ LDBU $255,$1,1
+
+ PUSHJ $2,_init
+
+#ifdef __MMIX_ABI_GNU__
+% Copy argc and argv from their initial position to argument registers
+% where necessary.
+ SET $231,$0
+ SET $232,$1
+#else
+% For the mmixware ABI, we need to move arguments. The return value will
+% appear in $0.
+ SET $2,$1
+ SET $1,$0
+#endif
+
+ PUSHJ $0,main
+ JMP exit
+
+% Provide the first part of _init and _fini. Save the return address on the
+% register stack. We eventually ignore the return address of these
+% PUSHJ:s, so it doesn't matter whether .init and .fini code calls
+% functions or where they store rJ. We shouldn't get there, so die
+% (TRAP Halt) if that happens.
+
+ .section .init,"ax",@progbits
+ .global _init
+_init:
+ GET $0,:rJ
+ PUSHJ $1,0F
+ SETL $255,255
+ TRAP 0,0,0
+0H IS @
+
+% Register _fini to be executed as the last atexit function.
+#ifdef __MMIX_ABI_GNU__
+ GETA $231,_fini
+#else
+ GETA $1,_fini
+#endif
+ PUSHJ $0,atexit
+
+ .section .fini,"ax",@progbits
+ .global _fini
+_fini:
+ GET $0,:rJ
+ PUSHJ $1,0F
+ SETL $255,255
+ TRAP 0,0,0
+0H IS @
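
Taken together, the fragments above implement the MMIX startup handshake:
the simulator enters at Main, rG and the stack pointer are set up, _init
runs (and its .init prologue registers _fini with atexit), then main is
called and its result passed to exit. A C-level sketch of that control
flow; Main_sketch is a hypothetical stand-in for the assembly entry point:

    extern void _init (void), _fini (void);
    extern int main (int argc, char **argv);
    extern void exit (int);

    void Main_sketch (int argc, char **argv)
    {
      _init ();                  /* PUSHJ $2,_init; its .init prologue
                                    calls atexit (_fini) */
      exit (main (argc, argv));  /* PUSHJ $0,main; JMP exit */
    }
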
diff --git a/libgcc/config/mmix/crtn.S b/libgcc/config/mmix/crtn.S
new file mode 100644
index 00000000000..c109e54db01
--- /dev/null
+++ b/libgcc/config/mmix/crtn.S
@@ -0,0 +1,87 @@
+/* Copyright (C) 2001, 2002, 2009 Free Software Foundation, Inc.
+ Contributed by Hans-Peter Nilsson <hp@bitrange.com>
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+% This must be the last file on the link-line, allocating global registers
+% from the top.
+
+% Register $254 is the stack-pointer.
+sp GREG
+
+% Register $253 is the frame pointer. It's not supposed to be used in
+% most functions.
+fp GREG
+
+% $252 is the static chain register; nested functions receive the
+% context of the surrounding function through a pointer passed in this
+% register.
+static_chain GREG
+struct_value_reg GREG
+
+% These registers are used to pass state at an exceptional return (C++).
+eh_state_3 GREG
+eh_state_2 GREG
+eh_state_1 GREG
+eh_state_0 GREG
+
+#ifdef __MMIX_ABI_GNU__
+
+% Allocate global registers used by the GNU ABI.
+gnu_parm_reg_16 GREG
+gnu_parm_reg_15 GREG
+gnu_parm_reg_14 GREG
+gnu_parm_reg_13 GREG
+gnu_parm_reg_12 GREG
+gnu_parm_reg_11 GREG
+gnu_parm_reg_10 GREG
+gnu_parm_reg_9 GREG
+gnu_parm_reg_8 GREG
+gnu_parm_reg_7 GREG
+gnu_parm_reg_6 GREG
+gnu_parm_reg_5 GREG
+gnu_parm_reg_4 GREG
+gnu_parm_reg_3 GREG
+gnu_parm_reg_2 GREG
+gnu_parm_reg_1 GREG
+
+#endif /* __MMIX_ABI_GNU__ */
+
+% Provide last part of _init and _fini.
+
+% The return address is stored in the topmost stored register in the
+% register-stack. We ignore the current value in rJ. It is probably
+% garbage because each fragment of _init and _fini may have their own idea
+% of the current stack frame, if they're cut out from a "real" function
+% like in gcc/crtstuff.c.
+
+ .section .init,"ax",@progbits
+ GETA $255,0F
+ PUT rJ,$255
+ POP 0,0
+0H PUT rJ,$0
+ POP 0,0
+
+ .section .fini,"ax",@progbits
+ GETA $255,0F
+ PUT rJ,$255
+ POP 0,0
+0H PUT rJ,$0
+ POP 0,0
diff --git a/libgcc/config/mmix/t-mmix b/libgcc/config/mmix/t-mmix
index 9d66737e256..2ee4f001ad6 100644
--- a/libgcc/config/mmix/t-mmix
+++ b/libgcc/config/mmix/t-mmix
@@ -1,14 +1,22 @@
+HOST_LIBGCC2_CFLAGS += -mlibfuncs -O2
+
+# We need to turn off some assumptions on normality for code in crtstuff.c
+# and crt{i,n}.S, specifically about execution not continuing past the
+# end of the section in the file being compiled. Thus we must stop the
+# assembler from generating stubbable PUSHJ relocs, because that will add
+# stubs at the end of the current section when necessary.
+CRTSTUFF_T_CFLAGS = -Wa,--no-stubs
+
# Don't use global registers in libraries.
# FIXME: Not applied at "root" level, so disabled at the moment to stop
# incorrect comparisons with -mabi=gnu.
#MULTILIB_EXTRA_OPTS = mno-base-addresses
-$(T)crti.o: $(gcc_srcdir)/config/mmix/crti.asm $(GCC_PASSES)
- $(crt_compile) $(INCLUDES) \
- $(CRTSTUFF_T_CFLAGS) -c -x assembler-with-cpp \
- $(gcc_srcdir)/config/mmix/crti.asm
+# Cannot use default rules due to $(CRTSTUFF_T_CFLAGS).
+CUSTOM_CRTIN = yes
+
+crti.o: $(srcdir)/config/mmix/crti.S
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -c $<
-$(T)crtn.o: $(gcc_srcdir)/config/mmix/crtn.asm $(GCC_PASSES)
- $(crt_compile) $(INCLUDES) \
- $(CRTSTUFF_T_CFLAGS) -c -x assembler-with-cpp \
- $(gcc_srcdir)/config/mmix/crtn.asm
+crtn.o: $(srcdir)/config/mmix/crtn.S
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -c $<
diff --git a/libgcc/config/moxie/crti.asm b/libgcc/config/moxie/crti.S
index f44582799a3..c59d3ecf0ef 100644
--- a/libgcc/config/moxie/crti.asm
+++ b/libgcc/config/moxie/crti.S
@@ -1,6 +1,6 @@
-# crti.asm for moxie
+# crti.S for moxie
#
-# Copyright (C) 2009 Free Software Foundation
+# Copyright (C) 2009, 2011 Free Software Foundation
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -25,7 +25,7 @@
# .init sections. Users may put any desired instructions in those
# sections.
- .file "crti.asm"
+ .file "crti.S"
.section ".init"
.global _init
diff --git a/libgcc/config/moxie/crtn.asm b/libgcc/config/moxie/crtn.S
index 3ac9d31eed8..4455237a602 100644
--- a/libgcc/config/moxie/crtn.asm
+++ b/libgcc/config/moxie/crtn.S
@@ -1,6 +1,6 @@
-# crtn.asm for moxie
+# crtn.S for moxie
#
-# Copyright (C) 2009 Free Software Foundation
+# Copyright (C) 2009, 2011 Free Software Foundation
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -25,7 +25,7 @@
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
- .file "crtn.asm"
+ .file "crtn.S"
.section ".init"
ret
diff --git a/libgcc/config/moxie/t-moxie b/libgcc/config/moxie/t-moxie
deleted file mode 100644
index 6e62aec2224..00000000000
--- a/libgcc/config/moxie/t-moxie
+++ /dev/null
@@ -1,9 +0,0 @@
-# Assemble startup files.
-
-$(T)crti.o: $(srcdir)/config/moxie/crti.asm $(GCC_PASSES)
- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
- -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/moxie/crti.asm
-
-$(T)crtn.o: $(srcdir)/config/moxie/crtn.asm $(GCC_PASSES)
- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
- -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/moxie/crtn.asm
diff --git a/libgcc/config/pa/fptr.c b/libgcc/config/pa/fptr.c
new file mode 100644
index 00000000000..320d18267c8
--- /dev/null
+++ b/libgcc/config/pa/fptr.c
@@ -0,0 +1,131 @@
+/* Subroutine for function pointer canonicalization on PA-RISC with ELF32.
+ Copyright 2002, 2003, 2004, 2007, 2009 Free Software Foundation, Inc.
+ Contributed by John David Anglin (dave.anglin@nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* WARNING: The code in this function depends on internal and undocumented
+ details of the GNU linker and dynamic loader as implemented for parisc
+ linux. */
+
+/* This MUST match the defines in sysdeps/hppa/dl-machine.h and
+ bfd/elf32-hppa.c. */
+#define GOT_FROM_PLT_STUB (4*4)
+
+/* List of byte offsets in _dl_runtime_resolve to search for "bl" branches.
+ The first "bl" branch instruction found MUST be a call to fixup. See
+ the define for TRAMPOLINE_TEMPLATE in sysdeps/hppa/dl-machine.h. If
+ the trampoline template is changed, the list must be appropriately
+ updated. The offset of -4 allows for a magic branch at the start of
+ the template should it be necessary to change the current branch
+ position. */
+#define NOFFSETS 2
+static int fixup_branch_offset[NOFFSETS] = { 32, -4 };
+
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+#define SIGN_EXTEND(VAL,BITS) \
+ ((int) ((VAL) >> ((BITS) - 1) ? (-1 << (BITS)) | (VAL) : (VAL)))
+
+struct link_map;
+typedef int (*fptr_t) (void);
+typedef int (*fixup_t) (struct link_map *, unsigned int);
+extern unsigned int _GLOBAL_OFFSET_TABLE_;
+
+/* __canonicalize_funcptr_for_compare must be hidden so that it is not
+ placed in the dynamic symbol table. Like millicode functions, it
+ must be linked into all binaries in order to access the GOT table of
+ that binary. However, we don't use the millicode calling convention
+ and the routine must be a normal function so that it can be compiled
+ as pic code. */
+unsigned int __canonicalize_funcptr_for_compare (fptr_t)
+ __attribute__ ((visibility ("hidden")));
+
+unsigned int
+__canonicalize_funcptr_for_compare (fptr_t fptr)
+{
+ static unsigned int fixup_plabel[2];
+ static fixup_t fixup;
+ unsigned int *plabel, *got;
+
+ /* -1 and page 0 are special. -1 is used in crtend to mark the end of
+ a list of function pointers. Also return immediately if the plabel
+ bit is not set in the function pointer. In this case, the function
+ pointer points directly to the function. */
+ if ((int) fptr == -1 || (unsigned int) fptr < 4096 || !((int) fptr & 2))
+ return (unsigned int) fptr;
+
+ /* The function pointer points to a function descriptor (plabel). If
+ the plabel hasn't been resolved, the first word of the plabel points
+ to the entry of the PLT stub just before the global offset table.
+ The second word in the plabel contains the relocation offset for the
+ function. */
+ plabel = (unsigned int *) ((unsigned int) fptr & ~3);
+ got = (unsigned int *) (plabel[0] + GOT_FROM_PLT_STUB);
+
+ /* Return the address of the function if the plabel has been resolved. */
+ if (got != &_GLOBAL_OFFSET_TABLE_)
+ return plabel[0];
+
+ /* Initialize our plabel for calling fixup if we haven't done so already.
+ This code needs to be thread safe but we don't have to be too careful
+ as the result is invariant. */
+ if (!fixup)
+ {
+ int i;
+ unsigned int *iptr;
+
+ /* Find the first "bl" branch in the offset search list. This is a
+ call to fixup or a magic branch to fixup at the beginning of the
+ trampoline template. The fixup function does the actual runtime
+ resolution of function descriptors. We only look for "bl" branches
+ with a 17-bit pc-relative displacement. */
+ for (i = 0; i < NOFFSETS; i++)
+ {
+ iptr = (unsigned int *) (got[-2] + fixup_branch_offset[i]);
+ if ((*iptr & 0xfc00e000) == 0xe8000000)
+ break;
+ }
+
+ /* This should not happen... */
+ if (i == NOFFSETS)
+ return ~0;
+
+ /* Extract the 17-bit displacement from the instruction. */
+ iptr += SIGN_EXTEND (GET_FIELD (*iptr, 19, 28) |
+ GET_FIELD (*iptr, 29, 29) << 10 |
+ GET_FIELD (*iptr, 11, 15) << 11 |
+ GET_FIELD (*iptr, 31, 31) << 16, 17);
+
+ /* Build a plabel for an indirect call to fixup. */
+ fixup_plabel[0] = (unsigned int) iptr + 8; /* address of fixup */
+ fixup_plabel[1] = got[-1]; /* ltp for fixup */
+ fixup = (fixup_t) ((int) fixup_plabel | 3);
+ }
+
+ /* Call fixup to resolve the function address. got[1] contains the
+ link_map pointer and plabel[1] the relocation offset. */
+ fixup ((struct link_map *) got[1], plabel[1]);
+
+ return plabel[0];
+}
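
On ELF32 PA with lazy binding, two pointers to the same function can be
bit-wise different plabels, so GCC routes function pointer equality through
the routine above. A sketch of the comparison it enables; cmp_fptr is a
hypothetical helper, not part of the patch:

    typedef int (*fptr_t) (void);
    extern unsigned int __canonicalize_funcptr_for_compare (fptr_t);

    /* Compare two function pointers for identity.  Raw a == b can
       differ across shared objects even when both name the same
       function; canonicalizing first resolves each plabel.  */
    int cmp_fptr (fptr_t a, fptr_t b)
    {
      return __canonicalize_funcptr_for_compare (a)
             == __canonicalize_funcptr_for_compare (b);
    }
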
diff --git a/libgcc/config/pa/gthr-dce.h b/libgcc/config/pa/gthr-dce.h
new file mode 100644
index 00000000000..d32155a9352
--- /dev/null
+++ b/libgcc/config/pa/gthr-dce.h
@@ -0,0 +1,563 @@
+/* Threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1999, 2000, 2001, 2004, 2005, 2008, 2009
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_DCE_H
+#define GCC_GTHR_DCE_H
+
+/* If _DCE_THREADS is not defined, then we're building the single
+ threaded version of the libraries and do not want to reference
+ anything related to pthreads or dce. */
+#ifndef _DCE_THREADS
+#include "gthr-single.h"
+#else
+/* DCE threads interface.
+ DCE threads are based on POSIX threads draft 4, and many things
+ have changed since then. */
+
+/* Make sure CONST_CAST2 (original in system.h) is defined. */
+#ifndef CONST_CAST2
+#ifdef __cplusplus
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) (const_cast<TOTYPE> (X))
+#else
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
+#endif
+#endif
+
+#define __GTHREADS 1
+
+#include <pthread.h>
+
+typedef pthread_key_t __gthread_key_t;
+typedef pthread_once_t __gthread_once_t;
+typedef pthread_mutex_t __gthread_mutex_t;
+typedef pthread_mutex_t __gthread_recursive_mutex_t;
+
+#define __GTHREAD_ONCE_INIT pthread_once_init
+
+#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+
+#define __GTHREAD_MUTEX_INIT_DEFAULT pthread_once_init
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+# define __gthrw(name) \
+ static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name)));
+# define __gthrw_(name) __gthrw_ ## name
+#else
+# define __gthrw(name)
+# define __gthrw_(name) name
+#endif
+
+__gthrw(pthread_once)
+__gthrw(pthread_keycreate)
+__gthrw(pthread_getspecific)
+__gthrw(pthread_setspecific)
+__gthrw(pthread_create)
+__gthrw(pthread_mutex_init)
+__gthrw(pthread_mutex_destroy)
+__gthrw(pthread_mutex_lock)
+__gthrw(pthread_mutex_trylock)
+__gthrw(pthread_mutex_unlock)
+__gthrw(pthread_mutexattr_create)
+__gthrw(pthread_mutexattr_setkind_np)
+__gthrw(pthread_mutexattr_delete)
+
+#ifdef _LIBOBJC
+/* Objective-C. */
+__gthrw(pthread_cond_broadcast)
+__gthrw(pthread_cond_destroy)
+__gthrw(pthread_cond_init)
+__gthrw(pthread_cond_signal)
+__gthrw(pthread_cond_wait)
+__gthrw(pthread_exit)
+
+#ifdef pthread_getunique_np
+# define __gthrw_pthread_getunique_np pthread_getunique_np
+#else
+__gthrw(pthread_getunique_np)
+# define __gthrw_pthread_getunique_np __gthrw_(pthread_getunique_np)
+#endif
+
+__gthrw(pthread_mutex_destroy)
+__gthrw(pthread_self)
+__gthrw(pthread_yield)
+#endif
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+static inline int
+__gthread_active_p (void)
+{
+ static void *const __gthread_active_ptr = (void *) &__gthrw_(pthread_create);
+ return __gthread_active_ptr != 0;
+}
+
+#else /* not SUPPORTS_WEAK */
+
+static inline int
+__gthread_active_p (void)
+{
+ return 1;
+}
+
+#endif /* SUPPORTS_WEAK */
+
+#ifdef _LIBOBJC
+
+/* Key structure for maintaining thread specific storage */
+static pthread_key_t _objc_thread_storage;
+
+/* Thread local storage for a single thread */
+static void *thread_local_storage = NULL;
+
+/* Backend initialization functions */
+
+/* Initialize the threads subsystem. */
+static inline int
+__gthread_objc_init_thread_system (void)
+{
+ if (__gthread_active_p ())
+ /* Initialize the thread storage key. */
+ return __gthrw_(pthread_keycreate) (&_objc_thread_storage, NULL);
+ else
+ return -1;
+}
+
+/* Close the threads subsystem. */
+static inline int
+__gthread_objc_close_thread_system (void)
+{
+ if (__gthread_active_p ())
+ return 0;
+ else
+ return -1;
+}
+
+/* Backend thread functions */
+
+/* Create a new thread of execution. */
+static inline objc_thread_t
+__gthread_objc_thread_detach (void (*func)(void *), void *arg)
+{
+ objc_thread_t thread_id;
+ pthread_t new_thread_handle;
+
+ if (!__gthread_active_p ())
+ return NULL;
+
+ if (!(__gthrw_(pthread_create) (&new_thread_handle, pthread_attr_default,
+ (void *) func, arg)))
+ {
+ /* ??? May not work! (64bit) */
+ thread_id = *(objc_thread_t *) &new_thread_handle;
+ pthread_detach (&new_thread_handle); /* Fully detach thread. */
+ }
+ else
+ thread_id = NULL;
+
+ return thread_id;
+}
+
+/* Set the current thread's priority. */
+static inline int
+__gthread_objc_thread_set_priority (int priority)
+{
+ int sys_priority = 0;
+
+ if (!__gthread_active_p ())
+ return -1;
+
+ switch (priority)
+ {
+ case OBJC_THREAD_INTERACTIVE_PRIORITY:
+ sys_priority = (PRI_FG_MIN_NP + PRI_FG_MAX_NP) / 2;
+ break;
+ default:
+ case OBJC_THREAD_BACKGROUND_PRIORITY:
+ sys_priority = (PRI_BG_MIN_NP + PRI_BG_MAX_NP) / 2;
+ break;
+ case OBJC_THREAD_LOW_PRIORITY:
+ sys_priority = (PRI_BG_MIN_NP + PRI_BG_MAX_NP) / 2;
+ break;
+ }
+
+ /* Change the priority. */
+ if (pthread_setprio (__gthrw_(pthread_self) (), sys_priority) >= 0)
+ return 0;
+ else
+ /* Failed */
+ return -1;
+}
+
+/* Return the current thread's priority. */
+static inline int
+__gthread_objc_thread_get_priority (void)
+{
+ int sys_priority;
+
+ if (__gthread_active_p ())
+ {
+ if ((sys_priority = pthread_getprio (__gthrw_(pthread_self) ())) >= 0)
+ {
+ if (sys_priority >= PRI_FG_MIN_NP
+ && sys_priority <= PRI_FG_MAX_NP)
+ return OBJC_THREAD_INTERACTIVE_PRIORITY;
+ if (sys_priority >= PRI_BG_MIN_NP
+ && sys_priority <= PRI_BG_MAX_NP)
+ return OBJC_THREAD_BACKGROUND_PRIORITY;
+ return OBJC_THREAD_LOW_PRIORITY;
+ }
+
+ /* Failed */
+ return -1;
+ }
+ else
+ return OBJC_THREAD_INTERACTIVE_PRIORITY;
+}
+
+/* Yield our process time to another thread. */
+static inline void
+__gthread_objc_thread_yield (void)
+{
+ if (__gthread_active_p ())
+ __gthrw_(pthread_yield) ();
+}
+
+/* Terminate the current thread. */
+static inline int
+__gthread_objc_thread_exit (void)
+{
+ if (__gthread_active_p ())
+ /* exit the thread */
+ __gthrw_(pthread_exit) (&__objc_thread_exit_status);
+
+ /* Failed if we reached here */
+ return -1;
+}
+
+/* Returns an integer value which uniquely describes a thread. */
+static inline objc_thread_t
+__gthread_objc_thread_id (void)
+{
+ if (__gthread_active_p ())
+ {
+ pthread_t self = __gthrw_(pthread_self) ();
+
+ return (objc_thread_t) __gthrw_pthread_getunique_np (&self);
+ }
+ else
+ return (objc_thread_t) 1;
+}
+
+/* Sets the thread's local storage pointer. */
+static inline int
+__gthread_objc_thread_set_data (void *value)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_setspecific) (_objc_thread_storage, value);
+ else
+ {
+ thread_local_storage = value;
+ return 0;
+ }
+}
+
+/* Returns the thread's local storage pointer. */
+static inline void *
+__gthread_objc_thread_get_data (void)
+{
+ void *value = NULL;
+
+ if (__gthread_active_p ())
+ {
+ if (!(__gthrw_(pthread_getspecific) (_objc_thread_storage, &value)))
+ return value;
+
+ return NULL;
+ }
+ else
+ return thread_local_storage;
+}
+
+/* Backend mutex functions */
+
+/* Allocate a mutex. */
+static inline int
+__gthread_objc_mutex_allocate (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ {
+ mutex->backend = objc_malloc (sizeof (pthread_mutex_t));
+
+ if (__gthrw_(pthread_mutex_init) ((pthread_mutex_t *) mutex->backend,
+ pthread_mutexattr_default))
+ {
+ objc_free (mutex->backend);
+ mutex->backend = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Deallocate a mutex. */
+static inline int
+__gthread_objc_mutex_deallocate (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ {
+ if (__gthrw_(pthread_mutex_destroy) ((pthread_mutex_t *) mutex->backend))
+ return -1;
+
+ objc_free (mutex->backend);
+ mutex->backend = NULL;
+ }
+
+ return 0;
+}
+
+/* Grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_lock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_lock) ((pthread_mutex_t *) mutex->backend);
+ else
+ return 0;
+}
+
+/* Try to grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_trylock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ()
+ && __gthrw_(pthread_mutex_trylock) ((pthread_mutex_t *) mutex->backend) != 1)
+ return -1;
+
+ return 0;
+}
+
+/* Unlock the mutex */
+static inline int
+__gthread_objc_mutex_unlock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_unlock) ((pthread_mutex_t *) mutex->backend);
+ else
+ return 0;
+}
+
+/* Backend condition mutex functions */
+
+/* Allocate a condition. */
+static inline int
+__gthread_objc_condition_allocate (objc_condition_t condition
+ __attribute__ ((__unused__)))
+{
+ if (__gthread_active_p ())
+ /* Unimplemented. */
+ return -1;
+ else
+ return 0;
+}
+
+/* Deallocate a condition. */
+static inline int
+__gthread_objc_condition_deallocate (objc_condition_t condition
+ __attribute__ ((__unused__)))
+{
+ if (__gthread_active_p ())
+ /* Unimplemented. */
+ return -1;
+ else
+ return 0;
+}
+
+/* Wait on the condition */
+static inline int
+__gthread_objc_condition_wait (objc_condition_t condition
+ __attribute__ ((__unused__)),
+ objc_mutex_t mutex __attribute__ ((__unused__)))
+{
+ if (__gthread_active_p ())
+ /* Unimplemented. */
+ return -1;
+ else
+ return 0;
+}
+
+/* Wake up all threads waiting on this condition. */
+static inline int
+__gthread_objc_condition_broadcast (objc_condition_t condition
+ __attribute__ ((__unused__)))
+{
+ if (__gthread_active_p ())
+ /* Unimplemented. */
+ return -1;
+ else
+ return 0;
+}
+
+/* Wake up one thread waiting on this condition. */
+static inline int
+__gthread_objc_condition_signal (objc_condition_t condition
+ __attribute__ ((__unused__)))
+{
+ if (__gthread_active_p ())
+ /* Unimplemented. */
+ return -1;
+ else
+ return 0;
+}
+
+#else /* _LIBOBJC */
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_once) (__once, __func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ return __gthrw_(pthread_keycreate) (__key, __dtor);
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key __attribute__ ((__unused__)))
+{
+ /* Operation is not supported. */
+ return -1;
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ void *__ptr;
+ if (__gthrw_(pthread_getspecific) (__key, &__ptr) == 0)
+ return __ptr;
+ else
+ return 0;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ return __gthrw_(pthread_setspecific)
+ (__key, CONST_CAST2(void *, const void *, __ptr));
+}
+
+static inline void
+__gthread_mutex_init_function (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ __gthrw_(pthread_mutex_init) (__mutex, pthread_mutexattr_default);
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_destroy) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_lock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_trylock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_unlock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ pthread_mutexattr_t __attr;
+ int __r;
+
+ __r = __gthrw_(pthread_mutexattr_create) (&__attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_setkind_np) (&__attr,
+ MUTEX_RECURSIVE_NP);
+ if (!__r)
+ __r = __gthrw_(pthread_mutex_init) (__mutex, __attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_delete) (&__attr);
+ return __r;
+ }
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_lock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_trylock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_unlock (__mutex);
+}
+
+#endif /* _LIBOBJC */
+
+#endif
+#endif /* ! GCC_GTHR_DCE_H */
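
The point of this header is that libgcc and the runtime libraries code
against one __gthread_* surface and let gthr.h pick the backend (here,
DCE threads when _DCE_THREADS is defined, gthr-single.h otherwise). A
sketch of typical consumer code under that assumption; guarded and the
my_* names are hypothetical:

    #include "gthr.h"   /* selects gthr-dce.h on this target */

    static __gthread_once_t my_once = __GTHREAD_ONCE_INIT;
    static __gthread_mutex_t my_lock;

    static void my_once_init (void)
    {
      __GTHREAD_MUTEX_INIT_FUNCTION (&my_lock);
    }

    void guarded (void)
    {
      /* These avoid touching pthreads entirely when
         __gthread_active_p () reports a single-threaded program.  */
      __gthread_once (&my_once, my_once_init);
      __gthread_mutex_lock (&my_lock);
      /* ... critical section ... */
      __gthread_mutex_unlock (&my_lock);
    }
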
diff --git a/libgcc/config/pa/lib2funcs.S b/libgcc/config/pa/lib2funcs.S
new file mode 100644
index 00000000000..8aa398c8797
--- /dev/null
+++ b/libgcc/config/pa/lib2funcs.S
@@ -0,0 +1,74 @@
+; Subroutines for calling unbound dynamic functions from within GDB for HPPA.
+; Subroutines for out-of-line prologues and epilogues for the HPPA.
+; Copyright (C) 1994, 1995, 1996, 2009 Free Software Foundation, Inc.
+
+; This file is part of GCC.
+
+; GCC is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 3, or (at your option)
+; any later version.
+
+; GCC is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+
+; Under Section 7 of GPL version 3, you are granted additional
+; permissions described in the GCC Runtime Library Exception, version
+; 3.1, as published by the Free Software Foundation.
+
+; You should have received a copy of the GNU General Public License and
+; a copy of the GCC Runtime Library Exception along with this program;
+; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+; <http://www.gnu.org/licenses/>.
+
+#if !defined(__pro__) && !defined(__rtems__)
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SUBSPA $MILLICODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=8
+#endif
+ .IMPORT $$dyncall,MILLICODE
+#if !defined(__pro__) && !defined(__rtems__)
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+#else
+ .text
+#endif
+
+; Simply call with the address of the desired import stub in %r22 and
+; arguments in the normal place (%r26-%r23 and stack slots).
+;
+ .align 4
+ .EXPORT __gcc_plt_call,ENTRY,PRIV_LEV=3,RTNVAL=GR
+__gcc_plt_call
+ .PROC
+ .CALLINFO
+ .ENTRY
+ ; Our return address comes in %r31, not %r2!
+ stw %r31,-8(%r30)
+
+ ; An inline version of dyncall so we don't have to worry
+ ; about long calls to millicode, PIC and other complexities.
+ bb,>=,n %r22,30,L$foo
+ depi 0,31,2,%r22
+ ldw 4(%r22),%r19
+ ldw 0(%r22),%r22
+L$foo
+ ldsid (%r22),%r1
+ mtsp %r1,%sr0
+ ble 0(%sr0,%r22)
+ copy %r31,%r2
+ ldw -8(%r30),%r2
+
+ ; We're going to be returning to a stack address, so we
+ ; need to do an intra-space return.
+ ldsid (%rp),%r1
+ mtsp %r1,%sr0
+ be,n 0(%sr0,%rp)
+ .EXIT
+ .PROCEND
diff --git a/libgcc/config/pa/linux-atomic.c b/libgcc/config/pa/linux-atomic.c
new file mode 100644
index 00000000000..2ae2426357a
--- /dev/null
+++ b/libgcc/config/pa/linux-atomic.c
@@ -0,0 +1,305 @@
+/* Linux-specific atomic operations for PA Linux.
+ Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ Based on code contributed by CodeSourcery for ARM EABI Linux.
+ Modifications for PA Linux by Helge Deller <deller@gmx.de>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define EFAULT 14
+#define EBUSY 16
+#define ENOSYS 251
+
+/* All PA-RISC implementations supported by linux have strongly
+ ordered loads and stores. Only cache flushes and purges can be
+ delayed. The data cache implementations are all globally
+ coherent. Thus, there is no need to synchronize memory accesses.
+
+ GCC automatically issues an asm memory barrier when it encounters
+ a __sync_synchronize builtin. Thus, we do not need to define this
+ builtin.
+
+ We implement byte, short and int versions of each atomic operation
+ using the kernel helper defined below. There is no support for
+ 64-bit operations yet. */
+
+/* A privileged instruction to crash a userspace program with SIGILL. */
+#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")
+
+/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace). */
+#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)
+
+/* Kernel helper for compare-and-exchange a 32-bit value. */
+static inline long
+__kernel_cmpxchg (int oldval, int newval, int *mem)
+{
+ register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
+ register long lws_ret asm("r28");
+ register long lws_errno asm("r21");
+ register int lws_old asm("r25") = oldval;
+ register int lws_new asm("r24") = newval;
+ asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
+ "ldi %5, %%r20 \n\t"
+ : "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
+ "=r" (lws_old), "=r" (lws_new)
+ : "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+ : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
+ );
+ if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
+ ABORT_INSTRUCTION;
+
+ /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
+ the old value from memory. If this value is equal to OLDVAL, the
+ new value was written to memory. If not, return -EBUSY. */
+ if (!lws_errno && lws_ret != oldval)
+ lws_errno = -EBUSY;
+
+ return lws_errno;
+}
+
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+/* Big endian masks */
+#define INVERT_MASK_1 24
+#define INVERT_MASK_2 16
+
+#define MASK_1 0xffu
+#define MASK_2 0xffffu
+
+#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+ { \
+ int failure, tmp; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ } while (failure != 0); \
+ \
+ return tmp; \
+ }
+
+FETCH_AND_OP_WORD (add, , +)
+FETCH_AND_OP_WORD (sub, , -)
+FETCH_AND_OP_WORD (or, , |)
+FETCH_AND_OP_WORD (and, , &)
+FETCH_AND_OP_WORD (xor, , ^)
+FETCH_AND_OP_WORD (nand, ~, &)
+
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
+ subword-sized quantities. */
+
+#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
+ TYPE HIDDEN \
+ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
+ { \
+ int *wordptr = (int *) ((unsigned long) ptr & ~3); \
+ unsigned int mask, shift, oldval, newval; \
+ int failure; \
+ \
+ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = ((PFX_OP (((oldval & mask) >> shift) \
+ INF_OP (unsigned int) val)) << shift) & mask; \
+ newval |= oldval & ~mask; \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (RETURN & mask) >> shift; \
+ }
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
+
+#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+ { \
+ int tmp, failure; \
+ \
+ do { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ } while (failure != 0); \
+ \
+ return PFX_OP (tmp INF_OP val); \
+ }
+
+OP_AND_FETCH_WORD (add, , +)
+OP_AND_FETCH_WORD (sub, , -)
+OP_AND_FETCH_WORD (or, , |)
+OP_AND_FETCH_WORD (and, , &)
+OP_AND_FETCH_WORD (xor, , ^)
+OP_AND_FETCH_WORD (nand, ~, &)
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
+
+int HIDDEN
+__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int actual_oldval, fail;
+
+ while (1)
+ {
+ actual_oldval = *ptr;
+
+ if (__builtin_expect (oldval != actual_oldval, 0))
+ return actual_oldval;
+
+ fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+
+ if (__builtin_expect (!fail, 1))
+ return actual_oldval;
+ }
+}
+
+#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ int *wordptr = (int *)((unsigned long) ptr & ~3), fail; \
+ unsigned int mask, shift, actual_oldval, actual_newval; \
+ \
+ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ while (1) \
+ { \
+ actual_oldval = *wordptr; \
+ \
+ if (__builtin_expect (((actual_oldval & mask) >> shift) \
+ != (unsigned int) oldval, 0)) \
+ return (actual_oldval & mask) >> shift; \
+ \
+ actual_newval = (actual_oldval & ~mask) \
+ | (((unsigned int) newval << shift) & mask); \
+ \
+ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
+ wordptr); \
+ \
+ if (__builtin_expect (!fail, 1)) \
+ return (actual_oldval & mask) >> shift; \
+ } \
+ }
+
+SUBWORD_VAL_CAS (unsigned short, 2)
+SUBWORD_VAL_CAS (unsigned char, 1)
+
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int failure = __kernel_cmpxchg (oldval, newval, ptr);
+ return (failure == 0);
+}
+
+#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
+ bool HIDDEN \
+ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ TYPE actual_oldval \
+ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
+ return (oldval == actual_oldval); \
+ }
+
+SUBWORD_BOOL_CAS (unsigned short, 2)
+SUBWORD_BOOL_CAS (unsigned char, 1)
+
+int HIDDEN
+__sync_lock_test_and_set_4 (int *ptr, int val)
+{
+ int failure, oldval;
+
+ do {
+ oldval = *ptr;
+ failure = __kernel_cmpxchg (oldval, val, ptr);
+ } while (failure != 0);
+
+ return oldval;
+}
+
+#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+ { \
+ int failure; \
+ unsigned int oldval, newval, shift, mask; \
+ int *wordptr = (int *) ((unsigned long) ptr & ~3); \
+ \
+ shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do { \
+ oldval = *wordptr; \
+ newval = (oldval & ~mask) \
+ | (((unsigned int) val << shift) & mask); \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } while (failure != 0); \
+ \
+ return (oldval & mask) >> shift; \
+ }
+
+SUBWORD_TEST_AND_SET (unsigned short, 2)
+SUBWORD_TEST_AND_SET (unsigned char, 1)
+
+#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
+ void HIDDEN \
+ __sync_lock_release_##WIDTH (TYPE *ptr) \
+ { \
+ *ptr = 0; \
+ }
+
+SYNC_LOCK_RELEASE (int, 4)
+SYNC_LOCK_RELEASE (short, 2)
+SYNC_LOCK_RELEASE (char, 1)
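
The subword variants above all hinge on the same address arithmetic: find
the containing word, then compute a shift and mask that isolate the byte or
halfword under PA's big-endian layout (hence the INVERT_MASK_* XOR). A
worked C demonstration of that computation; show_byte_slot is a
hypothetical helper:

    #include <stdio.h>

    /* Reproduce the shift/mask computation from SUBWORD_SYNC_OP for a
       1-byte operand (INVERT_MASK_1 == 24, MASK_1 == 0xff).  */
    static void show_byte_slot (unsigned long addr)
    {
      unsigned int shift = ((addr & 3) << 3) ^ 24;
      unsigned int mask = 0xffu << shift;
      printf ("byte offset %lu -> shift %u, mask 0x%08x\n",
              addr & 3, shift, mask);
    }

    /* show_byte_slot (2) prints "byte offset 2 -> shift 8, mask
       0x0000ff00": on big-endian PA, byte 2 of a word lives in bits
       8..15, and the CAS loop rewrites only those bits.  */
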
diff --git a/libgcc/config/pa/milli64.S b/libgcc/config/pa/milli64.S
new file mode 100644
index 00000000000..2e9c4f741b6
--- /dev/null
+++ b/libgcc/config/pa/milli64.S
@@ -0,0 +1,2134 @@
+/* 32- and 64-bit millicode, original author Hewlett-Packard;
+ adapted for gcc by Paul Bame <bame@debian.org>
+ and Alan Modra <alan@linuxcare.com.au>.
+
+ Copyright 2001, 2002, 2003, 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef pa64
+ .level 2.0w
+#endif
+
+/* Hardware General Registers. */
+r0: .reg %r0
+r1: .reg %r1
+r2: .reg %r2
+r3: .reg %r3
+r4: .reg %r4
+r5: .reg %r5
+r6: .reg %r6
+r7: .reg %r7
+r8: .reg %r8
+r9: .reg %r9
+r10: .reg %r10
+r11: .reg %r11
+r12: .reg %r12
+r13: .reg %r13
+r14: .reg %r14
+r15: .reg %r15
+r16: .reg %r16
+r17: .reg %r17
+r18: .reg %r18
+r19: .reg %r19
+r20: .reg %r20
+r21: .reg %r21
+r22: .reg %r22
+r23: .reg %r23
+r24: .reg %r24
+r25: .reg %r25
+r26: .reg %r26
+r27: .reg %r27
+r28: .reg %r28
+r29: .reg %r29
+r30: .reg %r30
+r31: .reg %r31
+
+/* Hardware Space Registers. */
+sr0: .reg %sr0
+sr1: .reg %sr1
+sr2: .reg %sr2
+sr3: .reg %sr3
+sr4: .reg %sr4
+sr5: .reg %sr5
+sr6: .reg %sr6
+sr7: .reg %sr7
+
+/* Hardware Floating Point Registers. */
+fr0: .reg %fr0
+fr1: .reg %fr1
+fr2: .reg %fr2
+fr3: .reg %fr3
+fr4: .reg %fr4
+fr5: .reg %fr5
+fr6: .reg %fr6
+fr7: .reg %fr7
+fr8: .reg %fr8
+fr9: .reg %fr9
+fr10: .reg %fr10
+fr11: .reg %fr11
+fr12: .reg %fr12
+fr13: .reg %fr13
+fr14: .reg %fr14
+fr15: .reg %fr15
+
+/* Hardware Control Registers. */
+cr11: .reg %cr11
+sar: .reg %cr11 /* Shift Amount Register */
+
+/* Software Architecture General Registers. */
+rp: .reg r2 /* return pointer */
+#ifdef pa64
+mrp: .reg r2 /* millicode return pointer */
+#else
+mrp: .reg r31 /* millicode return pointer */
+#endif
+ret0: .reg r28 /* return value */
+ret1: .reg r29 /* return value (high part of double) */
+sp: .reg r30 /* stack pointer */
+dp: .reg r27 /* data pointer */
+arg0: .reg r26 /* argument */
+arg1: .reg r25 /* argument or high part of double argument */
+arg2: .reg r24 /* argument */
+arg3: .reg r23 /* argument or high part of double argument */
+
+/* Software Architecture Space Registers. */
+/* sr0 ; return link from BLE */
+sret: .reg sr1 /* return value */
+sarg: .reg sr1 /* argument */
+/* sr4 ; PC SPACE tracker */
+/* sr5 ; process private data */
+
+/* Frame Offsets (millicode convention!). Used when calling other
+ millicode routines. Stack unwinding is dependent upon these
+ definitions. */
+r31_slot: .equ -20 /* "current RP" slot */
+sr0_slot: .equ -16 /* "static link" slot */
+#if defined(pa64)
+mrp_slot: .equ -16 /* "current RP" slot */
+psp_slot: .equ -8 /* "previous SP" slot */
+#else
+mrp_slot: .equ -20 /* "current RP" slot (replacing "r31_slot") */
+#endif
+
+
+#define DEFINE(name,value)name: .EQU value
+#define RDEFINE(name,value)name: .REG value
+#ifdef milliext
+#define MILLI_BE(lbl) BE lbl(sr7,r0)
+#define MILLI_BEN(lbl) BE,n lbl(sr7,r0)
+#define MILLI_BLE(lbl) BLE lbl(sr7,r0)
+#define MILLI_BLEN(lbl) BLE,n lbl(sr7,r0)
+#define MILLIRETN BE,n 0(sr0,mrp)
+#define MILLIRET BE 0(sr0,mrp)
+#define MILLI_RETN BE,n 0(sr0,mrp)
+#define MILLI_RET BE 0(sr0,mrp)
+#else
+#define MILLI_BE(lbl) B lbl
+#define MILLI_BEN(lbl) B,n lbl
+#define MILLI_BLE(lbl) BL lbl,mrp
+#define MILLI_BLEN(lbl) BL,n lbl,mrp
+#define MILLIRETN BV,n 0(mrp)
+#define MILLIRET BV 0(mrp)
+#define MILLI_RETN BV,n 0(mrp)
+#define MILLI_RET BV 0(mrp)
+#endif
+
+#ifdef __STDC__
+#define CAT(a,b) a##b
+#else
+#define CAT(a,b) a/**/b
+#endif
+
+#ifdef ELF
+#define SUBSPA_MILLI .section .text
+#define SUBSPA_MILLI_DIV .section .text.div,"ax",@progbits! .align 16
+#define SUBSPA_MILLI_MUL .section .text.mul,"ax",@progbits! .align 16
+#define ATTR_MILLI
+#define SUBSPA_DATA .section .data
+#define ATTR_DATA
+#define GLOBAL $global$
+#define GSYM(sym) !sym:
+#define LSYM(sym) !CAT(.L,sym:)
+#define LREF(sym) CAT(.L,sym)
+
+#else
+
+#ifdef coff
+/* This used to be .milli, but since link32 places differently named
+ sections in different segments, millicode ends up a long way away
+ from .text (1meg?). This way they will be a lot closer.
+
+ The SUBSPA_MILLI_* specify locality sets for certain millicode
+ modules in order to ensure that modules that call one another are
+ placed close together. Without locality sets this is unlikely to
+ happen because of the Dynamite linker library search algorithm. We
+ want these modules close together so that short calls always reach
+ (we don't want to require long calls or use long call stubs). */
+
+#define SUBSPA_MILLI .subspa .text
+#define SUBSPA_MILLI_DIV .subspa .text$dv,align=16
+#define SUBSPA_MILLI_MUL .subspa .text$mu,align=16
+#define ATTR_MILLI .attr code,read,execute
+#define SUBSPA_DATA .subspa .data
+#define ATTR_DATA .attr init_data,read,write
+#define GLOBAL _gp
+#else
+#define SUBSPA_MILLI .subspa $MILLICODE$,QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=8
+#define SUBSPA_MILLI_DIV SUBSPA_MILLI
+#define SUBSPA_MILLI_MUL SUBSPA_MILLI
+#define ATTR_MILLI
+#define SUBSPA_DATA .subspa $BSS$,quad=1,align=8,access=0x1f,sort=80,zero
+#define ATTR_DATA
+#define GLOBAL $global$
+#endif
+#define SPACE_DATA .space $PRIVATE$,spnum=1,sort=16
+
+#define GSYM(sym) !sym
+#define LSYM(sym) !CAT(L$,sym)
+#define LREF(sym) CAT(L$,sym)
+#endif
+
+#ifdef L_dyncall
+ SUBSPA_MILLI
+ ATTR_DATA
+GSYM($$dyncall)
+ .export $$dyncall,millicode
+ .proc
+ .callinfo millicode
+ .entry
+ bb,>=,n %r22,30,LREF(1) ; branch if not plabel address
+ depi 0,31,2,%r22 ; clear the two least significant bits
+ ldw 4(%r22),%r19 ; load new LTP value
+ ldw 0(%r22),%r22 ; load address of target
+LSYM(1)
+#ifdef LINUX
+ bv %r0(%r22) ; branch to the real target
+#else
+ ldsid (%sr0,%r22),%r1 ; get the "space ident" selected by r22
+ mtsp %r1,%sr0 ; move that space identifier into sr0
+ be 0(%sr0,%r22) ; branch to the real target
+#endif
+ stw %r2,-24(%r30) ; save return address into frame marker
+ .exit
+ .procend
+#endif
+
+#ifdef L_divI
+/* ROUTINES: $$divI, $$divoI
+
+ Single precision divide for signed binary integers.
+
+ The quotient is truncated towards zero.
+ The sign of the quotient is the XOR of the signs of the dividend and
+ divisor.
+ Divide by zero is trapped.
+ Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.
+
+ INPUT REGISTERS:
+ . arg0 == dividend
+ . arg1 == divisor
+ . mrp == return pc
+ . sr0 == return space when called externally
+
+ OUTPUT REGISTERS:
+ . arg0 = undefined
+ . arg1 = undefined
+ . ret1 = quotient
+
+ OTHER REGISTERS AFFECTED:
+ . r1 = undefined
+
+ SIDE EFFECTS:
+ . Causes a trap under the following conditions:
+ . divisor is zero (traps with ADDIT,= 0,25,0)
+ . dividend==-2**31 and divisor==-1 and routine is $$divoI
+ . (traps with ADDO 26,25,0)
+ . Changes memory at the following places:
+ . NONE
+
+ PERMISSIBLE CONTEXT:
+ . Unwindable.
+ . Suitable for internal or external millicode.
+ . Assumes the special millicode register conventions.
+
+ DISCUSSION:
+ . Branches to other millicode routines using BE
+ . $$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15
+ .
+ . For selected divisors, calls a divide by constant routine written by
+ . Karl Pettis. Eligible divisors are 1..15 excluding 11 and 13.
+ .
+ . The only overflow case is -2**31 divided by -1.
+ . Both routines return -2**31 but only $$divoI traps. */
+
+RDEFINE(temp,r1)
+RDEFINE(retreg,ret1) /* r29 */
+RDEFINE(temp1,arg0)
+ SUBSPA_MILLI_DIV
+ ATTR_MILLI
+ .import $$divI_2,millicode
+ .import $$divI_3,millicode
+ .import $$divI_4,millicode
+ .import $$divI_5,millicode
+ .import $$divI_6,millicode
+ .import $$divI_7,millicode
+ .import $$divI_8,millicode
+ .import $$divI_9,millicode
+ .import $$divI_10,millicode
+ .import $$divI_12,millicode
+ .import $$divI_14,millicode
+ .import $$divI_15,millicode
+ .export $$divI,millicode
+ .export $$divoI,millicode
+ .proc
+ .callinfo millicode
+ .entry
+GSYM($$divoI)
+ comib,=,n -1,arg1,LREF(negative1) /* when divisor == -1 */
+GSYM($$divI)
+ ldo -1(arg1),temp /* is there at most one bit set ? */
+ and,<> arg1,temp,r0 /* if not, don't use power of 2 divide */
+ addi,> 0,arg1,r0 /* if divisor > 0, use power of 2 divide */
+ b,n LREF(neg_denom)
+LSYM(pow2)
+ addi,>= 0,arg0,retreg /* if numerator is negative, add the */
+ add arg0,temp,retreg /* (denominaotr -1) to correct for shifts */
+ extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
+ extrs retreg,15,16,retreg /* retreg = retreg >> 16 */
+ or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
+ ldi 0xcc,temp1 /* setup 0xcc in temp1 */
+ extru,= arg1,23,8,temp /* test denominator with 0xff00 */
+ extrs retreg,23,24,retreg /* retreg = retreg >> 8 */
+ or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
+ ldi 0xaa,temp /* setup 0xaa in temp */
+ extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
+ extrs retreg,27,28,retreg /* retreg = retreg >> 4 */
+ and,= arg1,temp1,r0 /* test denominator with 0xcc */
+ extrs retreg,29,30,retreg /* retreg = retreg >> 2 */
+ and,= arg1,temp,r0 /* test denominator with 0xaa */
+ extrs retreg,30,31,retreg /* retreg = retreg >> 1 */
+ MILLIRETN
+LSYM(neg_denom)
+ addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power of 2 */
+ b,n LREF(regular_seq)
+ sub r0,arg1,temp /* make denominator positive */
+ comb,=,n arg1,temp,LREF(regular_seq) /* test against 0x80000000 and 0 */
+ ldo -1(temp),retreg /* is there at most one bit set ? */
+ and,= temp,retreg,r0 /* if so, the denominator is power of 2 */
+ b,n LREF(regular_seq)
+ sub r0,arg0,retreg /* negate numerator */
+ comb,=,n arg0,retreg,LREF(regular_seq) /* test against 0x80000000 */
+ copy retreg,arg0 /* set up arg0, arg1 and temp */
+ copy temp,arg1 /* before branching to pow2 */
+ b LREF(pow2)
+ ldo -1(arg1),temp
+LSYM(regular_seq)
+ comib,>>=,n 15,arg1,LREF(small_divisor)
+ add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
+LSYM(normal)
+ subi 0,retreg,retreg /* make it positive */
+ sub 0,arg1,temp /* clear carry, */
+ /* negate the divisor */
+ ds 0,temp,0 /* set V-bit to the comple- */
+ /* ment of the divisor sign */
+ add retreg,retreg,retreg /* shift msb bit into carry */
+ ds r0,arg1,temp /* 1st divide step, if no carry */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 2nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 3rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 4th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 5th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 6th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 7th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 8th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 9th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 10th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 11th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 12th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 13th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 14th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 15th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 16th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 17th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 18th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 19th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 20th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 21st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 22nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 23rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 24th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 25th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 26th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 27th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 28th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 29th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 30th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 31st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 32nd divide step, */
+ addc retreg,retreg,retreg /* shift last retreg bit into retreg */
+ xor,>= arg0,arg1,0 /* get correct sign of quotient */
+ sub 0,retreg,retreg /* based on operand signs */
+ MILLIRETN
+ nop
+
+LSYM(small_divisor)
+
+#if defined(pa64)
+/* Clear the upper 32 bits of the arg1 register. We are working with */
+/* small divisors (and 32-bit integers). We must not be misled */
+/* by "1" bits left in the upper 32 bits. */
+ depd %r0,31,32,%r25
+#endif
+ blr,n arg1,r0
+ nop
+/* table for divisor == 0,1, ... ,15 */
+ addit,= 0,arg1,r0 /* trap if divisor == 0 */
+ nop
+ MILLIRET /* divisor == 1 */
+ copy arg0,retreg
+ MILLI_BEN($$divI_2) /* divisor == 2 */
+ nop
+ MILLI_BEN($$divI_3) /* divisor == 3 */
+ nop
+ MILLI_BEN($$divI_4) /* divisor == 4 */
+ nop
+ MILLI_BEN($$divI_5) /* divisor == 5 */
+ nop
+ MILLI_BEN($$divI_6) /* divisor == 6 */
+ nop
+ MILLI_BEN($$divI_7) /* divisor == 7 */
+ nop
+ MILLI_BEN($$divI_8) /* divisor == 8 */
+ nop
+ MILLI_BEN($$divI_9) /* divisor == 9 */
+ nop
+ MILLI_BEN($$divI_10) /* divisor == 10 */
+ nop
+ b LREF(normal) /* divisor == 11 */
+ add,>= 0,arg0,retreg
+ MILLI_BEN($$divI_12) /* divisor == 12 */
+ nop
+ b LREF(normal) /* divisor == 13 */
+ add,>= 0,arg0,retreg
+ MILLI_BEN($$divI_14) /* divisor == 14 */
+ nop
+ MILLI_BEN($$divI_15) /* divisor == 15 */
+ nop
+
+LSYM(negative1)
+ sub 0,arg0,retreg /* result is negation of dividend */
+ MILLIRET
+ addo arg0,arg1,r0 /* trap iff dividend==0x80000000 && divisor==-1 */
+ .exit
+ .procend
+ .end
+#endif
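
One non-obvious step in $$divI is the power-of-2 fast path: an arithmetic
right shift alone would round toward minus infinity, so for negative
numerators the code first adds (denominator - 1), giving the truncation
toward zero the contract above promises. A C sketch of that correction;
div_pow2 is a hypothetical stand-in:

    /* Divide by d == 1 << k with truncation toward zero, mirroring the
       addi,>= / add pair ahead of the extrs cascade in LSYM(pow2).  */
    int div_pow2 (int x, int k)
    {
      int d = 1 << k;
      if (x < 0)
        x += d - 1;   /* bias so the shift truncates toward zero */
      return x >> k;  /* e.g. -7 / 4: (-7 + 3) >> 2 == -1, not -2 */
    }
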
+
+#ifdef L_divU
+/* ROUTINE: $$divU
+ .
+ . Single precision divide for unsigned integers.
+ .
+ . Quotient is truncated towards zero.
+ . Traps on divide by zero.
+
+ INPUT REGISTERS:
+ . arg0 == dividend
+ . arg1 == divisor
+ . mrp == return pc
+ . sr0 == return space when called externally
+
+ OUTPUT REGISTERS:
+ . arg0 = undefined
+ . arg1 = undefined
+ . ret1 = quotient
+
+ OTHER REGISTERS AFFECTED:
+ . r1 = undefined
+
+ SIDE EFFECTS:
+ . Causes a trap under the following conditions:
+ . divisor is zero
+ . Changes memory at the following places:
+ . NONE
+
+ PERMISSIBLE CONTEXT:
+ . Unwindable.
+ . Does not create a stack frame.
+ . Suitable for internal or external millicode.
+ . Assumes the special millicode register conventions.
+
+ DISCUSSION:
+ . Branches to other millicode routines using BE:
+ . $$divU_# for 3,5,6,7,9,10,12,14,15
+ .
+ . For selected small divisors calls the special divide by constant
+ . routines written by Karl Pettis. These are: 3,5,6,7,9,10,12,14,15. */
+
+RDEFINE(temp,r1)
+RDEFINE(retreg,ret1) /* r29 */
+RDEFINE(temp1,arg0)
+ SUBSPA_MILLI_DIV
+ ATTR_MILLI
+ .export $$divU,millicode
+ .import $$divU_3,millicode
+ .import $$divU_5,millicode
+ .import $$divU_6,millicode
+ .import $$divU_7,millicode
+ .import $$divU_9,millicode
+ .import $$divU_10,millicode
+ .import $$divU_12,millicode
+ .import $$divU_14,millicode
+ .import $$divU_15,millicode
+ .proc
+ .callinfo millicode
+ .entry
+GSYM($$divU)
+/* The subtract is not nullified since it does no harm and can be used
+ by the two cases that branch back to "normal". */
+ ldo -1(arg1),temp /* is there at most one bit set ? */
+ and,= arg1,temp,r0 /* if so, denominator is power of 2 */
+ b LREF(regular_seq)
+ addit,= 0,arg1,0 /* trap for zero dvr */
+ copy arg0,retreg
+ extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
+ extru retreg,15,16,retreg /* retreg = retreg >> 16 */
+ or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
+ ldi 0xcc,temp1 /* setup 0xcc in temp1 */
+ extru,= arg1,23,8,temp /* test denominator with 0xff00 */
+ extru retreg,23,24,retreg /* retreg = retreg >> 8 */
+ or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
+ ldi 0xaa,temp /* setup 0xaa in temp */
+ extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
+ extru retreg,27,28,retreg /* retreg = retreg >> 4 */
+ and,= arg1,temp1,r0 /* test denominator with 0xcc */
+ extru retreg,29,30,retreg /* retreg = retreg >> 2 */
+ and,= arg1,temp,r0 /* test denominator with 0xaa */
+ extru retreg,30,31,retreg /* retreg = retreg >> 1 */
+ MILLIRETN
+ nop
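+/* The fast path above finds log2(divisor) with mask tests instead of a
+   loop; a C sketch, assuming d is already known to be a power of two:
+
+     q = x;
+     if (d >> 16)          q >>= 16;
+     d |= d >> 16;
+     if ((d >> 8) & 0xff)  q >>= 8;
+     d |= d >> 8;
+     if ((d >> 4) & 0xf)   q >>= 4;
+     if (d & 0xcc)         q >>= 2;
+     if (d & 0xaa)         q >>= 1;
+*/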
+LSYM(regular_seq)
+ comib,>= 15,arg1,LREF(special_divisor)
+ subi 0,arg1,temp /* clear carry, negate the divisor */
+ ds r0,temp,r0 /* set V-bit to 1 */
+LSYM(normal)
+ add arg0,arg0,retreg /* shift msb bit into carry */
+ ds r0,arg1,temp /* 1st divide step, if no carry */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 2nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 3rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 4th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 5th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 6th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 7th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 8th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 9th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 10th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 11th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 12th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 13th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 14th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 15th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 16th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 17th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 18th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 19th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 20th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 21st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 22nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 23rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 24th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 25th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 26th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 27th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 28th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 29th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 30th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 31st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds temp,arg1,temp /* 32nd divide step, */
+ MILLIRET
+ addc retreg,retreg,retreg /* shift last retreg bit into retreg */
+
+/* Handle the cases where divisor is a small constant or has high bit on. */
+LSYM(special_divisor)
+/* blr arg1,r0 */
+/* comib,>,n 0,arg1,LREF(big_divisor) ; nullify previous instruction */
+
+/* Pratap 8/13/90. The 815 Stirling chip set has a bug that prevents us from
+ generating such a blr, comib sequence. A problem in nullification. So I
+ rewrote this code. */
+
+#if defined(pa64)
+/* Clear the upper 32 bits of the arg1 register. We are working with
+   small divisors (and 32-bit unsigned integers).  We must not be misled
+ by "1" bits left in the upper 32 bits. */
+ depd %r0,31,32,%r25
+#endif
+ comib,> 0,arg1,LREF(big_divisor)
+ nop
+ blr arg1,r0
+ nop
+
+LSYM(zero_divisor) /* this label is here to provide external visibility */
+ addit,= 0,arg1,0 /* trap for zero dvr */
+ nop
+ MILLIRET /* divisor == 1 */
+ copy arg0,retreg
+ MILLIRET /* divisor == 2 */
+ extru arg0,30,31,retreg
+ MILLI_BEN($$divU_3) /* divisor == 3 */
+ nop
+ MILLIRET /* divisor == 4 */
+ extru arg0,29,30,retreg
+ MILLI_BEN($$divU_5) /* divisor == 5 */
+ nop
+ MILLI_BEN($$divU_6) /* divisor == 6 */
+ nop
+ MILLI_BEN($$divU_7) /* divisor == 7 */
+ nop
+ MILLIRET /* divisor == 8 */
+ extru arg0,28,29,retreg
+ MILLI_BEN($$divU_9) /* divisor == 9 */
+ nop
+ MILLI_BEN($$divU_10) /* divisor == 10 */
+ nop
+ b LREF(normal) /* divisor == 11 */
+ ds r0,temp,r0 /* set V-bit to 1 */
+ MILLI_BEN($$divU_12) /* divisor == 12 */
+ nop
+ b LREF(normal) /* divisor == 13 */
+ ds r0,temp,r0 /* set V-bit to 1 */
+ MILLI_BEN($$divU_14) /* divisor == 14 */
+ nop
+ MILLI_BEN($$divU_15) /* divisor == 15 */
+ nop
+
+/* Handle the case where the high bit is on in the divisor.
+   Compute:  if (dividend >= divisor) quotient = 1; else quotient = 0;
+   Note:  dividend >= divisor iff dividend-divisor does not borrow,
+          and no borrow iff carry.  */
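+/* Equivalently, in C: quotient = (dividend >= divisor); the carry out
+   of the sub below is exactly that boolean, and addc copies it into
+   retreg.  */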
+LSYM(big_divisor)
+ sub arg0,arg1,r0
+ MILLIRET
+ addc r0,r0,retreg
+ .exit
+ .procend
+ .end
+#endif
+
+#ifdef L_remI
+/* ROUTINE: $$remI
+
+ DESCRIPTION:
+ . $$remI returns the remainder of the division of two signed 32-bit
+ . integers. The sign of the remainder is the same as the sign of
+ . the dividend.
+
+
+ INPUT REGISTERS:
+ . arg0 == dividend
+ . arg1 == divisor
+ . mrp == return pc
+ . sr0 == return space when called externally
+
+ OUTPUT REGISTERS:
+ . arg0 = destroyed
+ . arg1 = destroyed
+ . ret1 = remainder
+
+ OTHER REGISTERS AFFECTED:
+ . r1 = undefined
+
+ SIDE EFFECTS:
+ . Causes a trap under the following conditions: DIVIDE BY ZERO
+ . Changes memory at the following places: NONE
+
+ PERMISSIBLE CONTEXT:
+ . Unwindable
+ . Does not create a stack frame
+   . Is usable for internal or external millicode
+
+ DISCUSSION:
+ . Calls other millicode routines via mrp: NONE
+ . Calls other millicode routines: NONE */
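+
+/* A C sketch of the result -- remI_model is a hypothetical name:
+
+     int remI_model (int x, int d)
+     {
+       if (d == 0)
+         __builtin_trap ();
+       return x % d;                 // C99 %: sign follows the dividend
+     }
+
+   The power-of-2 fast path below avoids the ds loop when |d| == 2**k:
+   r = |x| & (|d| - 1), negated again when x < 0.  */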
+
+RDEFINE(tmp,r1)
+RDEFINE(retreg,ret1)
+
+ SUBSPA_MILLI
+ ATTR_MILLI
+ .proc
+ .callinfo millicode
+ .entry
+GSYM($$remI)
+GSYM($$remoI)
+ .export $$remI,MILLICODE
+ .export $$remoI,MILLICODE
+ ldo -1(arg1),tmp /* is there at most one bit set ? */
+ and,<> arg1,tmp,r0 /* if not, don't use power of 2 */
+ addi,> 0,arg1,r0 /* if denominator > 0, use power */
+ /* of 2 */
+ b,n LREF(neg_denom)
+LSYM(pow2)
+ comb,>,n 0,arg0,LREF(neg_num) /* is numerator < 0 ? */
+ and arg0,tmp,retreg /* get the result */
+ MILLIRETN
+LSYM(neg_num)
+ subi 0,arg0,arg0 /* negate numerator */
+ and arg0,tmp,retreg /* get the result */
+ subi 0,retreg,retreg /* negate result */
+ MILLIRETN
+LSYM(neg_denom)
+ addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power */
+ /* of 2 */
+ b,n LREF(regular_seq)
+ sub r0,arg1,tmp /* make denominator positive */
+ comb,=,n arg1,tmp,LREF(regular_seq) /* test against 0x80000000 and 0 */
+ ldo -1(tmp),retreg /* is there at most one bit set ? */
+ and,= tmp,retreg,r0 /* if not, go to regular_seq */
+ b,n LREF(regular_seq)
+ comb,>,n 0,arg0,LREF(neg_num_2) /* if arg0 < 0, negate it */
+ and arg0,retreg,retreg
+ MILLIRETN
+LSYM(neg_num_2)
+ subi 0,arg0,tmp /* test against 0x80000000 */
+ and tmp,retreg,retreg
+ subi 0,retreg,retreg
+ MILLIRETN
+LSYM(regular_seq)
+ addit,= 0,arg1,0 /* trap if div by zero */
+ add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
+ sub 0,retreg,retreg /* make it positive */
+ sub 0,arg1, tmp /* clear carry, */
+ /* negate the divisor */
+ ds 0, tmp,0 /* set V-bit to the comple- */
+ /* ment of the divisor sign */
+ or 0,0, tmp /* clear tmp */
+ add retreg,retreg,retreg /* shift msb bit into carry */
+ ds tmp,arg1, tmp /* 1st divide step, if no carry */
+ /* out, msb of quotient = 0 */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+LSYM(t1)
+ ds tmp,arg1, tmp /* 2nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 3rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 4th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 5th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 6th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 7th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 8th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 9th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 10th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 11th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 12th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 13th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 14th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 15th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 16th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 17th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 18th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 19th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 20th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 21st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 22nd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 23rd divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 24th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 25th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 26th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 27th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 28th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 29th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 30th divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 31st divide step */
+ addc retreg,retreg,retreg /* shift retreg with/into carry */
+ ds tmp,arg1, tmp /* 32nd divide step, */
+ addc retreg,retreg,retreg /* shift last bit into retreg */
+ movb,>=,n tmp,retreg,LREF(finish) /* branch if pos. tmp */
+ add,< arg1,0,0 /* if arg1 > 0, add arg1 */
+ add,tr tmp,arg1,retreg /* for correcting remainder tmp */
+ sub tmp,arg1,retreg /* else add absolute value arg1 */
+LSYM(finish)
+ add,>= arg0,0,0 /* set sign of remainder */
+ sub 0,retreg,retreg /* to sign of dividend */
+ MILLIRET
+ nop
+ .exit
+ .procend
+#ifdef milliext
+ .origin 0x00000200
+#endif
+ .end
+#endif
+
+#ifdef L_remU
+/* ROUTINE: $$remU
+ . Single precision divide for remainder with unsigned binary integers.
+ .
+ . The remainder must be dividend-(dividend/divisor)*divisor.
+ . Divide by zero is trapped.
+
+ INPUT REGISTERS:
+ . arg0 == dividend
+ . arg1 == divisor
+ . mrp == return pc
+ . sr0 == return space when called externally
+
+ OUTPUT REGISTERS:
+ . arg0 = undefined
+ . arg1 = undefined
+ . ret1 = remainder
+
+ OTHER REGISTERS AFFECTED:
+ . r1 = undefined
+
+ SIDE EFFECTS:
+ . Causes a trap under the following conditions: DIVIDE BY ZERO
+ . Changes memory at the following places: NONE
+
+ PERMISSIBLE CONTEXT:
+ . Unwindable.
+ . Does not create a stack frame.
+ . Suitable for internal or external millicode.
+ . Assumes the special millicode register conventions.
+
+ DISCUSSION:
+ . Calls other millicode routines using mrp: NONE
+ . Calls other millicode routines: NONE */
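+
+/* A C sketch -- remU_model is a hypothetical name:
+
+     unsigned remU_model (unsigned x, unsigned d)
+     {
+       if (d == 0)
+         __builtin_trap ();
+       if ((d & (d - 1)) == 0)       // power of two
+         return x & (d - 1);
+       if (d & 0x80000000)           // special_case below
+         return x >= d ? x - d : x;
+       return x - (x / d) * d;       // the 32-step ds loop
+     }
+*/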
+
+
+RDEFINE(temp,r1)
+RDEFINE(rmndr,ret1) /* r29 */
+ SUBSPA_MILLI
+ ATTR_MILLI
+ .export $$remU,millicode
+ .proc
+ .callinfo millicode
+ .entry
+GSYM($$remU)
+ ldo -1(arg1),temp /* is there at most one bit set ? */
+ and,= arg1,temp,r0 /* if not, don't use power of 2 */
+ b LREF(regular_seq)
+ addit,= 0,arg1,r0 /* trap on div by zero */
+ and arg0,temp,rmndr /* get the result for power of 2 */
+ MILLIRETN
+LSYM(regular_seq)
+ comib,>=,n 0,arg1,LREF(special_case)
+ subi 0,arg1,rmndr /* clear carry, negate the divisor */
+ ds r0,rmndr,r0 /* set V-bit to 1 */
+ add arg0,arg0,temp /* shift msb bit into carry */
+ ds r0,arg1,rmndr /* 1st divide step, if no carry */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 2nd divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 3rd divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 4th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 5th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 6th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 7th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 8th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 9th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 10th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 11th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 12th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 13th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 14th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 15th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 16th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 17th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 18th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 19th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 20th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 21st divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 22nd divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 23rd divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 24th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 25th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 26th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 27th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 28th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 29th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 30th divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 31st divide step */
+ addc temp,temp,temp /* shift temp with/into carry */
+ ds rmndr,arg1,rmndr /* 32nd divide step, */
+ comiclr,<= 0,rmndr,r0
+ add rmndr,arg1,rmndr /* correction */
+ MILLIRETN
+ nop
+
+/* Putting >= on the last DS and deleting COMICLR does not work! */
+LSYM(special_case)
+ sub,>>= arg0,arg1,rmndr
+ copy arg0,rmndr
+ MILLIRETN
+ nop
+ .exit
+ .procend
+ .end
+#endif
+
+#ifdef L_div_const
+/* ROUTINE: $$divI_2
+ . $$divI_3 $$divU_3
+ . $$divI_4
+ . $$divI_5 $$divU_5
+ . $$divI_6 $$divU_6
+ . $$divI_7 $$divU_7
+ . $$divI_8
+ . $$divI_9 $$divU_9
+ . $$divI_10 $$divU_10
+ .
+ . $$divI_12 $$divU_12
+ .
+ . $$divI_14 $$divU_14
+ . $$divI_15 $$divU_15
+ . $$divI_16
+ . $$divI_17 $$divU_17
+ .
+ . Divide by selected constants for single precision binary integers.
+
+ INPUT REGISTERS:
+ . arg0 == dividend
+ . mrp == return pc
+ . sr0 == return space when called externally
+
+ OUTPUT REGISTERS:
+ . arg0 = undefined
+ . arg1 = undefined
+ . ret1 = quotient
+
+ OTHER REGISTERS AFFECTED:
+ . r1 = undefined
+
+ SIDE EFFECTS:
+ . Causes a trap under the following conditions: NONE
+ . Changes memory at the following places: NONE
+
+ PERMISSIBLE CONTEXT:
+ . Unwindable.
+ . Does not create a stack frame.
+ . Suitable for internal or external millicode.
+ . Assumes the special millicode register conventions.
+
+ DISCUSSION:
+ . Calls other millicode routines using mrp: NONE
+ . Calls other millicode routines: NONE */
+
+
+/* TRUNCATED DIVISION BY SMALL INTEGERS
+
+ We are interested in q(x) = floor(x/y), where x >= 0 and y > 0
+ (with y fixed).
+
+ Let a = floor(z/y), for some choice of z. Note that z will be
+ chosen so that division by z is cheap.
+
+ Let r be the remainder(z/y). In other words, r = z - ay.
+
+ Now, our method is to choose a value for b such that
+
+ q'(x) = floor((ax+b)/z)
+
+ is equal to q(x) over as large a range of x as possible. If the
+ two are equal over a sufficiently large range, and if it is easy to
+ form the product (ax), and it is easy to divide by z, then we can
+ perform the division much faster than the general division algorithm.
+
+ So, we want the following to be true:
+
+ . For x in the following range:
+ .
+ . ky <= x < (k+1)y
+ .
+ . implies that
+ .
+ . k <= (ax+b)/z < (k+1)
+
+ We want to determine b such that this is true for all k in the
+ range {0..K} for some maximum K.
+
+ Since (ax+b) is an increasing function of x, we can take each
+ bound separately to determine the "best" value for b.
+
+ (ax+b)/z < (k+1) implies
+
+   a((k+1)y-1) + b < (k+1)z implies
+
+ b < a + (k+1)(z-ay) implies
+
+ b < a + (k+1)r
+
+ This needs to be true for all k in the range {0..K}. In
+ particular, it is true for k = 0 and this leads to a maximum
+ acceptable value for b.
+
+ b < a+r or b <= a+r-1
+
+ Taking the other bound, we have
+
+ k <= (ax+b)/z implies
+
+ k <= (aky+b)/z implies
+
+ k(z-ay) <= b implies
+
+ kr <= b
+
+ Clearly, the largest range for k will be achieved by maximizing b,
+ when r is not zero. When r is zero, then the simplest choice for b
+ is 0. When r is not 0, set
+
+ . b = a+r-1
+
+ Now, by construction, q'(x) = floor((ax+b)/z) = q(x) = floor(x/y)
+ for all x in the range:
+
+ . 0 <= x < (K+1)y
+
+ We need to determine what K is. Of our two bounds,
+
+ . b < a+(k+1)r is satisfied for all k >= 0, by construction.
+
+ The other bound is
+
+ . kr <= b
+
+ This is always true if r = 0. If r is not 0 (the usual case), then
+   K = floor((a+r-1)/r) is the maximum value for k.
+
+ Therefore, the formula q'(x) = floor((ax+b)/z) yields the correct
+ answer for q(x) = floor(x/y) when x is in the range
+
+   0 <= x <= (K+1)y-1, where K = floor((a+r-1)/r)
+
+ To be most useful, we want (K+1)y-1 = (max x) >= 2**32-1 so that
+ the formula for q'(x) yields the correct value of q(x) for all x
+ representable by a single word in HPPA.
+
+ We are also constrained in that computing the product (ax), adding
+ b, and dividing by z must all be done quickly, otherwise we will be
+ better off going through the general algorithm using the DS
+ instruction, which uses approximately 70 cycles.
+
+ For each y, there is a choice of z which satisfies the constraints
+ for (K+1)y >= 2**32. We may not, however, be able to satisfy the
+ timing constraints for arbitrary y. It seems that z being equal to
+ a power of 2 or a power of 2 minus 1 is as good as we can do, since
+ it minimizes the time to do division by z. We want the choice of z
+ to also result in a value for (a) that minimizes the computation of
+ the product (ax). This is best achieved if (a) has a regular bit
+ pattern (so the multiplication can be done with shifts and adds).
+ The value of (a) also needs to be less than 2**32 so the product is
+ always guaranteed to fit in 2 words.
+
+ In actual practice, the following should be done:
+
+ 1) For negative x, you should take the absolute value and remember
+ . the fact so that the result can be negated. This obviously does
+ . not apply in the unsigned case.
+ 2) For even y, you should factor out the power of 2 that divides y
+ . and divide x by it. You can then proceed by dividing by the
+ . odd factor of y.
+
+ Here is a table of some odd values of y, and corresponding choices
+ for z which are "good".
+
+ y z r a (hex) max x (hex)
+
+ 3 2**32 1 55555555 100000001
+ 5 2**32 1 33333333 100000003
+ 7 2**24-1 0 249249 (infinite)
+ 9 2**24-1 0 1c71c7 (infinite)
+ 11 2**20-1 0 1745d (infinite)
+ 13 2**24-1 0 13b13b (infinite)
+ 15 2**32 1 11111111 10000000d
+ 17 2**32 1 f0f0f0f 10000000f
+
+ If r is 1, then b = a+r-1 = a. This simplifies the computation
+ of (ax+b), since you can compute (x+1)(a) instead. If r is 0,
+ then b = 0 is ok to use which simplifies (ax+b).
+
+ The bit patterns for 55555555, 33333333, and 11111111 are obviously
+ very regular. The bit patterns for the other values of a above are:
+
+ y (hex) (binary)
+
+ 7 249249 001001001001001001001001 << regular >>
+ 9 1c71c7 000111000111000111000111 << regular >>
+ 11 1745d 000000010111010001011101 << irregular >>
+ 13 13b13b 000100111011000100111011 << irregular >>
+
+ The bit patterns for (a) corresponding to (y) of 11 and 13 may be
+ too irregular to warrant using this method.
+
+ When z is a power of 2 minus 1, then the division by z is slightly
+ more complicated, involving an iterative solution.
+
+ The code presented here solves division by 1 through 17, except for
+ 11 and 13. There are algorithms for both signed and unsigned
+ quantities given.
+
+ TIMINGS (cycles)
+
+ divisor positive negative unsigned
+
+ . 1 2 2 2
+ . 2 4 4 2
+ . 3 19 21 19
+ . 4 4 4 2
+ . 5 18 22 19
+ . 6 19 22 19
+ . 8 4 4 2
+ . 10 18 19 17
+ . 12 18 20 18
+ . 15 16 18 16
+ . 16 4 4 2
+ . 17 16 18 16
+
+ Now, the algorithm for 7, 9, and 14 is an iterative one. That is,
+ a loop body is executed until the tentative quotient is 0. The
+ number of times the loop body is executed varies depending on the
+ dividend, but is never more than two times. If the dividend is
+ less than the divisor, then the loop body is not executed at all.
+ Each iteration adds 4 cycles to the timings.
+
+ divisor positive negative unsigned
+
+ . 7 19+4n 20+4n 20+4n n = number of iterations
+ . 9 21+4n 22+4n 21+4n
+ . 14 21+4n 22+4n 20+4n
+
+ To give an idea of how the number of iterations varies, here is a
+ table of dividend versus number of iterations when dividing by 7.
+
+ smallest largest required
+ dividend dividend iterations
+
+ . 0 6 0
+ . 7 0x6ffffff 1
+ 0x1000006 0xffffffff 2
+
+ There is some overlap in the range of numbers requiring 1 and 2
+ iterations. */
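+
+/* A worked C example for y = 3 (z = 2**32, a = 0x55555555, r = 1, so
+   b = a+r-1 = a and ax+b = a(x+1)) -- div3_model is a hypothetical name:
+
+     unsigned div3_model (unsigned x)
+     {
+       unsigned long long p = 0x55555555ULL * (x + 1ULL);
+       return (unsigned) (p >> 32);  // floor((ax+b)/2**32) == x/3
+     }
+
+   e.g. x = 7: p = 0x2AAAAAAA8, p >> 32 = 2 = 7/3.  */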
+
+RDEFINE(t2,r1)
+RDEFINE(x2,arg0) /* r26 */
+RDEFINE(t1,arg1) /* r25 */
+RDEFINE(x1,ret1) /* r29 */
+
+ SUBSPA_MILLI_DIV
+ ATTR_MILLI
+
+ .proc
+ .callinfo millicode
+ .entry
+/* NONE of these routines require a stack frame
+ ALL of these routines are unwindable from millicode */
+
+GSYM($$divide_by_constant)
+ .export $$divide_by_constant,millicode
+/* Provides a "nice" label for the code covered by the unwind descriptor
+ for things like gprof. */
+
+/* DIVISION BY 2 (shift by 1) */
+GSYM($$divI_2)
+ .export $$divI_2,millicode
+ comclr,>= arg0,0,0
+ addi 1,arg0,arg0
+ MILLIRET
+ extrs arg0,30,31,ret1
+
+
+/* DIVISION BY 4 (shift by 2) */
+GSYM($$divI_4)
+ .export $$divI_4,millicode
+ comclr,>= arg0,0,0
+ addi 3,arg0,arg0
+ MILLIRET
+ extrs arg0,29,30,ret1
+
+
+/* DIVISION BY 8 (shift by 3) */
+GSYM($$divI_8)
+ .export $$divI_8,millicode
+ comclr,>= arg0,0,0
+ addi 7,arg0,arg0
+ MILLIRET
+ extrs arg0,28,29,ret1
+
+/* DIVISION BY 16 (shift by 4) */
+GSYM($$divI_16)
+ .export $$divI_16,millicode
+ comclr,>= arg0,0,0
+ addi 15,arg0,arg0
+ MILLIRET
+ extrs arg0,27,28,ret1
+
+/****************************************************************************
+*
+* DIVISION BY DIVISORS OF FFFFFFFF, and powers of 2 times these
+*
+* includes 3,5,15,17 and also 6,10,12
+*
+****************************************************************************/
+
+/* DIVISION BY 3 (use z = 2**32; a = 55555555) */
+
+GSYM($$divI_3)
+ .export $$divI_3,millicode
+ comb,<,N x2,0,LREF(neg3)
+
+ addi 1,x2,x2 /* this cannot overflow */
+ extru x2,1,2,x1 /* multiply by 5 to get started */
+ sh2add x2,x2,x2
+ b LREF(pos)
+ addc x1,0,x1
+
+LSYM(neg3)
+ subi 1,x2,x2 /* this cannot overflow */
+ extru x2,1,2,x1 /* multiply by 5 to get started */
+ sh2add x2,x2,x2
+ b LREF(neg)
+ addc x1,0,x1
+
+GSYM($$divU_3)
+ .export $$divU_3,millicode
+ addi 1,x2,x2 /* this CAN overflow */
+ addc 0,0,x1
+ shd x1,x2,30,t1 /* multiply by 5 to get started */
+ sh2add x2,x2,x2
+ b LREF(pos)
+ addc x1,t1,x1
+
+/* DIVISION BY 5 (use z = 2**32; a = 33333333) */
+
+GSYM($$divI_5)
+ .export $$divI_5,millicode
+ comb,<,N x2,0,LREF(neg5)
+
+ addi 3,x2,t1 /* this cannot overflow */
+ sh1add x2,t1,x2 /* multiply by 3 to get started */
+ b LREF(pos)
+ addc 0,0,x1
+
+LSYM(neg5)
+ sub 0,x2,x2 /* negate x2 */
+ addi 1,x2,x2 /* this cannot overflow */
+ shd 0,x2,31,x1 /* get top bit (can be 1) */
+ sh1add x2,x2,x2 /* multiply by 3 to get started */
+ b LREF(neg)
+ addc x1,0,x1
+
+GSYM($$divU_5)
+ .export $$divU_5,millicode
+ addi 1,x2,x2 /* this CAN overflow */
+ addc 0,0,x1
+ shd x1,x2,31,t1 /* multiply by 3 to get started */
+ sh1add x2,x2,x2
+ b LREF(pos)
+ addc t1,x1,x1
+
+/* DIVISION BY 6 (shift to divide by 2 then divide by 3) */
+GSYM($$divI_6)
+ .export $$divI_6,millicode
+ comb,<,N x2,0,LREF(neg6)
+ extru x2,30,31,x2 /* divide by 2 */
+ addi 5,x2,t1 /* compute 5*(x2+1) = 5*x2+5 */
+ sh2add x2,t1,x2 /* multiply by 5 to get started */
+ b LREF(pos)
+ addc 0,0,x1
+
+LSYM(neg6)
+ subi 2,x2,x2 /* negate, divide by 2, and add 1 */
+ /* negation and adding 1 are done */
+ /* at the same time by the SUBI */
+ extru x2,30,31,x2
+ shd 0,x2,30,x1
+ sh2add x2,x2,x2 /* multiply by 5 to get started */
+ b LREF(neg)
+ addc x1,0,x1
+
+GSYM($$divU_6)
+ .export $$divU_6,millicode
+ extru x2,30,31,x2 /* divide by 2 */
+ addi 1,x2,x2 /* cannot carry */
+ shd 0,x2,30,x1 /* multiply by 5 to get started */
+ sh2add x2,x2,x2
+ b LREF(pos)
+ addc x1,0,x1
+
+/* DIVISION BY 10 (shift to divide by 2 then divide by 5) */
+GSYM($$divU_10)
+ .export $$divU_10,millicode
+ extru x2,30,31,x2 /* divide by 2 */
+ addi 3,x2,t1 /* compute 3*(x2+1) = (3*x2)+3 */
+ sh1add x2,t1,x2 /* multiply by 3 to get started */
+ addc 0,0,x1
+LSYM(pos)
+ shd x1,x2,28,t1 /* multiply by 0x11 */
+ shd x2,0,28,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+LSYM(pos_for_17)
+ shd x1,x2,24,t1 /* multiply by 0x101 */
+ shd x2,0,24,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+
+ shd x1,x2,16,t1 /* multiply by 0x10001 */
+ shd x2,0,16,t2
+ add x2,t2,x2
+ MILLIRET
+ addc x1,t1,x1
+
+GSYM($$divI_10)
+ .export $$divI_10,millicode
+ comb,< x2,0,LREF(neg10)
+ copy 0,x1
+ extru x2,30,31,x2 /* divide by 2 */
+ addib,TR 1,x2,LREF(pos) /* add 1 (cannot overflow) */
+ sh1add x2,x2,x2 /* multiply by 3 to get started */
+
+LSYM(neg10)
+ subi 2,x2,x2 /* negate, divide by 2, and add 1 */
+ /* negation and adding 1 are done */
+ /* at the same time by the SUBI */
+ extru x2,30,31,x2
+ sh1add x2,x2,x2 /* multiply by 3 to get started */
+LSYM(neg)
+ shd x1,x2,28,t1 /* multiply by 0x11 */
+ shd x2,0,28,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+LSYM(neg_for_17)
+ shd x1,x2,24,t1 /* multiply by 0x101 */
+ shd x2,0,24,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+
+ shd x1,x2,16,t1 /* multiply by 0x10001 */
+ shd x2,0,16,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+ MILLIRET
+ sub 0,x1,x1
+
+/* DIVISION BY 12 (shift to divide by 4 then divide by 3) */
+GSYM($$divI_12)
+ .export $$divI_12,millicode
+ comb,< x2,0,LREF(neg12)
+ copy 0,x1
+ extru x2,29,30,x2 /* divide by 4 */
+ addib,tr 1,x2,LREF(pos) /* compute 5*(x2+1) = 5*x2+5 */
+ sh2add x2,x2,x2 /* multiply by 5 to get started */
+
+LSYM(neg12)
+ subi 4,x2,x2 /* negate, divide by 4, and add 1 */
+ /* negation and adding 1 are done */
+ /* at the same time by the SUBI */
+ extru x2,29,30,x2
+ b LREF(neg)
+ sh2add x2,x2,x2 /* multiply by 5 to get started */
+
+GSYM($$divU_12)
+ .export $$divU_12,millicode
+ extru x2,29,30,x2 /* divide by 4 */
+ addi 5,x2,t1 /* cannot carry */
+ sh2add x2,t1,x2 /* multiply by 5 to get started */
+ b LREF(pos)
+ addc 0,0,x1
+
+/* DIVISION BY 15 (use z = 2**32; a = 11111111) */
+GSYM($$divI_15)
+ .export $$divI_15,millicode
+ comb,< x2,0,LREF(neg15)
+ copy 0,x1
+ addib,tr 1,x2,LREF(pos)+4
+ shd x1,x2,28,t1
+
+LSYM(neg15)
+ b LREF(neg)
+ subi 1,x2,x2
+
+GSYM($$divU_15)
+ .export $$divU_15,millicode
+ addi 1,x2,x2 /* this CAN overflow */
+ b LREF(pos)
+ addc 0,0,x1
+
+/* DIVISION BY 17 (use z = 2**32; a = f0f0f0f) */
+GSYM($$divI_17)
+ .export $$divI_17,millicode
+ comb,<,n x2,0,LREF(neg17)
+ addi 1,x2,x2 /* this cannot overflow */
+ shd 0,x2,28,t1 /* multiply by 0xf to get started */
+ shd x2,0,28,t2
+ sub t2,x2,x2
+ b LREF(pos_for_17)
+ subb t1,0,x1
+
+LSYM(neg17)
+ subi 1,x2,x2 /* this cannot overflow */
+ shd 0,x2,28,t1 /* multiply by 0xf to get started */
+ shd x2,0,28,t2
+ sub t2,x2,x2
+ b LREF(neg_for_17)
+ subb t1,0,x1
+
+GSYM($$divU_17)
+ .export $$divU_17,millicode
+ addi 1,x2,x2 /* this CAN overflow */
+ addc 0,0,x1
+ shd x1,x2,28,t1 /* multiply by 0xf to get started */
+LSYM(u17)
+ shd x2,0,28,t2
+ sub t2,x2,x2
+ b LREF(pos_for_17)
+ subb t1,x1,x1
+
+
+/* DIVISION BY DIVISORS OF FFFFFF, and powers of 2 times these
+ includes 7,9 and also 14
+
+
+ z = 2**24-1
+ r = z mod x = 0
+
+ so choose b = 0
+
+ Also, in order to divide by z = 2**24-1, we approximate by dividing
+ by (z+1) = 2**24 (which is easy), and then correcting.
+
+ (ax) = (z+1)q' + r
+ . = zq' + (q'+r)
+
+ So to compute (ax)/z, compute q' = (ax)/(z+1) and r = (ax) mod (z+1)
+ Then the true remainder of (ax)/z is (q'+r). Repeat the process
+ with this new remainder, adding the tentative quotients together,
+ until a tentative quotient is 0 (and then we are done). There is
+ one last correction to be done. It is possible that (q'+r) = z.
+ If so, then (q'+r)/(z+1) = 0 and it looks like we are done. But,
+ in fact, we need to add 1 more to the quotient. Now, it turns
+ out that this happens if and only if the original value x is
+ an exact multiple of y. So, to avoid a three instruction test at
+ the end, instead use 1 instruction to add 1 to x at the beginning. */
+
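+/* A C sketch of the iteration for y = 7 (a = 0x249249, z = 2**24-1;
+   note the "+ 1" on x described above).  div7_model is a hypothetical
+   name, and this 64-bit model may take a few more iterations than the
+   two-register millicode:
+
+     unsigned div7_model (unsigned x)
+     {
+       unsigned long long p = 0x249249ULL * (x + 1ULL);
+       unsigned q = 0;
+       while (p >> 24)               // tentative quotient nonzero
+         {
+           q += p >> 24;
+           p = (p >> 24) + (p & 0xffffff);
+         }
+       return q;
+     }
+*/
+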
+/* DIVISION BY 7 (use z = 2**24-1; a = 249249) */
+GSYM($$divI_7)
+ .export $$divI_7,millicode
+ comb,<,n x2,0,LREF(neg7)
+LSYM(7)
+ addi 1,x2,x2 /* cannot overflow */
+ shd 0,x2,29,x1
+ sh3add x2,x2,x2
+ addc x1,0,x1
+LSYM(pos7)
+ shd x1,x2,26,t1
+ shd x2,0,26,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+
+ shd x1,x2,20,t1
+ shd x2,0,20,t2
+ add x2,t2,x2
+ addc x1,t1,t1
+
+ /* computed <t1,x2>. Now divide it by (2**24 - 1) */
+
+ copy 0,x1
+ shd,= t1,x2,24,t1 /* tentative quotient */
+LSYM(1)
+ addb,tr t1,x1,LREF(2) /* add to previous quotient */
+ extru x2,31,24,x2 /* new remainder (unadjusted) */
+
+ MILLIRETN
+
+LSYM(2)
+ addb,tr t1,x2,LREF(1) /* adjust remainder */
+ extru,= x2,7,8,t1 /* new quotient */
+
+LSYM(neg7)
+ subi 1,x2,x2 /* negate x2 and add 1 */
+LSYM(8)
+ shd 0,x2,29,x1
+ sh3add x2,x2,x2
+ addc x1,0,x1
+
+LSYM(neg7_shift)
+ shd x1,x2,26,t1
+ shd x2,0,26,t2
+ add x2,t2,x2
+ addc x1,t1,x1
+
+ shd x1,x2,20,t1
+ shd x2,0,20,t2
+ add x2,t2,x2
+ addc x1,t1,t1
+
+ /* computed <t1,x2>. Now divide it by (2**24 - 1) */
+
+ copy 0,x1
+ shd,= t1,x2,24,t1 /* tentative quotient */
+LSYM(3)
+ addb,tr t1,x1,LREF(4) /* add to previous quotient */
+ extru x2,31,24,x2 /* new remainder (unadjusted) */
+
+ MILLIRET
+ sub 0,x1,x1 /* negate result */
+
+LSYM(4)
+ addb,tr t1,x2,LREF(3) /* adjust remainder */
+ extru,= x2,7,8,t1 /* new quotient */
+
+GSYM($$divU_7)
+ .export $$divU_7,millicode
+ addi 1,x2,x2 /* can carry */
+ addc 0,0,x1
+ shd x1,x2,29,t1
+ sh3add x2,x2,x2
+ b LREF(pos7)
+ addc t1,x1,x1
+
+/* DIVISION BY 9 (use z = 2**24-1; a = 1c71c7) */
+GSYM($$divI_9)
+ .export $$divI_9,millicode
+ comb,<,n x2,0,LREF(neg9)
+ addi 1,x2,x2 /* cannot overflow */
+ shd 0,x2,29,t1
+ shd x2,0,29,t2
+ sub t2,x2,x2
+ b LREF(pos7)
+ subb t1,0,x1
+
+LSYM(neg9)
+ subi 1,x2,x2 /* negate and add 1 */
+ shd 0,x2,29,t1
+ shd x2,0,29,t2
+ sub t2,x2,x2
+ b LREF(neg7_shift)
+ subb t1,0,x1
+
+GSYM($$divU_9)
+ .export $$divU_9,millicode
+ addi 1,x2,x2 /* can carry */
+ addc 0,0,x1
+ shd x1,x2,29,t1
+ shd x2,0,29,t2
+ sub t2,x2,x2
+ b LREF(pos7)
+ subb t1,x1,x1
+
+/* DIVISION BY 14 (shift to divide by 2 then divide by 7) */
+GSYM($$divI_14)
+ .export $$divI_14,millicode
+ comb,<,n x2,0,LREF(neg14)
+GSYM($$divU_14)
+ .export $$divU_14,millicode
+ b LREF(7) /* go to 7 case */
+ extru x2,30,31,x2 /* divide by 2 */
+
+LSYM(neg14)
+ subi 2,x2,x2 /* negate (and add 2) */
+ b LREF(8)
+ extru x2,30,31,x2 /* divide by 2 */
+ .exit
+ .procend
+ .end
+#endif
+
+#ifdef L_mulI
+/* VERSION "@(#)$$mulI $ Revision: 12.4 $ $ Date: 94/03/17 17:18:51 $" */
+/******************************************************************************
+This routine is used on PA2.0 processors when gcc -mno-fpregs is used
+
+ROUTINE: $$mulI
+
+
+DESCRIPTION:
+
+ $$mulI multiplies two single word integers, giving a single
+ word result.
+
+
+INPUT REGISTERS:
+
+ arg0 = Operand 1
+ arg1 = Operand 2
+ r31 == return pc
+ sr0 == return space when called externally
+
+
+OUTPUT REGISTERS:
+
+ arg0 = undefined
+ arg1 = undefined
+ ret1 = result
+
+OTHER REGISTERS AFFECTED:
+
+ r1 = undefined
+
+SIDE EFFECTS:
+
+ Causes a trap under the following conditions: NONE
+ Changes memory at the following places: NONE
+
+PERMISSIBLE CONTEXT:
+
+ Unwindable
+ Does not create a stack frame
+	Is usable for internal or external millicode
+
+DISCUSSION:
+
+ Calls other millicode routines via mrp: NONE
+ Calls other millicode routines: NONE
+
+***************************************************************************/
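+
+/* A C sketch of the algorithm -- mulI_model is a hypothetical name.
+   One operand is consumed 8 bits at a time; each partial product
+   x * (y & 0xff) is what one entry of the 256-way blr table below
+   computes with a short shift-and-add sequence (e.g. x29: t0 = 3*a0,
+   t0 = 2*t0+a0 = 7*a0, t0 = 4*t0+a0 = 29*a0, r += t0):
+
+     unsigned mulI_model (unsigned x, unsigned y)
+     {
+       unsigned r = 0;
+       while (y)
+         {
+           r += x * (y & 0xff);      // table entry for these 8 bits
+           x <<= 8;                  // a0 <<= 8
+           y >>= 8;                  // a1 >>= 8
+         }
+       return r;                     // a0*a1 mod 2**32, signed or not
+     }
+
+   The swap/negate preamble only shortens the loop; modulo 2**32 the
+   chunked multiply is already exact.  */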
+
+
+#define a0 %arg0
+#define a1 %arg1
+#define t0 %r1
+#define r %ret1
+
+#define a0__128a0 zdep a0,24,25,a0
+#define a0__256a0 zdep a0,23,24,a0
+#define a1_ne_0_b_l0 comb,<> a1,0,LREF(l0)
+#define a1_ne_0_b_l1 comb,<> a1,0,LREF(l1)
+#define a1_ne_0_b_l2 comb,<> a1,0,LREF(l2)
+#define b_n_ret_t0 b,n LREF(ret_t0)
+#define b_e_shift b LREF(e_shift)
+#define b_e_t0ma0 b LREF(e_t0ma0)
+#define b_e_t0 b LREF(e_t0)
+#define b_e_t0a0 b LREF(e_t0a0)
+#define b_e_t02a0 b LREF(e_t02a0)
+#define b_e_t04a0 b LREF(e_t04a0)
+#define b_e_2t0 b LREF(e_2t0)
+#define b_e_2t0a0 b LREF(e_2t0a0)
+#define b_e_2t04a0 b LREF(e2t04a0)
+#define b_e_3t0 b LREF(e_3t0)
+#define b_e_4t0 b LREF(e_4t0)
+#define b_e_4t0a0 b LREF(e_4t0a0)
+#define b_e_4t08a0 b LREF(e4t08a0)
+#define b_e_5t0 b LREF(e_5t0)
+#define b_e_8t0 b LREF(e_8t0)
+#define b_e_8t0a0 b LREF(e_8t0a0)
+#define r__r_a0 add r,a0,r
+#define r__r_2a0 sh1add a0,r,r
+#define r__r_4a0 sh2add a0,r,r
+#define r__r_8a0 sh3add a0,r,r
+#define r__r_t0 add r,t0,r
+#define r__r_2t0 sh1add t0,r,r
+#define r__r_4t0 sh2add t0,r,r
+#define r__r_8t0 sh3add t0,r,r
+#define t0__3a0 sh1add a0,a0,t0
+#define t0__4a0 sh2add a0,0,t0
+#define t0__5a0 sh2add a0,a0,t0
+#define t0__8a0 sh3add a0,0,t0
+#define t0__9a0 sh3add a0,a0,t0
+#define t0__16a0 zdep a0,27,28,t0
+#define t0__32a0 zdep a0,26,27,t0
+#define t0__64a0 zdep a0,25,26,t0
+#define t0__128a0 zdep a0,24,25,t0
+#define t0__t0ma0 sub t0,a0,t0
+#define t0__t0_a0 add t0,a0,t0
+#define t0__t0_2a0 sh1add a0,t0,t0
+#define t0__t0_4a0 sh2add a0,t0,t0
+#define t0__t0_8a0 sh3add a0,t0,t0
+#define t0__2t0_a0 sh1add t0,a0,t0
+#define t0__3t0 sh1add t0,t0,t0
+#define t0__4t0 sh2add t0,0,t0
+#define t0__4t0_a0 sh2add t0,a0,t0
+#define t0__5t0 sh2add t0,t0,t0
+#define t0__8t0 sh3add t0,0,t0
+#define t0__8t0_a0 sh3add t0,a0,t0
+#define t0__9t0 sh3add t0,t0,t0
+#define t0__16t0 zdep t0,27,28,t0
+#define t0__32t0 zdep t0,26,27,t0
+#define t0__256a0 zdep a0,23,24,t0
+
+
+ SUBSPA_MILLI
+ ATTR_MILLI
+ .align 16
+ .proc
+ .callinfo millicode
+ .export $$mulI,millicode
+GSYM($$mulI)
+ combt,<<= a1,a0,LREF(l4) /* swap args if unsigned a1>a0 */
+ copy 0,r /* zero out the result */
+ xor a0,a1,a0 /* swap a0 & a1 using the */
+ xor a0,a1,a1 /* old xor trick */
+ xor a0,a1,a0
+LSYM(l4)
+ combt,<= 0,a0,LREF(l3) /* if a0>=0 then proceed like unsigned */
+ zdep a1,30,8,t0 /* t0 = (a1&0xff)<<1 ********* */
+ sub,> 0,a1,t0 /* otherwise negate both and */
+ combt,<=,n a0,t0,LREF(l2) /* swap back if |a0|<|a1| */
+ sub 0,a0,a1
+ movb,tr,n t0,a0,LREF(l2) /* 10th inst. */
+
+LSYM(l0) r__r_t0 /* add in this partial product */
+LSYM(l1) a0__256a0 /* a0 <<= 8 ****************** */
+LSYM(l2) zdep a1,30,8,t0 /* t0 = (a1&0xff)<<1 ********* */
+LSYM(l3) blr t0,0 /* case on these 8 bits ****** */
+ extru a1,23,24,a1 /* a1 >>= 8 ****************** */
+
+/*16 insts before this. */
+/* a0 <<= 8 ************************** */
+LSYM(x0) a1_ne_0_b_l2 ! a0__256a0 ! MILLIRETN ! nop
+LSYM(x1) a1_ne_0_b_l1 ! r__r_a0 ! MILLIRETN ! nop
+LSYM(x2) a1_ne_0_b_l1 ! r__r_2a0 ! MILLIRETN ! nop
+LSYM(x3) a1_ne_0_b_l0 ! t0__3a0 ! MILLIRET ! r__r_t0
+LSYM(x4) a1_ne_0_b_l1 ! r__r_4a0 ! MILLIRETN ! nop
+LSYM(x5) a1_ne_0_b_l0 ! t0__5a0 ! MILLIRET ! r__r_t0
+LSYM(x6) t0__3a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
+LSYM(x7) t0__3a0 ! a1_ne_0_b_l0 ! r__r_4a0 ! b_n_ret_t0
+LSYM(x8) a1_ne_0_b_l1 ! r__r_8a0 ! MILLIRETN ! nop
+LSYM(x9) a1_ne_0_b_l0 ! t0__9a0 ! MILLIRET ! r__r_t0
+LSYM(x10) t0__5a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
+LSYM(x11) t0__3a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
+LSYM(x12) t0__3a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
+LSYM(x13) t0__5a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
+LSYM(x14) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x15) t0__5a0 ! a1_ne_0_b_l0 ! t0__3t0 ! b_n_ret_t0
+LSYM(x16) t0__16a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
+LSYM(x17) t0__9a0 ! a1_ne_0_b_l0 ! t0__t0_8a0 ! b_n_ret_t0
+LSYM(x18) t0__9a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
+LSYM(x19) t0__9a0 ! a1_ne_0_b_l0 ! t0__2t0_a0 ! b_n_ret_t0
+LSYM(x20) t0__5a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
+LSYM(x21) t0__5a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
+LSYM(x22) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x23) t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x24) t0__3a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
+LSYM(x25) t0__5a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
+LSYM(x26) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x27) t0__3a0 ! a1_ne_0_b_l0 ! t0__9t0 ! b_n_ret_t0
+LSYM(x28) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x29) t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x30) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
+LSYM(x31) t0__32a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
+LSYM(x32) t0__32a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
+LSYM(x33) t0__8a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
+LSYM(x34) t0__16a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x35) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__t0_8a0
+LSYM(x36) t0__9a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
+LSYM(x37) t0__9a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
+LSYM(x38) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x39) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x40) t0__5a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
+LSYM(x41) t0__5a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
+LSYM(x42) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x43) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x44) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x45) t0__9a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
+LSYM(x46) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_a0
+LSYM(x47) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_2a0
+LSYM(x48) t0__3a0 ! a1_ne_0_b_l0 ! t0__16t0 ! b_n_ret_t0
+LSYM(x49) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_4a0
+LSYM(x50) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
+LSYM(x51) t0__9a0 ! t0__t0_8a0 ! b_e_t0 ! t0__3t0
+LSYM(x52) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x53) t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x54) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
+LSYM(x55) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x56) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x57) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x58) t0__3a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x59) t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__3t0
+LSYM(x60) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
+LSYM(x61) t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x62) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
+LSYM(x63) t0__64a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
+LSYM(x64) t0__64a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
+LSYM(x65) t0__8a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
+LSYM(x66) t0__32a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x67) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x68) t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x69) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x70) t0__64a0 ! t0__t0_4a0 ! b_e_t0 ! t0__t0_2a0
+LSYM(x71) t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__t0ma0
+LSYM(x72) t0__9a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
+LSYM(x73) t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_t0
+LSYM(x74) t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x75) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x76) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x77) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x78) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x79) t0__16a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
+LSYM(x80) t0__16a0 ! t0__5t0 ! b_e_shift ! r__r_t0
+LSYM(x81) t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_t0
+LSYM(x82) t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x83) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x84) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x85) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x86) t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x87) t0__9a0 ! t0__9t0 ! b_e_t02a0 ! t0__t0_4a0
+LSYM(x88) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x89) t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x90) t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
+LSYM(x91) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x92) t0__5a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
+LSYM(x93) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__3t0
+LSYM(x94) t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__t0_2a0
+LSYM(x95) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x96) t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
+LSYM(x97) t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x98) t0__32a0 ! t0__3t0 ! b_e_t0 ! t0__t0_2a0
+LSYM(x99) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x100) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
+LSYM(x101) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x102) t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
+LSYM(x103) t0__5a0 ! t0__5t0 ! b_e_t02a0 ! t0__4t0_a0
+LSYM(x104) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x105) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x106) t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x107) t0__9a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__8t0_a0
+LSYM(x108) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
+LSYM(x109) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x110) t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x111) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x112) t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__16t0
+LSYM(x113) t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__3t0
+LSYM(x114) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__3t0
+LSYM(x115) t0__9a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__3t0
+LSYM(x116) t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__4t0_a0
+LSYM(x117) t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
+LSYM(x118) t0__3a0 ! t0__4t0_a0 ! b_e_t0a0 ! t0__9t0
+LSYM(x119) t0__3a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__9t0
+LSYM(x120) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
+LSYM(x121) t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x122) t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x123) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x124) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
+LSYM(x125) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
+LSYM(x126) t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
+LSYM(x127) t0__128a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
+LSYM(x128) t0__128a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
+LSYM(x129) t0__128a0 ! a1_ne_0_b_l0 ! t0__t0_a0 ! b_n_ret_t0
+LSYM(x130) t0__64a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x131) t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x132) t0__8a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x133) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x134) t0__8a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x135) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__3t0
+LSYM(x136) t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x137) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x138) t0__8a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x139) t0__8a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__4t0_a0
+LSYM(x140) t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__5t0
+LSYM(x141) t0__8a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__2t0_a0
+LSYM(x142) t0__9a0 ! t0__8t0 ! b_e_2t0 ! t0__t0ma0
+LSYM(x143) t0__16a0 ! t0__9t0 ! b_e_t0 ! t0__t0ma0
+LSYM(x144) t0__9a0 ! t0__8t0 ! b_e_shift ! r__r_2t0
+LSYM(x145) t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x146) t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
+LSYM(x147) t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x148) t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x149) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x150) t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x151) t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
+LSYM(x152) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x153) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x154) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x155) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__5t0
+LSYM(x156) t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
+LSYM(x157) t0__32a0 ! t0__t0ma0 ! b_e_t02a0 ! t0__5t0
+LSYM(x158) t0__16a0 ! t0__5t0 ! b_e_2t0 ! t0__t0ma0
+LSYM(x159) t0__32a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
+LSYM(x160) t0__5a0 ! t0__4t0 ! b_e_shift ! r__r_8t0
+LSYM(x161) t0__8a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x162) t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_2t0
+LSYM(x163) t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__2t0_a0
+LSYM(x164) t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_4t0
+LSYM(x165) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x166) t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x167) t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
+LSYM(x168) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
+LSYM(x169) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x170) t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__5t0
+LSYM(x171) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__9t0
+LSYM(x172) t0__5a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__2t0_a0
+LSYM(x173) t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__9t0
+LSYM(x174) t0__32a0 ! t0__t0_2a0 ! b_e_t04a0 ! t0__5t0
+LSYM(x175) t0__8a0 ! t0__2t0_a0 ! b_e_5t0 ! t0__2t0_a0
+LSYM(x176) t0__5a0 ! t0__4t0_a0 ! b_e_8t0 ! t0__t0_a0
+LSYM(x177) t0__5a0 ! t0__4t0_a0 ! b_e_8t0a0 ! t0__t0_a0
+LSYM(x178) t0__5a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__8t0_a0
+LSYM(x179) t0__5a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__8t0_a0
+LSYM(x180) t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
+LSYM(x181) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x182) t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__2t0_a0
+LSYM(x183) t0__9a0 ! t0__5t0 ! b_e_2t0a0 ! t0__2t0_a0
+LSYM(x184) t0__5a0 ! t0__9t0 ! b_e_4t0 ! t0__t0_a0
+LSYM(x185) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x186) t0__32a0 ! t0__t0ma0 ! b_e_2t0 ! t0__3t0
+LSYM(x187) t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__5t0
+LSYM(x188) t0__9a0 ! t0__5t0 ! b_e_4t0 ! t0__t0_2a0
+LSYM(x189) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
+LSYM(x190) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__5t0
+LSYM(x191) t0__64a0 ! t0__3t0 ! b_e_t0 ! t0__t0ma0
+LSYM(x192) t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
+LSYM(x193) t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x194) t0__8a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x195) t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x196) t0__8a0 ! t0__3t0 ! b_e_4t0 ! t0__2t0_a0
+LSYM(x197) t0__8a0 ! t0__3t0 ! b_e_4t0a0 ! t0__2t0_a0
+LSYM(x198) t0__64a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
+LSYM(x199) t0__8a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
+LSYM(x200) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_8t0
+LSYM(x201) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x202) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x203) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__4t0_a0
+LSYM(x204) t0__8a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
+LSYM(x205) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__5t0
+LSYM(x206) t0__64a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__3t0
+LSYM(x207) t0__8a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
+LSYM(x208) t0__5a0 ! t0__5t0 ! b_e_8t0 ! t0__t0_a0
+LSYM(x209) t0__5a0 ! t0__5t0 ! b_e_8t0a0 ! t0__t0_a0
+LSYM(x210) t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__5t0
+LSYM(x211) t0__5a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__5t0
+LSYM(x212) t0__3a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__4t0_a0
+LSYM(x213) t0__3a0 ! t0__4t0_a0 ! b_e_4t0a0 ! t0__4t0_a0
+LSYM(x214) t0__9a0 ! t0__t0_4a0 ! b_e_2t04a0 ! t0__8t0_a0
+LSYM(x215) t0__5a0 ! t0__4t0_a0 ! b_e_5t0 ! t0__2t0_a0
+LSYM(x216) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
+LSYM(x217) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x218) t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
+LSYM(x219) t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
+LSYM(x220) t0__3a0 ! t0__9t0 ! b_e_4t0 ! t0__2t0_a0
+LSYM(x221) t0__3a0 ! t0__9t0 ! b_e_4t0a0 ! t0__2t0_a0
+LSYM(x222) t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__3t0
+LSYM(x223) t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
+LSYM(x224) t0__9a0 ! t0__3t0 ! b_e_8t0 ! t0__t0_a0
+LSYM(x225) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
+LSYM(x226) t0__3a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__32t0
+LSYM(x227) t0__9a0 ! t0__5t0 ! b_e_t02a0 ! t0__5t0
+LSYM(x228) t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
+LSYM(x229) t0__9a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__3t0
+LSYM(x230) t0__9a0 ! t0__5t0 ! b_e_5t0 ! t0__t0_a0
+LSYM(x231) t0__9a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
+LSYM(x232) t0__3a0 ! t0__2t0_a0 ! b_e_8t0 ! t0__4t0_a0
+LSYM(x233) t0__3a0 ! t0__2t0_a0 ! b_e_8t0a0 ! t0__4t0_a0
+LSYM(x234) t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__9t0
+LSYM(x235) t0__3a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__9t0
+LSYM(x236) t0__9a0 ! t0__2t0_a0 ! b_e_4t08a0 ! t0__3t0
+LSYM(x237) t0__16a0 ! t0__5t0 ! b_e_3t0 ! t0__t0ma0
+LSYM(x238) t0__3a0 ! t0__4t0_a0 ! b_e_2t04a0 ! t0__9t0
+LSYM(x239) t0__16a0 ! t0__5t0 ! b_e_t0ma0 ! t0__3t0
+LSYM(x240) t0__9a0 ! t0__t0_a0 ! b_e_8t0 ! t0__3t0
+LSYM(x241) t0__9a0 ! t0__t0_a0 ! b_e_8t0a0 ! t0__3t0
+LSYM(x242) t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__8t0_a0
+LSYM(x243) t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__3t0
+LSYM(x244) t0__5a0 ! t0__3t0 ! b_e_4t0 ! t0__4t0_a0
+LSYM(x245) t0__8a0 ! t0__3t0 ! b_e_5t0 ! t0__2t0_a0
+LSYM(x246) t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__3t0
+LSYM(x247) t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__3t0
+LSYM(x248) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_8t0
+LSYM(x249) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__8t0_a0
+LSYM(x250) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__5t0
+LSYM(x251) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__5t0
+LSYM(x252) t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
+LSYM(x253) t0__64a0 ! t0__t0ma0 ! b_e_t0 ! t0__4t0_a0
+LSYM(x254) t0__128a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
+LSYM(x255) t0__256a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
+/*1040 insts before this. */
+LSYM(ret_t0) MILLIRET
+LSYM(e_t0) r__r_t0
+LSYM(e_shift) a1_ne_0_b_l2
+ a0__256a0 /* a0 <<= 8 *********** */
+ MILLIRETN
+LSYM(e_t0ma0) a1_ne_0_b_l0
+ t0__t0ma0
+ MILLIRET
+ r__r_t0
+LSYM(e_t0a0) a1_ne_0_b_l0
+ t0__t0_a0
+ MILLIRET
+ r__r_t0
+LSYM(e_t02a0) a1_ne_0_b_l0
+ t0__t0_2a0
+ MILLIRET
+ r__r_t0
+LSYM(e_t04a0) a1_ne_0_b_l0
+ t0__t0_4a0
+ MILLIRET
+ r__r_t0
+LSYM(e_2t0) a1_ne_0_b_l1
+ r__r_2t0
+ MILLIRETN
+LSYM(e_2t0a0) a1_ne_0_b_l0
+ t0__2t0_a0
+ MILLIRET
+ r__r_t0
+LSYM(e2t04a0) t0__t0_2a0
+ a1_ne_0_b_l1
+ r__r_2t0
+ MILLIRETN
+LSYM(e_3t0) a1_ne_0_b_l0
+ t0__3t0
+ MILLIRET
+ r__r_t0
+LSYM(e_4t0) a1_ne_0_b_l1
+ r__r_4t0
+ MILLIRETN
+LSYM(e_4t0a0) a1_ne_0_b_l0
+ t0__4t0_a0
+ MILLIRET
+ r__r_t0
+LSYM(e4t08a0) t0__t0_2a0
+ a1_ne_0_b_l1
+ r__r_4t0
+ MILLIRETN
+LSYM(e_5t0) a1_ne_0_b_l0
+ t0__5t0
+ MILLIRET
+ r__r_t0
+LSYM(e_8t0) a1_ne_0_b_l1
+ r__r_8t0
+ MILLIRETN
+LSYM(e_8t0a0) a1_ne_0_b_l0
+ t0__8t0_a0
+ MILLIRET
+ r__r_t0
+
+ .procend
+ .end
+#endif
diff --git a/libgcc/config/pa/quadlib.c b/libgcc/config/pa/quadlib.c
new file mode 100644
index 00000000000..2c1160015ed
--- /dev/null
+++ b/libgcc/config/pa/quadlib.c
@@ -0,0 +1,245 @@
+/* Subroutines for long double support.
+ Copyright (C) 2000, 2002, 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* HPUX TFmode compare requires a library call to _U_Qfcmp. It takes
+ a magic number as its third argument which indicates what to do.
+ The return value is an integer to be compared against zero. The
+ comparison conditions are the same as those listed in Table 8-12
+ of the PA-RISC 2.0 Architecture book for the fcmp instruction. */
+
+/* Raise FP_INVALID on SNaN as a side effect. */
+#define QCMP_INV 1
+
+/* Comparison relations. */
+#define QCMP_UNORD 2
+#define QCMP_EQ 4
+#define QCMP_LT 8
+#define QCMP_GT 16
+
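+/* For example, "a <= b, raising FP_INVALID on a NaN" is a single call,
+   exactly the composition _U_Qfle below uses:
+
+     _U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_LT) != 0
+*/
+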
+int _U_Qfcmp (long double a, long double b, int);
+long _U_Qfcnvfxt_quad_to_sgl (long double);
+
+int _U_Qfeq (long double, long double);
+int _U_Qfne (long double, long double);
+int _U_Qfgt (long double, long double);
+int _U_Qfge (long double, long double);
+int _U_Qflt (long double, long double);
+int _U_Qfle (long double, long double);
+int _U_Qfltgt (long double, long double);
+int _U_Qfunle (long double, long double);
+int _U_Qfunlt (long double, long double);
+int _U_Qfunge (long double, long double);
+int _U_Qfungt (long double, long double);
+int _U_Qfuneq (long double, long double);
+int _U_Qfunord (long double, long double);
+int _U_Qford (long double, long double);
+
+int _U_Qfcomp (long double, long double);
+
+long double _U_Qfneg (long double);
+long double _U_Qfcopysign (long double, long double);
+
+#ifdef __LP64__
+int __U_Qfcnvfxt_quad_to_sgl (long double);
+#endif
+unsigned int _U_Qfcnvfxt_quad_to_usgl(long double);
+long double _U_Qfcnvxf_usgl_to_quad (unsigned int);
+unsigned long long _U_Qfcnvfxt_quad_to_udbl(long double);
+long double _U_Qfcnvxf_udbl_to_quad (unsigned long long);
+
+int
+_U_Qfeq (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_EQ) != 0);
+}
+
+int
+_U_Qfne (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_EQ) == 0);
+}
+
+int
+_U_Qfgt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_GT) != 0);
+}
+
+int
+_U_Qfge (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_GT) != 0);
+}
+
+int
+_U_Qflt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_LT) != 0);
+}
+
+int
+_U_Qfle (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_LT) != 0);
+}
+
+int
+_U_Qfltgt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_LT | QCMP_GT) != 0);
+}
+
+int
+_U_Qfunle (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ | QCMP_LT) != 0);
+}
+
+int
+_U_Qfunlt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_LT) != 0);
+}
+
+int
+_U_Qfunge (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ | QCMP_GT) != 0);
+}
+
+int
+_U_Qfungt (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_GT) != 0);
+}
+
+int
+_U_Qfuneq (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ) != 0);
+}
+
+int
+_U_Qfunord (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD) != 0);
+}
+
+int
+_U_Qford (long double a, long double b)
+{
+ return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_LT | QCMP_GT) != 0);
+}
+
+int
+_U_Qfcomp (long double a, long double b)
+{
+ if (_U_Qfcmp (a, b, QCMP_EQ) == 0)
+ return 0;
+
+ return (_U_Qfcmp (a, b, QCMP_UNORD | QCMP_EQ | QCMP_GT) != 0 ? 1 : -1);
+}
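+
+/* Illustrative: _U_Qfcomp yields a three-way, memcmp-style result;
+   e.g. _U_Qfcomp (1.0L, 2.0L) == -1 and _U_Qfcomp (2.0L, 2.0L) == 0,
+   while unordered operands compare as "greater" (1).  */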
+
+/* Negate long double A. */
+long double
+_U_Qfneg (long double a)
+{
+ union
+ {
+ long double ld;
+ int i[4];
+ } u;
+
+ u.ld = a;
+ u.i[0] ^= 0x80000000;
+ return u.ld;
+}
+
+/* Return long double A with sign changed to sign of long double B. */
+long double
+_U_Qfcopysign (long double a, long double b)
+{
+ union
+ {
+ long double ld;
+ int i[4];
+ } ua, ub;
+
+ ua.ld = a;
+ ub.ld = b;
+ ua.i[0] &= 0x7fffffff;
+ ua.i[0] |= (0x80000000 & ub.i[0]);
+ return ua.ld;
+}
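+
+/* Usage sketch (assumes PA's IEEE quad layout with the sign in the
+   most significant bit of the first 32-bit word, as the code above
+   does):
+
+     long double y = _U_Qfneg (2.0L);            // -2.0L
+     long double z = _U_Qfcopysign (3.0L, y);    // -3.0L
+*/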
+
+#ifdef __LP64__
+/* This routine is only necessary for the PA64 port; for reasons unknown
+   _U_Qfcnvfxt_quad_to_sgl returns the integer in the high 32 bits of the
+ return value. Ugh. */
+int
+__U_Qfcnvfxt_quad_to_sgl (long double a)
+{
+ return _U_Qfcnvfxt_quad_to_sgl (a) >> 32;
+}
+#endif
+
+/* HP only provides signed conversions in the C library, so we need to
+   synthesize the unsigned versions.  */
+unsigned int
+_U_Qfcnvfxt_quad_to_usgl (long double a)
+{
+ extern long long _U_Qfcnvfxt_quad_to_dbl (long double a);
+ return (unsigned int) _U_Qfcnvfxt_quad_to_dbl (a);
+}
+
+long double
+_U_Qfcnvxf_usgl_to_quad (unsigned int a)
+{
+ extern long double _U_Qfcnvxf_dbl_to_quad (long long);
+ return _U_Qfcnvxf_dbl_to_quad ((long long) a);
+}
+
+typedef union {
+ unsigned long long u[2];
+ long double d[1];
+} quad_type;
+
+unsigned long long
+_U_Qfcnvfxt_quad_to_udbl (long double a)
+{
+ extern quad_type _U_Qfcnvfxt_quad_to_quad (long double a);
+ quad_type u;
+ u = _U_Qfcnvfxt_quad_to_quad(a);
+ return u.u[1];
+}
+
+long double
+_U_Qfcnvxf_udbl_to_quad (unsigned long long a)
+{
+ extern long double _U_Qfcnvxf_quad_to_quad (quad_type a);
+ quad_type u;
+ u.u[0] = 0;
+ u.u[1] = a;
+ return _U_Qfcnvxf_quad_to_quad (u);
+}
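+
+/* Usage sketch: the unsigned conversions above piggy-back on HP's
+   signed entry points; e.g. _U_Qfcnvfxt_quad_to_usgl (42.0L) yields 42
+   via _U_Qfcnvfxt_quad_to_dbl, which works because every 32-bit
+   unsigned value is representable in that routine's signed 64-bit
+   result.  */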
diff --git a/libgcc/config/pa/stublib.c b/libgcc/config/pa/stublib.c
new file mode 100644
index 00000000000..d3cf559c8ab
--- /dev/null
+++ b/libgcc/config/pa/stublib.c
@@ -0,0 +1,97 @@
+/* Stub functions.
+ Copyright (C) 2006, 2009, 2010 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
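+/* Each stub below is compiled into its own object file by defining the
+   matching L_* macro, mirroring libgcc's lib1funcs convention; for
+   example, t-stublib builds pthread_once-stub.o roughly as
+     $(gcc_compile) -c -O2 -DL_pthread_once stublib.c
+   (a sketch of the actual make rules in t-stublib and t-stublib64).  */
+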
+#ifdef L_register_frame_info
+struct object;
+void __register_frame_info (const void * __attribute__((unused)),
+ struct object * __attribute__((unused)));
+void
+__register_frame_info (const void *p, struct object *ob)
+{
+}
+#endif
+
+#ifdef L_deregister_frame_info
+void *__deregister_frame_info (const void * __attribute__((unused)));
+void *
+__deregister_frame_info (const void *p)
+{
+ return (void *)0;
+}
+#endif
+
+#ifdef L_cxa_finalize
+void __cxa_finalize (void * __attribute__((unused)));
+void
+__cxa_finalize (void *p)
+{
+}
+#endif
+
+#ifdef L_Jv_RegisterClasses
+void _Jv_RegisterClasses (void * __attribute__((unused)));
+void
+_Jv_RegisterClasses (void *p)
+{
+}
+#endif
+
+#ifdef L_pthread_default_stacksize_np
+int pthread_default_stacksize_np (unsigned long __attribute__((unused)),
+ unsigned long *);
+int
+pthread_default_stacksize_np (unsigned long new, unsigned long *old)
+{
+ if (old)
+ *old = 0;
+ return 0;
+}
+#endif
+
+#ifdef L_pthread_mutex_lock
+int pthread_mutex_lock (void);
+int
+pthread_mutex_lock (void)
+{
+ return 0;
+}
+#endif
+
+#ifdef L_pthread_mutex_unlock
+int pthread_mutex_unlock (void);
+int
+pthread_mutex_unlock (void)
+{
+ return 0;
+}
+#endif
+
+#ifdef L_pthread_once
+int pthread_once (void);
+int
+pthread_once (void)
+{
+ return 0;
+}
+#endif
diff --git a/libgcc/config/pa/t-hpux b/libgcc/config/pa/t-hpux
new file mode 100644
index 00000000000..fcf93aba3ab
--- /dev/null
+++ b/libgcc/config/pa/t-hpux
@@ -0,0 +1,3 @@
+LIB2ADD = $(srcdir)/config/pa/lib2funcs.S $(srcdir)/config/pa/quadlib.c
+
+HOST_LIBGCC2_CFLAGS += -frandom-seed=fixed-seed
diff --git a/libgcc/config/pa/t-hpux10 b/libgcc/config/pa/t-hpux10
new file mode 100644
index 00000000000..5620f314a62
--- /dev/null
+++ b/libgcc/config/pa/t-hpux10
@@ -0,0 +1 @@
+HOST_LIBGCC2_CFLAGS += -D_T_HPUX10
diff --git a/libgcc/config/pa/t-linux b/libgcc/config/pa/t-linux
new file mode 100644
index 00000000000..2157de9b007
--- /dev/null
+++ b/libgcc/config/pa/t-linux
@@ -0,0 +1,10 @@
+# Plug millicode routines into libgcc.a.  We want these on both native and
+# cross compiles.  We use the "64-bit" routines because the "32-bit" code
+# is broken for certain corner cases.
+LIB1ASMSRC = pa/milli64.S
+LIB1ASMFUNCS = _divI _divU _remI _remU _div_const _mulI _dyncall
+
+HOST_LIBGCC2_CFLAGS += -DELF=1 -DLINUX=1
+
+LIB2ADD = $(srcdir)/config/pa/fptr.c
+LIB2ADD_ST = $(srcdir)/config/pa/linux-atomic.c
diff --git a/libgcc/config/pa/t-linux64 b/libgcc/config/pa/t-linux64
new file mode 100644
index 00000000000..1d0a6ada1a1
--- /dev/null
+++ b/libgcc/config/pa/t-linux64
@@ -0,0 +1,8 @@
+# Plug millicode routines into libgcc.a.  We want these on both native and
+# cross compiles.
+# FIXME: Explain why _dyncall is filtered out on the 64-bit port.
+LIB1ASMFUNCS := $(filter-out _dyncall, $(LIB1ASMFUNCS))
+
+LIB2ADD_ST = $(srcdir)/config/pa/linux-atomic.c
+
+HOST_LIBGCC2_CFLAGS += -Dpa64=1 -DELF=1
diff --git a/libgcc/config/pa/t-pa64 b/libgcc/config/pa/t-pa64
new file mode 100644
index 00000000000..98f28edb1c6
--- /dev/null
+++ b/libgcc/config/pa/t-pa64
@@ -0,0 +1,3 @@
+LIB2ADD = $(srcdir)/config/pa/quadlib.c
+
+HOST_LIBGCC2_CFLAGS += -Dpa64=1 -DELF=1 -mlong-calls
diff --git a/libgcc/config/pa/t-slibgcc-dwarf-ver b/libgcc/config/pa/t-slibgcc-dwarf-ver
new file mode 100644
index 00000000000..fa4688d6994
--- /dev/null
+++ b/libgcc/config/pa/t-slibgcc-dwarf-ver
@@ -0,0 +1,3 @@
+# Set the version number of the shared libgcc library (DWARF2 EH).
+
+SHLIB_SOVERSION = 4
diff --git a/libgcc/config/pa/t-slibgcc-hpux b/libgcc/config/pa/t-slibgcc-hpux
new file mode 100644
index 00000000000..d7ed64a8652
--- /dev/null
+++ b/libgcc/config/pa/t-slibgcc-hpux
@@ -0,0 +1,25 @@
+# Copyright (C) 2001, 2003, 2004, 2005, 2006, 2011
+# Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build a shared libgcc library with the HP-UX linker on PA.
+
+SHLIB_EXT = .sl
+SHLIB_SOLINK = @shlib_base_name@$(SHLIB_EXT)
+SHLIB_SONAME = @shlib_base_name@.$(SHLIB_SOVERSION)
+SHLIB_LC =
diff --git a/libgcc/config/pa/t-slibgcc-sjlj-ver b/libgcc/config/pa/t-slibgcc-sjlj-ver
new file mode 100644
index 00000000000..00140cf204f
--- /dev/null
+++ b/libgcc/config/pa/t-slibgcc-sjlj-ver
@@ -0,0 +1,3 @@
+# Set the version number of the shared libgcc library (SJLJ EH).
+
+SHLIB_SOVERSION = 3
diff --git a/libgcc/config/pa/t-stublib b/libgcc/config/pa/t-stublib
new file mode 100644
index 00000000000..017998ff689
--- /dev/null
+++ b/libgcc/config/pa/t-stublib
@@ -0,0 +1,21 @@
+LIBGCCSTUB_OBJS = pthread_default_stacksize_np-stub.o \
+ pthread_mutex_lock-stub.o \
+ pthread_mutex_unlock-stub.o \
+ pthread_once-stub.o
+
+pthread_default_stacksize_np-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_pthread_default_stacksize_np $<
+
+pthread_mutex_lock-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_pthread_mutex_lock $<
+
+pthread_mutex_unlock-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_pthread_mutex_unlock $<
+
+pthread_once-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_pthread_once $<
+
+libgcc_stub.a: $(LIBGCCSTUB_OBJS)
+ -rm -rf $@
+	$(AR) rc $@ $^
+ $(RANLIB) $@
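+
+# Illustrative only: a further stub would follow the same pattern, e.g.
+# for a hypothetical L_pthread_foo guard added to stublib.c:
+#
+# pthread_foo-stub.o: $(srcdir)/config/pa/stublib.c
+#	$(gcc_compile) -c -O2 -DL_pthread_foo $<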
diff --git a/libgcc/config/pa/t-stublib64 b/libgcc/config/pa/t-stublib64
new file mode 100644
index 00000000000..5d0d9682abf
--- /dev/null
+++ b/libgcc/config/pa/t-stublib64
@@ -0,0 +1,13 @@
+LIBGCCSTUB_OBJS += rfi-stub.o dfi-stub.o jvrc-stub.o cxaf-stub.o
+
+rfi-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_register_frame_info $<
+
+dfi-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_deregister_frame_info $<
+
+cxaf-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_cxa_finalize $<
+
+jvrc-stub.o: $(srcdir)/config/pa/stublib.c
+ $(gcc_compile) -c -O2 -DL_Jv_RegisterClasses $<
diff --git a/libgcc/config/pdp11/t-pdp11 b/libgcc/config/pdp11/t-pdp11
new file mode 100644
index 00000000000..7cadae11a2a
--- /dev/null
+++ b/libgcc/config/pdp11/t-pdp11
@@ -0,0 +1,8 @@
+LIB2ADD = $(srcdir)/udivmod.c \
+ $(srcdir)/udivmodsi4.c \
+ $(srcdir)/memcmp.c \
+ $(srcdir)/memcpy.c \
+ $(srcdir)/memmove.c \
+ $(srcdir)/memset.c
+
+HOST_LIBGCC2_CFLAGS += -O2 -mfloat32
diff --git a/libgcc/config/picochip/adddi3.S b/libgcc/config/picochip/adddi3.S
new file mode 100644
index 00000000000..77373ed9f64
--- /dev/null
+++ b/libgcc/config/picochip/adddi3.S
@@ -0,0 +1,194 @@
+// picoChip ASM file
+//
+// Support for 64-bit addition.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global __adddi3
+__adddi3:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &__adddi3 = 12 bytes
+
+	// The first operand of the addition is entirely in registers r[2-5].
+	// The second operand is on the stack at FP(0-3), and the result
+	// must be stored at the address held in register r0.
+	// All we need to do is load the appropriate values, add them
+	// appropriately (with add or addc) and then store the values back.
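+	//
+	// Illustrative carry flow (not part of the original commentary):
+	// the low word pair is combined with add.0, which sets the carry
+	// flag; each higher word pair then uses addc.0, which consumes the
+	// incoming carry and regenerates it, so a sum such as
+	// 0xffffffffffffffff + 1 ripples a carry through all four 16-bit
+	// words and yields zero.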
+
+ ldw (FP)0, r1
+ stl r[7:6], (FP)-1
+ add.0 r2, r1, r6
+ ldw (FP)1, r1
+ addc.0 r3, r1, r7
+ ldl (FP)1, r[3:2]
+ stl r[7:6], (r0)0
+ addc.0 r4, r2, r6
+ addc.0 r5, r3, r7
+ stl r[7:6], (r0)1
+ jr (r12)
+=-> ldl (FP)-1, r[7:6]
+
+_picoMark_FUNCTION_END=
+
+// picoChip Function Epilogue : __adddi3
+
+
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0xe // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+//Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each hex
+// digit is specified using the format 16#XX#
+.ascii 16#5f# 16#61# 16#64# 16#64# 16#64# 16#69# 16#33# 16#0# // Function name `_adddi3'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
diff --git a/libgcc/config/picochip/ashlsi3.S b/libgcc/config/picochip/ashlsi3.S
new file mode 100644
index 00000000000..688cd8d96ff
--- /dev/null
+++ b/libgcc/config/picochip/ashlsi3.S
@@ -0,0 +1,193 @@
+// picoChip ASM file
+//
+// Support for 32-bit arithmetic shift left.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global ___ashlsi3
+___ashlsi3:
+_picoMark_FUNCTION_BEGIN=
+// picoChip Function Prologue : &___ashlsi3 = 0 bytes
+
+ // if (R2 > 15) goto _L2
+ SUB.0 15,R2,r15
+ JMPLT _L2
+=->	SUB.0 16,R2,R5		// R5 := 16 - R2
+
+	LSL.0 R1,R2,R1		// R1 := R1 << R2
+	LSL.0 R0,R2,R4		// R4 := R0 << R2
+
+	LSR.0 R0,R5,R5		// R5 := R0 >> (16 - R2)
+	OR.0 R5,R1,R5		// R5 := R5 IOR R1
+	SUB.0 R2,0,r15
+	COPYNE R5,R1
+	JR (R12)		// Return to caller
+=->	COPY.0 R4,R0
+
+_L2:
+	LSL.0 R0,R2,R1		// R1 := R0 << R2
+	JR (R12)		// Return to caller
+=->	COPY.0 0,R0		// R0 := 0 (short constant)
+
+_picoMark_FUNCTION_END=
+
+// picoChip Function Epilogue : __ashlsi3
+
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+//Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each hex
+// digit is specified using the format 16#XX#
+.ascii 16#5f# 16#61# 16#73# 16#68# 16#6c# 16#73# 16#69# 16#33# 16#0# // Function name `_ashlsi3'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
diff --git a/libgcc/config/picochip/ashlsi3.c b/libgcc/config/picochip/ashlsi3.c
new file mode 100644
index 00000000000..600461c0b83
--- /dev/null
+++ b/libgcc/config/picochip/ashlsi3.c
@@ -0,0 +1,82 @@
+/*
+
+picoChip GCC support for 32-bit shift left.
+
+Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+Contributed by Picochip Ltd.
+Maintained by Daniel Towner (daniel.towner@picochip.com)
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef PICOCHIP
+#error "Intended for compilation for PICOCHIP only."
+#endif
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+
+typedef struct USIstruct {
+ UHItype low, high;
+} USIstruct;
+
+typedef union USIunion {
+ USItype l;
+ USIstruct s;
+} USIunion;
+
+USItype __ashlsi3(USIunion value, HItype count) {
+ USIunion result;
+ int temp;
+
+ /* Ignore a zero count until we get into the (count < 16)
+ clause. This is slightly slower when shifting by zero, but faster
+ and smaller in all other cases (due to the better scheduling
+ opportunities available by putting the test near computational
+     instructions).  */
+ /* if (count == 0) return value.l; */
+
+ if (count < 16) {
+ /* Shift low and high words by the count. */
+ result.s.low = value.s.low << count;
+ result.s.high = value.s.high << count;
+
+ /* There is now a hole in the lower `count' bits of the high
+ word. Shift the upper `count' bits of the low word into the
+ high word. This is only required when the count is non-zero. */
+ if (count != 0) {
+ temp = 16 - count;
+ temp = value.s.low >> temp;
+ result.s.high |= temp;
+ }
+
+ } else {
+ /* Shift the lower word of the source into the upper word of the
+ result, and zero the result's lower word. */
+ count -= 16;
+ result.s.high = value.s.low << count;
+ result.s.low = 0;
+
+ }
+
+ return result.l;
+
+}
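+
+/* Worked example (illustrative; assumes s.high maps to the upper 16
+   bits of the SImode value):
+     __ashlsi3 (0x00012345, 4):
+       count < 16, so
+         result.s.low  = 0x2345 << 4                    = 0x3450
+         result.s.high = (0x0001 << 4) | (0x2345 >> 12) = 0x0012
+       giving 0x00123450 == 0x00012345 << 4.  */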
+
diff --git a/libgcc/config/picochip/ashrsi3.S b/libgcc/config/picochip/ashrsi3.S
new file mode 100644
index 00000000000..fddd70b6895
--- /dev/null
+++ b/libgcc/config/picochip/ashrsi3.S
@@ -0,0 +1,202 @@
+// picoChip ASM file
+//
+// Support for 32-bit arithmetic shift right.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global ___ashrsi3
+___ashrsi3:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &___ashrsi3 = 0 bytes
+
+ // if (R2 > 15) goto _L2
+ SUB.0 15,R2,r15
+ JMPLT _L2
+=-> COPY.0 R1,R3
+
+ LSR.0 R1,R2,R1 // R1 := R1 >> R2
+ // if (R2 == 0) goto _L4
+ SUB.0 R2,0,r15
+ JMPEQ _L4
+=->	LSR.0 R0,R2,R0	// R0 := R0 >> R2
+
+	SUB.0 16,R2,R4	// R4 := 16 - R2
+	ASR.0 R3,15,R5	// R5 := R3 >>{arith} 15 (word of sign bits)
+	LSL.0 R5,R4,R5	// R5 := R5 << R4
+	LSL.0 R3,R4,R4	// R4 := R3 << R4
+	OR.0 R5,R1,R1	// R1 := R5 IOR R1
+	BRA _L4
+	=-> OR.0 R4,R0,R0	// R0 := R4 IOR R0
+_L2:
+	ASR.0 R1,15,R1	// R1 := R1 >>{arith} 15 (word of sign bits)
+	SUB.0 16,R2,R5	// R5 := 16 - R2
+	LSR.0 R3,R2,R0	// R0 := R3 >> R2
+	LSL.0 R1,R5,R5	// R5 := R1 << R5
+	OR.0 R5,R0,R5	// R5 := R5 IOR R0
+	SUB.0 R2,16,r15	// set flags from R2 - 16
+ COPYNE R5,R0
+_L4:
+ JR (R12) // Return to caller
+
+_picoMark_FUNCTION_END=
+
+// picoChip Function Epilogue : __ashrsi3
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+//Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each hex
+// digit is specified using the format 16#XX#
+.ascii 16#5f# 16#61# 16#73# 16#68# 16#72# 16#73# 16#69# 16#33# 16#0# // Function name `_ashrsi3'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/ashrsi3.c b/libgcc/config/picochip/ashrsi3.c
new file mode 100644
index 00000000000..4f1567b1347
--- /dev/null
+++ b/libgcc/config/picochip/ashrsi3.c
@@ -0,0 +1,113 @@
+/*
+
+picoChip GCC support for 32-bit arithmetic shift right.
+
+Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+Contributed by Picochip Ltd.
+Maintained by Daniel Towner (daniel.towner@picochip.com)
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+
+typedef struct USIstruct {
+ UHItype low, high;
+} USIstruct;
+
+typedef union USIunion {
+ USItype l;
+ USIstruct s;
+} USIunion;
+
+USItype __ashrsi3(USIunion value, HItype count) {
+ USIunion result;
+ int temp;
+ int wordOfSignBits;
+
+ /* Ignore a zero count until we get into the (count < 16)
+ clause. This is slightly slower when shifting by zero, but faster
+ and smaller in all other cases (due to the better scheduling
+ opportunities available by putting the test near computational
+     instructions).  */
+ /* if (count == 0) return value.l; */
+
+ if (count < 16) {
+ /* Shift low and high words by the count. The high word must use
+ an arithmetic shift. There is no arithmetic shift-right by
+ variable, so synthesise it. */
+ int signWord;
+ int reverseCount;
+
+ /* Shift low and high parts by the count. The upper word now has
+     invalid sign bits. */
+ result.s.low = value.s.low >> count;
+ result.s.high = value.s.high >> count;
+
+ if (count != 0) {
+
+ reverseCount = 16 - count;
+
+ /* Given a word of sign bits, shift back left to create the
+ destination sign bits. */
+ wordOfSignBits = __builtin_asri(value.s.high, 15);
+ signWord = wordOfSignBits << reverseCount;
+ result.s.high |= signWord;
+
+ /* There is now a hole in the upper `count' bits of the low
+ word. Shift the lower `count' bits of the upper word into the
+ low word. */
+ temp = value.s.high << reverseCount;
+ result.s.low |= temp;
+ }
+
+ } else {
+
+ /* Shift is greater than one word, so top word will always be set
+ to sign bits, and bottom word will be shifted from top word. */
+ result.s.low = value.s.high >> count;
+ result.s.high = __builtin_asri(value.s.high, 15);
+
+ if (count != 16) {
+
+ /* Shift the upper word of the source into the lower word of the
+ result. Arithmetically shift the upper word as well, to retain
+ the sign. This shift must be synthesised, as no such shift
+ exists in the instruction set. */
+ int signWord;
+
+
+ /* Given a complete word of sign-bits, shift this back left to
+ create the destination sign bits. */
+ signWord = result.s.high << (16 - count);
+
+ /* Insert the sign bits to the result's low word. */
+ result.s.low |= signWord;
+
+ }
+
+ }
+
+ return result.l;
+
+}
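+
+/* Worked example (illustrative; assumes s.high maps to the upper 16
+   bits): __ashrsi3 (0xffff8000, 4), i.e. -32768 >> 4:
+     result.s.low  = 0x8000 >> 4 = 0x0800, then OR in
+                     0xffff << 12 = 0xf000           -> 0xf800
+     result.s.high = (0xffff >> 4) | (0xffff << 12)  = 0xffff
+   giving 0xfffff800 == -2048.  */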
diff --git a/libgcc/config/picochip/clzsi2.S b/libgcc/config/picochip/clzsi2.S
new file mode 100644
index 00000000000..d5c99aa7154
--- /dev/null
+++ b/libgcc/config/picochip/clzsi2.S
@@ -0,0 +1,189 @@
+// Copyright (C) 2008, 2011 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+// picoChip ASM file
+//.file "clzsi2.S"
+
+.section .text
+
+.global __clzsi2
+__clzsi2:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &__clzsi2 = 0 bytes
+
+ // What value should be operated on? If the top word is empty
+ // then count the bits in the bottom word, and add 16. If the
+ // top word is not empty, then count the bits in the top word.
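+	//
+	// For example (illustrative): __clzsi2 (0x00010000) counts the
+	// 15 leading zeros of the top word, giving 15, while
+	// __clzsi2 (0x00000001) counts 15 in the bottom word and adds
+	// 16, giving 31.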
+
+ sub.0 R1,0,r15 \ copy.1 16,r2
+ copyeq r0,r1
+ copyne 0,r2
+
+ // R1 now stores value to count, and R2 stores current bit offset.
+ sbc r1,r0
+ asr.0 r1,15,r15 \ add.1 r0,1,r0
+ jr (lr) \ copyne 0,r0
+=-> add.0 r0,r2,r0
+
+_picoMark_FUNCTION_END=
+
+// picoChip Function Epilogue : __clzsi2
+
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+//Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each hex
+// digit is specified using the format 16#XX#
+.ascii 16#5F# 16#63# 16#6C# 16#7A# 16#73# 16#69# 16#32# 16#0# // Function name `_clzsi2'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/cmpsi2.S b/libgcc/config/picochip/cmpsi2.S
new file mode 100644
index 00000000000..95322f32419
--- /dev/null
+++ b/libgcc/config/picochip/cmpsi2.S
@@ -0,0 +1,212 @@
+// picoChip ASM file
+//.file "cmpsi2.c"
+//
+// Support for 32-bit signed compare.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+// Compiled from the following, and then hand optimised.
+//
+// int __cmpsi2 (USItype x, USItype y)
+// {
+//
+// SIunion lx; lx.l = x;
+// SIunion ly; ly.l = y;
+//
+// if (lx.s.high < ly.s.high)
+// return 0;
+// else if (lx.s.high > ly.s.high)
+// return 2;
+// if (lx.s.low < ly.s.low)
+// return 0;
+// else if (lx.s.low > ly.s.low)
+// return 2;
+// return 1;
+// }
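+//
+// Illustrative: the result encodes less/equal/greater as 0/1/2, so
+// __cmpsi2 (-1, 1) returns 0 (the high words compare as signed, and
+// -1 < 0), while __cmpsi2 (0x10000, 0x10000) returns 1.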
+
+.section .text
+
+.align 8
+.global ___cmpsi2
+___cmpsi2:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &___cmpsi2 = 0 bytes
+
+ SUB.0 R1,R3,r15
+
+ BLT _L1
+=-> SUB.0 R3,R1,r15 \ COPY.1 0,R5
+
+ BLT _L1
+=-> SUB.0 R0,R2,r15 \ COPY.1 2,R5
+
+ BLO _L1
+=-> SUB.0 R2,R0,r15 \ COPY.1 0,R5
+
+ BLO _L1
+=-> COPY.0 2,R5
+
+ COPY.0 1,R5
+_L1:
+ JR (R12)
+=-> COPY.0 R5,R0
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __cmpsi2
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+//Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each hex
+// digit is specified using the format 16#XX#
+.ascii 16#5f# 16#5f# 16#63# 16#6d# 16#70# 16#73# 16#69# 16#32# 16#0# // Function name `__cmpsi2'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/divmod15.S b/libgcc/config/picochip/divmod15.S
new file mode 100644
index 00000000000..d314b3be570
--- /dev/null
+++ b/libgcc/config/picochip/divmod15.S
@@ -0,0 +1,261 @@
+// picoChip ASM file
+//
+// Support for 16-bit unsigned division/modulus.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global __divmod15
+__divmod15:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &__divmod15 = 0 bytes
+
+ // The picoChip instruction set has a divstep instruction which
+ // is used to perform one iteration of a binary division algorithm.
+ // The instruction allows 16-bit signed division to be implemented.
+ // It does not directly allow 16-bit unsigned division to be
+ // implemented. Thus, this function pulls out the common division
+ // iteration for 15-bits unsigned, and then special wrappers
+	// iteration for 15-bit unsigned values, and then special wrappers
+ // unsigned division, as appropriate. This allows the two
+ // versions of division to share a common implementation, reducing
+ // code size when the two are used together. It also reduces
+ // the maintenance overhead.
+
+ // Input:
+ // r0 - dividend
+ // r1 - divisor
+ // Output:
+ // r0 - quotient
+ // r1 - remainder
+ // R5 is unused
+
+ // Check for special cases. The emphasis is on detecting these as
+ // quickly as possible, so that the main division can be started. If
+ // the user requests division by one, division by self, and so on
+ // then they will just have to accept that this won't be particularly
+ // quick (relatively), whereas a real division (e.g., dividing a
+ // large value by a small value) will run as fast as possible
+ // (i.e., special case detection should not slow down the common case)
+ //
+ // Special cases to consider:
+ //
+ // Division by zero.
+ // Division of zero.
+ // Inputs are equal
+ // Divisor is bigger than dividend
+ // Division by power of two (can be shifted instead).
+ // Division by 1 (special case of power of two division)
+ //
+ // Division/modulus by zero is undefined (ISO C:6.5.5), so
+ // don't bother handling this special case.
+ //
+ // The special cases of division by a power of 2 are ignored, since
+ // they cause the general case to slow down. Omitting these
+ // special cases also reduces code size considerably.
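+	//
+	// Worked example (illustrative): 100 / 9.  The sbc instructions
+	// measure the leading-bit positions of the dividend and divisor;
+	// the divisor is shifted left by the difference (9 << 3 = 72) so
+	// its most significant bit lines up with the dividend's, and
+	// divstep then iterates once per aligned bit, leaving the
+	// quotient (11) in r0 and the remainder (1) in r1 on return.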
+
+ // Handle divisor >= dividend separately. Note that this also handles
+ // the case where the dividend is zero. Note that the flags must be
+ // preserved, since they are also used at the branch destination.
+ sub.0 r1,r0,r15
+ sbc r0,r2 \ bge divisorGeDividend
+=-> sbc r1,r4
+
+ // Compute the shift count. The amount by which the divisor
+ // must be shifted left to be aligned with the dividend.
+ sub.0 r4,r2,r3
+
+ // Align the divisor to the dividend. Execute a divstep (since at
+ // least one will always be executed). Skip the remaining loop
+ // if the shift count is zero.
+ lsl.0 r1,r3,r1 \ beq skipLoop
+=-> divstep r0,r1 \ add.1 r3,1,r2
+
+ // Execute the divstep loop until temp is 0. This assumes that the
+ // loop count is at least one.
+ sub.0 r3,1,r4
+divLoop:
+ divstep r0,r1 \ bne divLoop
+=-> sub.0 r4,1,r4
+
+skipLoop:
+
+ // The top bits of the result are the remainder. The bottom
+ // bits are the quotient.
+ lsr.0 r0,r2,r1 \ sub.1 16,r2,r4
+ jr (lr ) \ lsl.0 r0,r4,r0
+=-> lsr.0 r0,r4,r0
+
+// Special case.
+
+divisorGeDividend:
+ // The divisor is greater than or equal to the dividend. The flags
+ // indicate which of these alternatives it is. The COPYNE can be used
+ // to set the result appropriately, without introducing any more
+ // branches.
+ copy.0 r0,r1 \ copy.1 0,r0
+ jr (lr) \ copyeq r0,r1
+=-> copyeq 1,r0
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __divmod15
+
+
+//============================================================================
+// All DWARF information between this marker, and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
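+// (For example, `echo _divmod15 | od -t x1' prints the bytes
+// 5f 64 69 76 6d 6f 64 31 35 0a; drop the trailing 0a, which is the
+// newline from echo, and write the terminator as 16#0# instead.)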
+.ascii 16#5f# 16#64# 16#69# 16#76# 16#6d# 16#6f# 16#64# 16#31# 16#35# 16#0# // Function name `_divmod15'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/divmodhi4.S b/libgcc/config/picochip/divmodhi4.S
new file mode 100644
index 00000000000..9dad674c7bc
--- /dev/null
+++ b/libgcc/config/picochip/divmodhi4.S
@@ -0,0 +1,246 @@
+// picoChip ASM file
+//
+// Support for 16-bit signed division/modulus.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global __divmodhi4
+__divmodhi4:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &__divmodhi4 = 4 bytes
+
+ // 16-bit signed division. Most of the special cases are dealt
+ // with by the 15-bit signed division library (e.g., division by
+ // zero, division by 1, and so on). This wrapper simply inverts
+ // any negative inputs, calls the 15-bit library, and flips any
+ // results as necessary. The only special cases to be handled
+ // here are those where either the divisor or the dividend is
+ // the maximum negative value.
+
+ // Encode r5 with a bit pattern which indicates whether the
+ // outputs of the division must be negated. The MSB will be set
+ // to the sign of the dividend (which controls the remainder's
+ // sign), while the LSB will store the XOR of the two signs,
+ // which indicates the quotient's sign. R5 is not modified by the
+ // 15-bit divmod routine.
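+
+ // In C terms, the flag word and the final fixup amount to the
+ // following (a sketch only; __divmod15u is a hypothetical stand-in
+ // for the 15-bit routine above, and a, b are the signed dividend
+ // and divisor held in r0 and r1):
+ //
+ // short rem, quot = __divmod15u (abs (a), abs (b), &rem);
+ // if ((a < 0) ^ (b < 0))   /* the LSB of r5 */
+ //   quot = -quot;
+ // if (a < 0)               /* the MSB of r5 */
+ //   rem = -rem;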
+ sub.0 r1,16#8000#,r15 \ asr.1 r0,15,r4
+ beq divisorIsLargestNegative \ lsr.0 r1,15,r3
+=-> sub.0 r0,16#8000#,r15 \ xor.1 r3,r4,r5
+
+ // Handle least negative dividend with a special case. Note that the
+ // absolute value of the divisor is also computed here.
+ add.0 [asr r1,15],r1,r3 \ beq dividendIsLargestNegative
+=-> xor.0 [asr r1,15],r3,r1 \ stw lr,(fp)-1
+
+ // Compute the absolute value of the dividend, and call the main
+ // divide routine.
+ add.0 r4,r0,r2 \ jl (&__divmod15) // fn_call &__divmod15
+=-> xor.0 r4,r2,r0
+
+handleNegatedResults:
+ // Speculatively store the negation of the results.
+ sub.0 0,r0,r2 \ sub.1 0,r1,r3
+
+ // Does the quotient need negating? The LSB indicates this.
+ and.0 r5,1,r15 \ ldw (fp)-1,lr
+ copyne r2,r0
+
+ asr.0 r5,15,r15 \ jr (lr)
+=-> copyne r3,r1
+
+dividendIsLargestNegative:
+
+ // Divide the constant -32768. Use the Hacker's Delight
+ // algorithm (i.e., ((dividend / 2) / divisor) * 2 gives an
+ // approximate answer). This code is a special case, so no
+ // great effort is made to make it fast, only to make it
+ // small.
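+
+ // In C terms (a sketch only; 32768 is even, so halving the
+ // dividend loses no bits, and the compensation step below is the
+ // only correction needed):
+ //
+ // q = (32768 / 2) / divisor;
+ // r = (32768 / 2) % divisor;
+ // q *= 2; r *= 2;
+ // if (r >= divisor) { q += 1; r -= divisor; }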
+
+ lsr.0 r0,1,r0 \ jl (&__divmod15) // fn_call &__divmod15
+=-> stw r1,(fp)-2
+
+ // Load the original divisor, and compute the new quotient and
+ // remainder.
+ lsl.0 r0,1,r0 \ ldw (fp)-2,r3
+ lsl.0 r1,1,r1 // Fill stall slot
+
+ // The error in the quotient is 0 or 1. The error can be determined
+ // by comparing the remainder to the original divisor. If the
+ // remainder is bigger, then an error of 1 has been introduced,
+ // which must be fixed.
+ sub.0 r1,r3,r15
+ blo noCompensationForError
+=-> nop
+ add.0 r0,1,r0 \ sub.1 r1,r3,r1
+noCompensationForError:
+ bra handleNegatedResults
+=-> nop
+
+divisorIsLargestNegative:
+ // The flags indicate whether the dividend is also the maximum
+ // negative value.
+ copy.0 r0,r1 \ copy.1 0,r0
+ copyeq r0,r1 \ jr (lr)
+=-> copyeq 1,r0
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __divmodhi4
+
+
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x4 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#64# 16#69# 16#76# 16#6d# 16#6f# 16#64# 16#68# 16#69# 16#34# 16#0# // Function name `_divmodhi4'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
diff --git a/libgcc/config/picochip/divmodsi4.S b/libgcc/config/picochip/divmodsi4.S
new file mode 100644
index 00000000000..4fc1acb1b63
--- /dev/null
+++ b/libgcc/config/picochip/divmodsi4.S
@@ -0,0 +1,233 @@
+// picoChip ASM file
+//
+// Support for 32-bit signed division/modulus.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global __divmodsi4
+__divmodsi4:
+_picoMark_FUNCTION_BEGIN=
+// picoChip Function Prologue : &__divmodsi4 = 8 bytes
+
+ // Note: optimising for size is preferred over optimising for speed.
+
+ // Note: the frame is set up over the course of the following
+ // instructions, and is complete at the point the udivmodsi4
+ // function is called.
+
+ // Note that R9 is encoded with a pattern which indicates
+ // whether the remainder and quotient should be negated on
+ // completion. The MSB is set to the sign of the dividend
+ // (i.e., the sign of the remainder), while the LSB encodes
+ // the XOR of the two inputs' signs (i.e., the sign of the
+ // quotient).
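+
+ // The overall flow, in C terms (a sketch only; __udivmodsi4 is the
+ // unsigned routine called below, but the prototype shown here is
+ // illustrative, not its real register convention, and labs() is
+ // likewise only a stand-in for the negation sequences):
+ //
+ // unsigned long ur, uq = __udivmodsi4 (labs (num), labs (den), &ur);
+ // quot = ((num < 0) ^ (den < 0)) ? -uq : uq;  /* LSB of R9 */
+ // rem  = (num < 0) ? -ur : ur;                /* MSB of R9 */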
+
+ // If dividend is negative, invert the dividend and flag.
+ ASR.0 r1,15,r4
+ BEQ dividendNotNegative
+=-> STL R[9:8],(FP)-2
+
+ // Dividend is negative - negate dividend.
+ SUB.0 0,R0,R0
+ SUBB.0 0,R1,R1
+
+dividendNotNegative:
+
+ // If divisor is negative, invert the divisor.
+ AND.0 [lsr r3,15],1,r5
+ SUB.0 R3,0, r15
+ BGE divisorNotNegative
+=-> XOR.0 r4,r5,r9
+
+ // Divisor is negative - negate divisor.
+ SUB.0 0,R2,R2
+ SUBB.0 0,R3,R3
+
+divisorNotNegative:
+
+ STL R[13:12],(FP)-1 \ JL (&__udivmodsi4)
+=-> SUB.0 FP,8,FP // udivmodsi expects the frame to be valid still.
+
+ // The LSB of R9 indicates whether the quotient should be negated.
+ AND.0 r9,1,r15
+ BEQ skipQuotientNegation
+=-> LDL (FP)1,R[13:12] // Convenient point to restore link/fp
+
+ SUB.0 0,R4,R4
+ SUBB.0 0,R5,R5
+
+skipQuotientNegation:
+
+ // The MSB of R9 indicates whether the remainder should be negated.
+ ASR.0 R9,15,r15
+ BEQ epilogue
+
+ SUB.0 0,R6,R6
+ SUBB.0 0,R7,R7
+
+epilogue:
+
+ JR (R12)
+=-> LDL (FP)-2,R[9:8]
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __divmodsi4
+
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x8 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#64# 16#69# 16#76# 16#6d# 16#6f# 16#64# 16#73# 16#69# 16#34# 16#0# // Function name `_divmodsi4'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/lib1funcs.S b/libgcc/config/picochip/lib1funcs.S
new file mode 100644
index 00000000000..d344170d248
--- /dev/null
+++ b/libgcc/config/picochip/lib1funcs.S
@@ -0,0 +1,4 @@
+// picoChip ASM file
+// Fake libgcc asm file. This contains nothing, but is used to prevent gcc
+// getting upset about the lack of a lib1funcs.S file when LIB1ASMFUNCS is
+// defined to switch off the compilation of parts of libgcc.
diff --git a/libgcc/config/picochip/longjmp.S b/libgcc/config/picochip/longjmp.S
new file mode 100644
index 00000000000..d2a80aca730
--- /dev/null
+++ b/libgcc/config/picochip/longjmp.S
@@ -0,0 +1,182 @@
+// picoChip ASM file
+//
+// Support for longjmp.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global _longjmp
+_longjmp:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &_longjmp = 0 bytes
+
+ LDL (R0)0, R[3:2]
+ LDL (R0)1, R[5:4]
+ LDL (R0)2, R[7:6]
+ LDL (R0)3, R[9:8]
+ LDL (R0)4, R[11:10]
+ LDL (R0)5, R[13:12]
+ LDW (R0)12, R14
+ LDW (R0)13, R1
+ JR (R12)
+=-> COPY.0 1,R0
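+
+// For reference, the standard usage pattern this pair supports
+// (ordinary ISO C, not part of this patch); note that the delay-slot
+// COPY.0 1,R0 above means this longjmp always delivers the value 1:
+//
+// jmp_buf env;
+// if (setjmp (env) == 0)
+//   do_work ();   /* direct return: setjmp yielded 0 */
+// else
+//   recover ();   /* re-entered via longjmp: nonzero  */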
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : longjmp
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#6c# 16#6f# 16#6e# 16#67# 16#6a# 16#6d# 16#70# 16#0# // Function name `longjmp'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/lshrsi3.S b/libgcc/config/picochip/lshrsi3.S
new file mode 100644
index 00000000000..4fc53902955
--- /dev/null
+++ b/libgcc/config/picochip/lshrsi3.S
@@ -0,0 +1,190 @@
+// picoChip ASM file
+//
+// Support for 32-bit logical shift right.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+.section .text
+
+.global ___lshrsi3
+___lshrsi3:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &___lshrsi3 = 4 bytes
+
+ // if (R2 > 15) goto _L2
+ SUB.0 15,R2,r15
+ JMPLT _L2
+=-> SUB.0 16,R2,R5 // R5 := 16 - R2
+
+ LSR.0 R0,R2,R0 // R0 := R0 >> R2
+ LSR.0 R1,R2,R3 // R3 := R1 >> R2
+ // Move the low R5 bits of the high word into the hole left in the
+ // low word. When R2 == 0 this shift is meaningless (R5 is 16), so
+ // the OR result is only committed when R2 != 0.
+ LSL.0 R1,R5,R5 // R5 := R1 << R5
+ OR.0 R5,R0,R4 // R4 := R5 | R0
+ SUB.0 R2,0,r15
+ COPYNE R4,R0 // R0 := R4 (only when R2 != 0)
+ JR (R12) // Return to caller
+=-> COPY.0 R3,R1 // R1 := R3
+
+_L2:
+ LSR.0 R1,R2,R0 // R0 := R1 >> R2
+ JR (R12) // Return to caller
+=-> COPY.0 0,R1 // R1 := 0 (short constant)
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __lshrsi3
+
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x4 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#5f# 16#6c# 16#73# 16#68# 16#72# 16#73# 16#69# 16#33# 16#0# // Function name `__lshrsi3'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
diff --git a/libgcc/config/picochip/lshrsi3.c b/libgcc/config/picochip/lshrsi3.c
new file mode 100644
index 00000000000..fa32dc726ef
--- /dev/null
+++ b/libgcc/config/picochip/lshrsi3.c
@@ -0,0 +1,76 @@
+/*
+
+picoChip GCC support for 32-bit logical shift right.
+
+Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+Contributed by Picochip Ltd.
+Maintained by Daniel Towner (daniel.towner@picochip.com)
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+
+typedef struct USIstruct {
+ UHItype low, high;
+} USIstruct;
+
+typedef union USIunion {
+ USItype l;
+ USIstruct s;
+} USIunion;
+
+USItype __lshrsi3(USIunion value, HItype count) {
+ USIunion result;
+ int temp;
+
+ /* Ignore a zero count until we get into the (count < 16)
+ clause. This is slightly slower when shifting by zero, but faster
+ and smaller in all other cases (due to the better scheduling
+ opportunities available by putting the test near computational
+ instructions). */
+
+ if (count < 16) {
+ /* Shift low and high words by the count. */
+ result.s.low = value.s.low >> count;
+ result.s.high = value.s.high >> count;
+
+ /* There is now a hole in the upper `count' bits of the low
+ word. Shift the lower `count' bits of the upper word into the
+ low word. This only works when count isn't zero. */
+ if (count != 0) {
+ temp = value.s.high << (16 - count);
+ result.s.low |= temp;
+ }
+
+ } else {
+ /* Shift the upper word of the source into the lower word of the
+ result, and zero the result's upper word. Note that we actually
+ need to shift by (count - 16), but as we are only using the
+ bottom 4 bits, this is equivalent to shifting by count. */
+ result.s.low = value.s.high >> count;
+ result.s.high = 0;
+
+ }
+
+ return result.l;
+
+}
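+
+/* A worked example (an illustrative trace, not part of the library):
+   value = 0x12345678, count = 4
+     result.s.low  = 0x5678 >> 4        = 0x0567
+     result.s.high = 0x1234 >> 4        = 0x0123
+     temp          = 0x1234 << (16 - 4) = 0x4000 (low 16 bits)
+     result.s.low |= temp               -> 0x4567
+   giving 0x01234567, i.e. 0x12345678 >> 4.  */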
diff --git a/libgcc/config/picochip/parityhi2.S b/libgcc/config/picochip/parityhi2.S
new file mode 100644
index 00000000000..b9d0cdc63dd
--- /dev/null
+++ b/libgcc/config/picochip/parityhi2.S
@@ -0,0 +1,179 @@
+// picoChip ASM file
+//.file "parityhi2.S"
+//
+// Support for parity checks.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global ___parityhi2
+___parityhi2:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &___parityhi2 = 0 bytes
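+
+// The XOR ladder below folds the 16-bit word down to its parity; a
+// C equivalent, for reference (a sketch, not taken from this patch):
+//
+// int
+// parity (unsigned value)
+// {
+//   value ^= value >> 8;
+//   value ^= value >> 4;
+//   value ^= value >> 2;
+//   value ^= value >> 1;
+//   return value & 1;
+// }
+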
+ XOR.0 [LSR R0,8],R0,R0
+ XOR.0 [LSR R0,4],R0,R0
+ XOR.0 [LSR R0,2],R0,R0
+ JR (R12) \ XOR.0 [LSR R0,1],R0,R0
+=-> AND.0 R0,1,R0
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __parityhi2
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#5f# 16#70# 16#61# 16#72# 16#69# 16#74# 16#79# 16#68# 16#69# 16#32# 16#0# // Function name `__parityhi2'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/popcounthi2.S b/libgcc/config/picochip/popcounthi2.S
new file mode 100644
index 00000000000..2da618c96de
--- /dev/null
+++ b/libgcc/config/picochip/popcounthi2.S
@@ -0,0 +1,201 @@
+// picoChip ASM file
+//.file "popcounthi2.S"
+//
+// Support for 16-bit population count.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+// The following code (taken from a newsgroup posting) was compiled, and then
+// hand assembled (a similar version is given in the Hacker's Delight
+// book, chapter 5).
+//
+// int
+// popcount (int value)
+// {
+// value = ((value & 0xAAAA) >> 1) + (value & 0x5555);
+// value = ((value & 0xCCCC) >> 2) + (value & 0x3333);
+// value = ((value & 0xF0F0) >> 4) + (value & 0x0F0F);
+// return ((value & 0xFF00) >> 8) + (value & 0x00FF);
+// }
+//
+// This assembly function is approx. 20x faster than a naive loop
+// implementation of the population count, but about 30% bigger
+// (45 bytes v. 34 bytes).
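+//
+// An illustrative trace (not from the original posting) for
+// value = 0x000B:
+//   pass 1 (pairs):   0x0005 + 0x0001 = 0x0006
+//   pass 2 (nibbles): 0x0001 + 0x0002 = 0x0003
+//   pass 3 (bytes):   0x0000 + 0x0003 = 0x0003
+//   pass 4 (words):   0x0000 + 0x0003 = 0x0003, so popcount = 3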
+
+.align 8
+.global ___popcounthi2
+___popcounthi2:
+
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &___popcounthi2 = 0 bytes
+
+ AND.0 [LSR R0,1],21845,R0 \ AND.1 R0,21845,R5
+ ADD.0 R0,R5,R0
+ AND.0 [LSR R0,2],13107,R0 \ AND.1 R0,13107,R5
+ ADD.0 R0,R5,R0 \ COPY.1 1807,R2
+ AND.0 [LSR R0,4],R2,R0 \ AND.1 R0,3855,R5
+ ADD.0 R0,R5,R0
+ JR (R12) \ AND.0 R0, 255, R5
+=-> ADD.0 [LSR R0,8],R5,R0
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : ___popcounthi2
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#5f# 16#70# 16#6f# 16#70# 16#63# 16#6f# 16#75# 16#6e# 16#74# 16#68# 16#69# 16#32# 16#0# // Function name `__popcounthi2'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/setjmp.S b/libgcc/config/picochip/setjmp.S
new file mode 100644
index 00000000000..247c715f6a9
--- /dev/null
+++ b/libgcc/config/picochip/setjmp.S
@@ -0,0 +1,182 @@
+// picoChip ASM file
+//
+// Support for setjmp.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global _setjmp
+_setjmp:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &_setjmp = 0 bytes
+
+ STL R[3:2],(R0)0
+ STL R[5:4],(R0)1
+ STL R[7:6],(R0)2
+ STL R[9:8],(R0)3
+ STL R[11:10],(R0)4
+ STL R[13:12],(R0)5
+ STW R14,(R0)12
+ STW R1,(R0)13
+ JR (R12)
+=-> COPY.0 0,R0
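+
+// The store pattern above implies the following jmp_buf layout in
+// 16-bit words (a reading of the code, not a published ABI struct):
+//
+// struct pico_jmp_buf {
+//   unsigned short r2_to_r13[12]; /* words 0-11: the STL pairs */
+//   unsigned short r14;           /* word 12                   */
+//   unsigned short r1;            /* word 13                   */
+// };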
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : setjmp
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE (0xb) DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#73# 16#65# 16#74# 16#6a# 16#6d# 16#70# 16#0# // Function name `setjmp'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+
+.section .endFile
+// End of picoChip ASM file
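[Editorial note: the templates above ask for hand-encoded function names in
16#XX# form. A throwaway helper along these lines (an illustration, not part
of the patch; the name used is just an example) emits the directive directly.]

#include <stdio.h>

/* Print the picoChip ".ascii" directive for a NUL-terminated name,
   one 16#XX# token per byte, ending with the 16#0# terminator the
   DWARF templates above expect. */
int main(void)
{
    const char *name = "__subdi3";   /* example name */
    printf(".ascii");
    for (const char *p = name; ; p++) {
        printf(" 16#%x#", (unsigned char)*p);
        if (*p == '\0')
            break;
    }
    printf(" // Function name `%s'\n", name);
    return 0;
}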
diff --git a/libgcc/config/picochip/subdi3.S b/libgcc/config/picochip/subdi3.S
new file mode 100644
index 00000000000..d1c833ea824
--- /dev/null
+++ b/libgcc/config/picochip/subdi3.S
@@ -0,0 +1,191 @@
+// picoChip ASM file
+//
+// Support for 64-bit subtraction.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Hariharan Sandanagobalane (hariharan@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global __subdi3
+__subdi3:
+
+_picoMark_FUNCTION_BEGIN=
+// picoChip Function Prologue : &__subdi3 = 4 bytes
+
+ // The first operand of the subtraction arrives entirely in registers
+ // r[2-5]. The second operand is on the stack at FP(0-3), and the
+ // result must be stored to the location pointed to by register r0.
+ // All we need to do is load the values, subtract them (with sub, then
+ // subb to propagate the borrow), and store the result back.
+ ldw (FP)0, r1
+ stl r[7:6], (FP)-1
+ sub.0 r2, r1, r6
+ ldw (FP)1, r1
+ subb.0 r3, r1, r7
+ ldl (FP)1, r[3:2]
+ stl r[7:6], (r0)0
+ subb.0 r4, r2, r6
+ subb.0 r5, r3, r7
+ stl r[7:6], (r0)1
+ jr (r12)
+=-> ldl (FP)2, r[7:6]
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __subdi3
+
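[Editorial note: a minimal C model of the limb-wise subtraction above — an
illustrative sketch, not part of the patch; the type and function names are
ours. `sub` produces the low word and a borrow; each `subb` consumes it.]

#include <stdint.h>

/* 64-bit subtraction as four 16-bit limbs with borrow propagation,
   mirroring the sub/subb sequence in __subdi3. */
typedef struct { uint16_t w[4]; } di16;

static di16 subdi3_model(di16 a, di16 b)
{
    di16 r;
    uint32_t borrow = 0;
    for (int i = 0; i < 4; i++) {      /* sub for i == 0, subb after */
        uint32_t d = (uint32_t)a.w[i] - b.w[i] - borrow;
        r.w[i] = (uint16_t)d;
        borrow = (d >> 16) & 1;        /* borrow out of this limb */
    }
    return r;
}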
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x4 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#73# 16#75# 16#62# 16#64# 16#69# 16#33# 16#0# // Function name `_subdi3'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
+
diff --git a/libgcc/config/picochip/t-picochip b/libgcc/config/picochip/t-picochip
index 5135d500cbb..4de5dbbdcd3 100644
--- a/libgcc/config/picochip/t-picochip
+++ b/libgcc/config/picochip/t-picochip
@@ -1,2 +1,39 @@
+# Prevent some of the more complicated libgcc functions from being
+# compiled. This is because they are generally too big to fit into an
+# AE anyway, so there is no point in having them. Also, some don't
+# compile properly, so we'll ignore them for the moment.
+LIB1ASMSRC = picochip/lib1funcs.S
+LIB1ASMFUNCS = _mulsc3 _divsc3
+
+# Compile the extra library functions.
+LIB2ADD = \
+ $(srcdir)/config/picochip/ashrsi3.S \
+ $(srcdir)/config/picochip/ashlsi3.S \
+ $(srcdir)/config/picochip/divmodhi4.S \
+ $(srcdir)/config/picochip/udivmodhi4.S \
+ $(srcdir)/config/picochip/divmodsi4.S \
+ $(srcdir)/config/picochip/udivmodsi4.S \
+ $(srcdir)/config/picochip/divmod15.S \
+ $(srcdir)/config/picochip/ucmpsi2.S \
+ $(srcdir)/config/picochip/cmpsi2.S \
+ $(srcdir)/config/picochip/clzsi2.S \
+ $(srcdir)/config/picochip/adddi3.S \
+ $(srcdir)/config/picochip/subdi3.S \
+ $(srcdir)/config/picochip/lshrsi3.S \
+ $(srcdir)/config/picochip/parityhi2.S \
+ $(srcdir)/config/picochip/popcounthi2.S
+
+# Special libgcc setup. Make single/double floating point the same,
+# and use our own include files.
+HOST_LIBGCC2_CFLAGS += -DDF=SF -I../../includes/
+
+# Switch off all debugging for the embedded libraries.
+# (embedded processors need small libraries by default).
+# NOTE: If the debug level is increased, turn off instruction scheduling.
+LIBGCC2_DEBUG_CFLAGS = -g0
+
# Turn off the building of exception handling libraries.
LIB2ADDEH =
+
+# Turn off ranlib on target libraries.
+RANLIB_FOR_TARGET = cat
diff --git a/libgcc/config/picochip/ucmpsi2.S b/libgcc/config/picochip/ucmpsi2.S
new file mode 100644
index 00000000000..10c03cfcd6e
--- /dev/null
+++ b/libgcc/config/picochip/ucmpsi2.S
@@ -0,0 +1,209 @@
+// picoChip ASM file
+//.file "ucmpsi2.c"
+//
+// Support for 32-bit unsigned compare.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+//
+// Compiled from the following, and then hand optimised.
+//
+// int __ucmpsi2 (USItype x, USItype y)
+// {
+//
+// USIunion lx; lx.l = x;
+// USIunion ly; ly.l = y;
+//
+// if (lx.s.high < ly.s.high)
+// return 0;
+// else if (lx.s.high > ly.s.high)
+// return 2;
+// if (lx.s.low < ly.s.low)
+// return 0;
+// else if (lx.s.low > ly.s.low)
+// return 2;
+// return 1;
+// }
+
+.section .text
+
+.align 8
+.global ___ucmpsi2
+___ucmpsi2:
+_picoMark_FUNCTION_BEGIN=
+// picoChip Function Prologue : &___ucmpsi2 = 0 bytes
+ SUB.0 R1,R3,r15
+
+ BLO _L1
+=-> SUB.0 R3,R1,r15 \ COPY.1 0,R5
+
+ BLO _L1
+=-> SUB.0 R0,R2,r15 \ COPY.1 2,R5
+
+ BLO _L1
+=-> SUB.0 R2,R0,r15 \ COPY.1 0,R5
+
+ BLO _L1
+=-> COPY.0 2,R5
+
+ COPY.0 1,R5
+_L1:
+ JR (R12)
+=-> COPY.0 R5,R0 // R0 := R5
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : __ucmpsi2
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#5f# 16#75# 16#63# 16#6d# 16#70# 16#73# 16#69# 16#32# 16#0# // Function name `__ucmpsi2'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/udivmodhi4.S b/libgcc/config/picochip/udivmodhi4.S
new file mode 100644
index 00000000000..ac16fae39cf
--- /dev/null
+++ b/libgcc/config/picochip/udivmodhi4.S
@@ -0,0 +1,238 @@
+// picoChip ASM file
+//
+// Support for 16-bit unsigned division/modulus.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.global __udivmodhi4
+__udivmodhi4:
+_picoMark_FUNCTION_BEGIN=
+
+// picoChip Function Prologue : &__udivmodhi4 = 6 bytes
+
+ // 16-bit unsigned division. The divstep function is only capable of
+ // handling 15-bit division (plus a sign to give 16 bits). It is not
+ // capable of handling unsigned division directly. Instead, take
+ // advantage of the special property that
+ // ((dividend / 2) / divisor) * 2 will be almost good enough. The
+ // error in the result is only 0 or 1, and this can be easily
+ // tested and corrected. A full description of the algorithm can
+ // be found in `Hacker's Delight', by Henry Warren, page 146.
+
+ // Input:
+ // r0 - dividend
+ // r1 - divisor
+ // Output:
+ // r0 - quotient
+ // r1 - remainder
+
+ // Note that lr and the original inputs are speculatively saved. They
+ // will only be restored if the 15-bit division function is called.
+
+ sub.0 r1,0,r15 \ stl r[0:1],(fp)-1
+ bge divisorIs15bit
+=-> sub.0 r0,r1,r2 \ stw lr,(fp)-3
+
+ // The divisor is >= 2^15.
+ bhs quotientIs1
+
+ // The dividend < divisor. The quotient is thus 0, and the
+ // remainder is the dividend.
+ copy.0 r0,r1 \ jr (lr)
+=-> copy.0 0,r0
+
+quotientIs1:
+ // The dividend >= divisor. The quotient is thus 1, and the
+ // remainder can be computed directly by subtraction (i.e., the
+ // result of the comparison already performed to branch here).
+ jr (lr) \ copy.0 r2,r1
+=-> copy.0 1,r0
+
+divisorIs15bit:
+ // The divisor is < 2^15.
+
+ // Divide the original dividend by 2, and call the 15-bit division.
+ // Note that the original dividend is kept in r5, which is known to
+ // be unused by the called function. This avoids the memory stall
+ // that reloading the value immediately after the function returns
+ // would otherwise introduce.
+
+ jl (&__divmod15) \ copy.0 r0,r5 // fn_call &__divmod15
+=-> lsr.0 r0,1,r0
+
+ // Compute the new quotient and remainder by multiplying them by 2.
+ // The remainder will be 1 out, if the original dividend was odd.
+ and.0 r5,1,r5 \ ldl (fp)-1,r[2:3]
+ add.0 [lsl r1,1],r5,r1 \ lsl.1 r0,1,r0
+
+ // The error in the quotient is 0 or 1, and can be determined by
+ // comparing the remainder to the original divisor. If the remainder
+ // is greater than or equal to the divisor, the error is 1.
+ sub.0 r1,r3,r15 \ ldw (fp)-3,lr
+ blo noCompensation
+=-> nop
+ add.0 r0,1,r0 \ sub.1 r1,r3,r1
+noCompensation:
+ jr (lr)
+
+_picoMark_FUNCTION_END=
+// picoChip Function Epilogue : udivmodhi4
+
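[Editorial note: in C, the halve/divide/double trick with its one-step
correction looks roughly like the sketch below. The function name is ours,
the inline / and % stand in for the 15-bit __divmod15 helper, and, like the
routine itself, it assumes a non-zero divisor.]

#include <stdint.h>

/* C model of the algorithm above: handle a top-bit-set divisor as a
   0-or-1 quotient, otherwise halve the dividend, do a 15-bit-safe
   division, double the results, and correct the off-by-one. */
static void udivmodhi4_model(uint16_t n, uint16_t d,
                             uint16_t *q, uint16_t *r)
{
    if (d >= 0x8000) {                        /* divisor >= 2^15 */
        *q = (n >= d);                        /* quotient is 0 or 1 */
        *r = *q ? (uint16_t)(n - d) : n;
        return;
    }
    uint16_t q2 = (uint16_t)((n >> 1) / d);   /* 15-bit division */
    uint16_t r2 = (uint16_t)((n >> 1) % d);
    uint16_t qq = (uint16_t)(q2 << 1);
    uint16_t rr = (uint16_t)((r2 << 1) | (n & 1));
    if (rr >= d) {                            /* error of 1: compensate */
        rr -= d;
        qq++;
    }
    *q = qq;
    *r = rr;
}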
+
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x6 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#75# 16#64# 16#69# 16#76# 16#6d# 16#6f# 16#64# 16#68# 16#69# 16#34# 16#0# // Function name `_udivmodhi4'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/picochip/udivmodsi4.S b/libgcc/config/picochip/udivmodsi4.S
new file mode 100644
index 00000000000..92c2a4983ce
--- /dev/null
+++ b/libgcc/config/picochip/udivmodsi4.S
@@ -0,0 +1,318 @@
+// picoChip ASM file
+//
+// Support for 32-bit unsigned division/modulus.
+//
+// Copyright (C) 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+// Contributed by Picochip Ltd.
+// Maintained by Daniel Towner (daniel.towner@picochip.com)
+//
+// This file is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option) any
+// later version.
+//
+// This file is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+//
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+.section .text
+
+.align 8
+.global __udivmodsi4
+__udivmodsi4:
+_picoMark_FUNCTION_BEGIN=
+// picoChip Function Prologue : &__udivmodsi4 = 24 bytes
+
+ // Schedule the register saves alongside the special cases, so that
+ // if the special cases fail, the registers will have already
+ // been stored onto the stack.
+ SUB.0 R3,R1,r15 \ STL R[13:12],(FP)-1
+ BHS skipCommonCase \ STL R[9:8],(FP)-4
+=-> SUB.0 R2,1,r15 \ STL R[11:10],(FP)-3
+
+_L2:
+ // Flags set above, and in _L2 caller.
+ BNE restOfCode
+=-> SUB.0 R3,0,r15
+ BNE restOfCode
+=-> COPY.0 R0,R4 \ COPY.1 R1,R5
+ JR (R12) // Return to caller
+=-> COPY.0 0,R6 \ COPY.1 0,R7
+ // Never reach here
+
+skipCommonCase:
+ SUB.0 R3,R1,r15
+ BNE _L3 // (Reversed branch)
+=-> SUB.0 R2,R0,r15 // Must be set in delay slot, so ready by _L9
+
+_L9:
+ BLO _L2 // (Reversed branch)
+=-> SUB.0 R2,1,r15
+
+_L3:
+ SUB.0 R2,R0,r15
+ BEQ _L10 // (Reversed branch)
+=-> SUB.0 R1,R3,r15 // Set flags for branch at _L10
+
+_L4:
+ // greater than
+ COPY.0 0,R4 \ COPY.1 0,R5 \ JR (R12) // Return to caller
+=-> COPY.0 R0,R6 \ COPY.1 R1,R7
+ // Doesn't reach here.
+
+_L10:
+ // Flags set in _L10 call delay slot.
+ BNE _L4
+=-> COPY.0 1,R4 \ COPY.1 0,R5
+ JR (R12) // Return to caller
+=-> COPY.0 0,R6 \ COPY.1 0,R7
+
+restOfCode:
+
+// Prologue
+
+ // Register saves scheduled alongside special cases above.
+ ADD.0 FP,-20,FP \ STW R14,(FP)-4
+
+ // The following can be scheduled together.
+ // dividend in R[9:8] (from R[1:0])
+ // divisor in R[7:6] (from R[3:2])
+ // R14 := clzsi2 (dividend)
+ // R0 := clzsi2 (divisor)
+ JL (&__clzsi2) \ COPY.0 R0,R8 \ COPY.1 R1,R9
+=-> COPY.0 R2,R6 \ COPY.1 R3,R7
+ COPY.0 R0,R14 \ JL (&__clzsi2)
+=-> COPY.0 R6,R0 \ COPY.1 R7,R1
+
+ // R14 := R0 - R14
+ SUB.0 R0,R14,R14
+
+ ADD.0 R14,1,R0 // R0 := R14 + 1 (HI)
+
+ // R[11:10] = R[7,6] << R14
+ SUB.0 15,R14,r15
+ LSL.0 R6,R14,R11 \ BLT setupDivstepLoop
+=-> SUB.0 0,R14,R4 \ COPY.1 0,R10
+
+ // Zero shift is a special case. Shifting by zero within a 16-bit
+ // source object is fine, but don't execute the OR of the right-shift
+ // into the final result.
+ LSL.0 R7,R14,R11 \ BEQ setupDivstepLoop
+=-> LSL.0 R6,R14,R10
+
+ LSR.0 R6,R4,R4
+ OR.0 R11,R4,R11
+
+setupDivstepLoop:
+
+ // R[5:4] := R[9:8] (SI)
+ COPY.0 R8,R4 \ COPY.1 R9,R5
+ COPY.0 0,R6 \ COPY.1 R0,R8
+
+ // Store the original value of loopCount for use after the loop.
+ // The subtraction is handled in the tail of the loop iteration
+ // after this point.
+ SUB.0 R4,R10,R0 \ COPY.1 R8,R14
+
+ // workingResult in R4,5,6
+ // temps in r0,1,2 and r7
+ // alignedDivisor in R10,11
+ // loopCount in r8
+ // r3, r9 scratch, used for renaming.
+
+loopStart:
+ // R0 := R4 - zeroExtend (R10) - only 33 bits are needed (held as 48)
+ SUBB.0 R5,R11,R1 \ LSR.1 R0,15,R3
+ SUBB.0 R6,0,R2 \ LSR.1 R1,15,R6
+
+ // if (carry) goto shiftOnly
+ SUB.0 R8,1,R8 \ BNE shiftOnly
+=-> LSR.0 R4,15,R7 \ LSL.1 R1,1,R9
+
+ OR.0 [LSL R0,1],1,R4 \ BNE loopStart
+=-> SUB.0 R4,R10,R0 \ OR.1 R9,R3,R5
+
+ BRA loopEnd
+
+shiftOnly:
+
+ OR.0 [LSL R5,1],R7,R5 \ BNE loopStart \ LSR.1 R5,15,R6
+=-> SUB.0 [LSL R4,1],R10,R0 \LSL.1 R4,1,R4
+
+// End of loop
+loopEnd:
+
+ // Schedule the computation of the upper word after shifting
+ // alongside the decision over whether to branch, and the register
+ // restores.
+ // R10 is filled with a useful constant.
+ SUB.0 15,r14,r15 \ LDL (FP)4,R[13:12]
+ SUB.1 0,R14,R1 // Don't set flags!
+ LSL.0 R6,R1,R3 \ LDL (FP)-4,R[9:8]
+
+ BLT remainderHasMoreThan16Bits \ LSR.0 R5,R14,R7 \ COPY.1 -1,R10
+=-> LSL.0 R5,R1,R2 \ OR.1 R7,R3,R3
+
+ LSR.0 R4,R14,R3 \ COPY.1 R3,R7
+ BRA epilogue \ LSR.0 -1,R1,R0 \ COPY.1 0,R5
+=-> OR.0 R3,R2,R6 \ AND.1 R0,R4,R4
+
+remainderHasMoreThan16Bits:
+
+ LSL.0 R10,R14,R1 \ COPY.1 R3,R6
+ XOR.0 R10,R1,R1 \ COPY.1 0,R7
+ AND.0 R1,R5,R5
+
+epilogue:
+
+ JR (R12) \ LDW (FP)-4,R14
+=-> LDL (FP)-3,R[11:10]
+
+_picoMark_FUNCTION_END=
+
+// picoChip Function Epilogue : udivmodsi4
+
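[Editorial note: the structure of the routine — early-out special cases,
clz-based normalisation, then one divstep per bit — corresponds to the
restoring shift-subtract sketch below. An illustration, not the shipped
code; the function name is ours, and a non-zero divisor is assumed, as in
the assembly.]

#include <stdint.h>

/* Align the divisor's top bit with the dividend's (the clz difference
   sets the step count), then one compare/subtract/shift per bit. */
static void udivmodsi4_model(uint32_t n, uint32_t d,
                             uint32_t *q, uint32_t *r)
{
    if (d > n) {                        /* quotient 0, remainder n */
        *q = 0;
        *r = n;
        return;
    }
    int steps = __builtin_clz(d) - __builtin_clz(n);  /* >= 0 here */
    uint64_t ad = (uint64_t)d << steps;               /* aligned divisor */
    uint64_t rem = n;
    uint32_t quo = 0;
    for (int i = 0; i <= steps; i++) {
        quo <<= 1;
        if (rem >= ad) {                /* one restoring divstep */
            rem -= ad;
            quo |= 1;
        }
        ad >>= 1;
    }
    *q = quo;
    *r = (uint32_t)rem;
}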
+//============================================================================
+// All DWARF information between this marker and the END OF DWARF
+// marker should be included in the source file. Search for
+// FUNCTION_STACK_SIZE_GOES_HERE and FUNCTION NAME GOES HERE, and
+// provide the relevant information. Add markers called
+// _picoMark_FUNCTION_BEGIN and _picoMark_FUNCTION_END around the
+// function in question.
+//============================================================================
+
+//============================================================================
+// Frame information.
+//============================================================================
+
+.section .debug_frame
+_picoMark_DebugFrame=
+
+// Common CIE header.
+.unalignedInitLong _picoMark_CieEnd-_picoMark_CieBegin
+_picoMark_CieBegin=
+.unalignedInitLong 0xffffffff
+.initByte 0x1 // CIE Version
+.ascii 16#0# // CIE Augmentation
+.uleb128 0x1 // CIE Code Alignment Factor
+.sleb128 2 // CIE Data Alignment Factor
+.initByte 0xc // CIE RA Column
+.initByte 0xc // DW_CFA_def_cfa
+.uleb128 0xd
+.uleb128 0x0
+.align 2
+_picoMark_CieEnd=
+
+// FDE
+_picoMark_LSFDE0I900821033007563=
+.unalignedInitLong _picoMark_FdeEnd-_picoMark_FdeBegin
+_picoMark_FdeBegin=
+.unalignedInitLong _picoMark_DebugFrame // FDE CIE offset
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // FDE initial location
+.unalignedInitWord _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x18 // <-- FUNCTION_STACK_SIZE_GOES_HERE
+.initByte 0x4 // DW_CFA_advance_loc4
+.unalignedInitLong _picoMark_FUNCTION_END-_picoMark_FUNCTION_BEGIN
+.initByte 0xe // DW_CFA_def_cfa_offset
+.uleb128 0x0
+.align 2
+_picoMark_FdeEnd=
+
+//============================================================================
+// Abbreviation information.
+//============================================================================
+
+.section .debug_abbrev
+_picoMark_ABBREVIATIONS=
+
+.section .debug_abbrev
+ .uleb128 0x1 // (abbrev code)
+ .uleb128 0x11 // (TAG: DW_TAG_compile_unit)
+ .initByte 0x1 // DW_children_yes
+ .uleb128 0x10 // (DW_AT_stmt_list)
+ .uleb128 0x6 // (DW_FORM_data4)
+ .uleb128 0x12 // (DW_AT_high_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x11 // (DW_AT_low_pc)
+ .uleb128 0x1 // (DW_FORM_addr)
+ .uleb128 0x25 // (DW_AT_producer)
+ .uleb128 0x8 // (DW_FORM_string)
+ .uleb128 0x13 // (DW_AT_language)
+ .uleb128 0x5 // (DW_FORM_data2)
+ .uleb128 0x3 // (DW_AT_name)
+ .uleb128 0x8 // (DW_FORM_string)
+.initByte 0x0
+.initByte 0x0
+
+ .uleb128 0x2 ;# (abbrev code)
+ .uleb128 0x2e ;# (TAG: DW_TAG_subprogram)
+.initByte 0x0 ;# DW_children_no
+ .uleb128 0x3 ;# (DW_AT_name)
+ .uleb128 0x8 ;# (DW_FORM_string)
+ .uleb128 0x11 ;# (DW_AT_low_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+ .uleb128 0x12 ;# (DW_AT_high_pc)
+ .uleb128 0x1 ;# (DW_FORM_addr)
+.initByte 0x0
+.initByte 0x0
+
+.initByte 0x0
+
+//============================================================================
+// Line information. DwarfLib requires this to be present, but it can
+// be empty.
+//============================================================================
+
+.section .debug_line
+_picoMark_LINES=
+
+//============================================================================
+// Debug Information
+//============================================================================
+.section .debug_info
+
+// Fixed header.
+.unalignedInitLong _picoMark_DEBUG_INFO_END-_picoMark_DEBUG_INFO_BEGIN
+_picoMark_DEBUG_INFO_BEGIN=
+.unalignedInitWord 0x2
+.unalignedInitLong _picoMark_ABBREVIATIONS
+.initByte 0x2
+
+// Compile unit information.
+.uleb128 0x1 // (DIE 0xb DW_TAG_compile_unit)
+.unalignedInitLong _picoMark_LINES
+.unalignedInitWord _picoMark_FUNCTION_END
+.unalignedInitWord _picoMark_FUNCTION_BEGIN
+// Producer is `picoChip'
+.ascii 16#70# 16#69# 16#63# 16#6f# 16#43# 16#68# 16#69# 16#70# 16#00#
+.unalignedInitWord 0xcafe // ASM language
+.ascii 16#0# // Name. DwarfLib expects this to be present.
+
+.uleb128 0x2 ;# (DIE DW_TAG_subprogram)
+
+// FUNCTION NAME GOES HERE. Use `echo name | od -t x1' to get the hex. Each
+// byte is specified using the format 16#XX#.
+.ascii 16#5f# 16#75# 16#64# 16#69# 16#76# 16#6d# 16#6f# 16#64# 16#73# 16#69# 16#34# 16#0# // Function name `_udivmodsi4'
+.unalignedInitWord _picoMark_FUNCTION_BEGIN // DW_AT_low_pc
+.unalignedInitWord _picoMark_FUNCTION_END // DW_AT_high_pc
+
+.initByte 0x0 // end of compile unit children.
+
+_picoMark_DEBUG_INFO_END=
+
+//============================================================================
+// END OF DWARF
+//============================================================================
+.section .endFile
+// End of picoChip ASM file
diff --git a/libgcc/config/rs6000/crtresfpr.S b/libgcc/config/rs6000/crtresfpr.S
new file mode 100644
index 00000000000..9fb228cf458
--- /dev/null
+++ b/libgcc/config/rs6000/crtresfpr.S
@@ -0,0 +1,81 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+CFI_STARTPROC
+HIDDEN_FUNC(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
+HIDDEN_FUNC(_restfpr_15) lfd 15,-136(11)
+HIDDEN_FUNC(_restfpr_16) lfd 16,-128(11)
+HIDDEN_FUNC(_restfpr_17) lfd 17,-120(11)
+HIDDEN_FUNC(_restfpr_18) lfd 18,-112(11)
+HIDDEN_FUNC(_restfpr_19) lfd 19,-104(11)
+HIDDEN_FUNC(_restfpr_20) lfd 20,-96(11)
+HIDDEN_FUNC(_restfpr_21) lfd 21,-88(11)
+HIDDEN_FUNC(_restfpr_22) lfd 22,-80(11)
+HIDDEN_FUNC(_restfpr_23) lfd 23,-72(11)
+HIDDEN_FUNC(_restfpr_24) lfd 24,-64(11)
+HIDDEN_FUNC(_restfpr_25) lfd 25,-56(11)
+HIDDEN_FUNC(_restfpr_26) lfd 26,-48(11)
+HIDDEN_FUNC(_restfpr_27) lfd 27,-40(11)
+HIDDEN_FUNC(_restfpr_28) lfd 28,-32(11)
+HIDDEN_FUNC(_restfpr_29) lfd 29,-24(11)
+HIDDEN_FUNC(_restfpr_30) lfd 30,-16(11)
+HIDDEN_FUNC(_restfpr_31) lfd 31,-8(11)
+ blr
+FUNC_END(_restfpr_31)
+FUNC_END(_restfpr_30)
+FUNC_END(_restfpr_29)
+FUNC_END(_restfpr_28)
+FUNC_END(_restfpr_27)
+FUNC_END(_restfpr_26)
+FUNC_END(_restfpr_25)
+FUNC_END(_restfpr_24)
+FUNC_END(_restfpr_23)
+FUNC_END(_restfpr_22)
+FUNC_END(_restfpr_21)
+FUNC_END(_restfpr_20)
+FUNC_END(_restfpr_19)
+FUNC_END(_restfpr_18)
+FUNC_END(_restfpr_17)
+FUNC_END(_restfpr_16)
+FUNC_END(_restfpr_15)
+FUNC_END(_restfpr_14)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/crtresgpr.S b/libgcc/config/rs6000/crtresgpr.S
new file mode 100644
index 00000000000..9f9cec9f9ca
--- /dev/null
+++ b/libgcc/config/rs6000/crtresgpr.S
@@ -0,0 +1,81 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+CFI_STARTPROC
+HIDDEN_FUNC(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
+HIDDEN_FUNC(_restgpr_15) lwz 15,-68(11)
+HIDDEN_FUNC(_restgpr_16) lwz 16,-64(11)
+HIDDEN_FUNC(_restgpr_17) lwz 17,-60(11)
+HIDDEN_FUNC(_restgpr_18) lwz 18,-56(11)
+HIDDEN_FUNC(_restgpr_19) lwz 19,-52(11)
+HIDDEN_FUNC(_restgpr_20) lwz 20,-48(11)
+HIDDEN_FUNC(_restgpr_21) lwz 21,-44(11)
+HIDDEN_FUNC(_restgpr_22) lwz 22,-40(11)
+HIDDEN_FUNC(_restgpr_23) lwz 23,-36(11)
+HIDDEN_FUNC(_restgpr_24) lwz 24,-32(11)
+HIDDEN_FUNC(_restgpr_25) lwz 25,-28(11)
+HIDDEN_FUNC(_restgpr_26) lwz 26,-24(11)
+HIDDEN_FUNC(_restgpr_27) lwz 27,-20(11)
+HIDDEN_FUNC(_restgpr_28) lwz 28,-16(11)
+HIDDEN_FUNC(_restgpr_29) lwz 29,-12(11)
+HIDDEN_FUNC(_restgpr_30) lwz 30,-8(11)
+HIDDEN_FUNC(_restgpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_restgpr_31)
+FUNC_END(_restgpr_30)
+FUNC_END(_restgpr_29)
+FUNC_END(_restgpr_28)
+FUNC_END(_restgpr_27)
+FUNC_END(_restgpr_26)
+FUNC_END(_restgpr_25)
+FUNC_END(_restgpr_24)
+FUNC_END(_restgpr_23)
+FUNC_END(_restgpr_22)
+FUNC_END(_restgpr_21)
+FUNC_END(_restgpr_20)
+FUNC_END(_restgpr_19)
+FUNC_END(_restgpr_18)
+FUNC_END(_restgpr_17)
+FUNC_END(_restgpr_16)
+FUNC_END(_restgpr_15)
+FUNC_END(_restgpr_14)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/crtresxfpr.S b/libgcc/config/rs6000/crtresxfpr.S
new file mode 100644
index 00000000000..633f2db61f0
--- /dev/null
+++ b/libgcc/config/rs6000/crtresxfpr.S
@@ -0,0 +1,126 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+/* In addition to restoring the fp registers, it will return to the caller's */
+/* caller */
+
+CFI_STARTPROC
+CFI_DEF_CFA_REGISTER (11)
+CFI_OFFSET (65, 4)
+CFI_OFFSET (46, -144)
+CFI_OFFSET (47, -136)
+CFI_OFFSET (48, -128)
+CFI_OFFSET (49, -120)
+CFI_OFFSET (50, -112)
+CFI_OFFSET (51, -104)
+CFI_OFFSET (52, -96)
+CFI_OFFSET (53, -88)
+CFI_OFFSET (54, -80)
+CFI_OFFSET (55, -72)
+CFI_OFFSET (56, -64)
+CFI_OFFSET (57, -56)
+CFI_OFFSET (58, -48)
+CFI_OFFSET (59, -40)
+CFI_OFFSET (60, -32)
+CFI_OFFSET (61, -24)
+CFI_OFFSET (62, -16)
+CFI_OFFSET (63, -8)
+HIDDEN_FUNC(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
+CFI_RESTORE (46)
+HIDDEN_FUNC(_restfpr_15_x) lfd 15,-136(11)
+CFI_RESTORE (47)
+HIDDEN_FUNC(_restfpr_16_x) lfd 16,-128(11)
+CFI_RESTORE (48)
+HIDDEN_FUNC(_restfpr_17_x) lfd 17,-120(11)
+CFI_RESTORE (49)
+HIDDEN_FUNC(_restfpr_18_x) lfd 18,-112(11)
+CFI_RESTORE (50)
+HIDDEN_FUNC(_restfpr_19_x) lfd 19,-104(11)
+CFI_RESTORE (51)
+HIDDEN_FUNC(_restfpr_20_x) lfd 20,-96(11)
+CFI_RESTORE (52)
+HIDDEN_FUNC(_restfpr_21_x) lfd 21,-88(11)
+CFI_RESTORE (53)
+HIDDEN_FUNC(_restfpr_22_x) lfd 22,-80(11)
+CFI_RESTORE (54)
+HIDDEN_FUNC(_restfpr_23_x) lfd 23,-72(11)
+CFI_RESTORE (55)
+HIDDEN_FUNC(_restfpr_24_x) lfd 24,-64(11)
+CFI_RESTORE (56)
+HIDDEN_FUNC(_restfpr_25_x) lfd 25,-56(11)
+CFI_RESTORE (57)
+HIDDEN_FUNC(_restfpr_26_x) lfd 26,-48(11)
+CFI_RESTORE (58)
+HIDDEN_FUNC(_restfpr_27_x) lfd 27,-40(11)
+CFI_RESTORE (59)
+HIDDEN_FUNC(_restfpr_28_x) lfd 28,-32(11)
+CFI_RESTORE (60)
+HIDDEN_FUNC(_restfpr_29_x) lfd 29,-24(11)
+CFI_RESTORE (61)
+HIDDEN_FUNC(_restfpr_30_x) lfd 30,-16(11)
+CFI_RESTORE (62)
+HIDDEN_FUNC(_restfpr_31_x) lwz 0,4(11)
+ lfd 31,-8(11)
+CFI_RESTORE (63)
+ mtlr 0
+CFI_RESTORE (65)
+ mr 1,11
+CFI_DEF_CFA_REGISTER (1)
+ blr
+FUNC_END(_restfpr_31_x)
+FUNC_END(_restfpr_30_x)
+FUNC_END(_restfpr_29_x)
+FUNC_END(_restfpr_28_x)
+FUNC_END(_restfpr_27_x)
+FUNC_END(_restfpr_26_x)
+FUNC_END(_restfpr_25_x)
+FUNC_END(_restfpr_24_x)
+FUNC_END(_restfpr_23_x)
+FUNC_END(_restfpr_22_x)
+FUNC_END(_restfpr_21_x)
+FUNC_END(_restfpr_20_x)
+FUNC_END(_restfpr_19_x)
+FUNC_END(_restfpr_18_x)
+FUNC_END(_restfpr_17_x)
+FUNC_END(_restfpr_16_x)
+FUNC_END(_restfpr_15_x)
+FUNC_END(_restfpr_14_x)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/crtresxgpr.S b/libgcc/config/rs6000/crtresxgpr.S
new file mode 100644
index 00000000000..451b2b69d1e
--- /dev/null
+++ b/libgcc/config/rs6000/crtresxgpr.S
@@ -0,0 +1,124 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+CFI_STARTPROC
+CFI_DEF_CFA_REGISTER (11)
+CFI_OFFSET (65, 4)
+CFI_OFFSET (14, -72)
+CFI_OFFSET (15, -68)
+CFI_OFFSET (16, -64)
+CFI_OFFSET (17, -60)
+CFI_OFFSET (18, -56)
+CFI_OFFSET (19, -52)
+CFI_OFFSET (20, -48)
+CFI_OFFSET (21, -44)
+CFI_OFFSET (22, -40)
+CFI_OFFSET (23, -36)
+CFI_OFFSET (24, -32)
+CFI_OFFSET (25, -28)
+CFI_OFFSET (26, -24)
+CFI_OFFSET (27, -20)
+CFI_OFFSET (28, -16)
+CFI_OFFSET (29, -12)
+CFI_OFFSET (30, -8)
+CFI_OFFSET (31, -4)
+HIDDEN_FUNC(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
+CFI_RESTORE (14)
+HIDDEN_FUNC(_restgpr_15_x) lwz 15,-68(11)
+CFI_RESTORE (15)
+HIDDEN_FUNC(_restgpr_16_x) lwz 16,-64(11)
+CFI_RESTORE (16)
+HIDDEN_FUNC(_restgpr_17_x) lwz 17,-60(11)
+CFI_RESTORE (17)
+HIDDEN_FUNC(_restgpr_18_x) lwz 18,-56(11)
+CFI_RESTORE (18)
+HIDDEN_FUNC(_restgpr_19_x) lwz 19,-52(11)
+CFI_RESTORE (19)
+HIDDEN_FUNC(_restgpr_20_x) lwz 20,-48(11)
+CFI_RESTORE (20)
+HIDDEN_FUNC(_restgpr_21_x) lwz 21,-44(11)
+CFI_RESTORE (21)
+HIDDEN_FUNC(_restgpr_22_x) lwz 22,-40(11)
+CFI_RESTORE (22)
+HIDDEN_FUNC(_restgpr_23_x) lwz 23,-36(11)
+CFI_RESTORE (23)
+HIDDEN_FUNC(_restgpr_24_x) lwz 24,-32(11)
+CFI_RESTORE (24)
+HIDDEN_FUNC(_restgpr_25_x) lwz 25,-28(11)
+CFI_RESTORE (25)
+HIDDEN_FUNC(_restgpr_26_x) lwz 26,-24(11)
+CFI_RESTORE (26)
+HIDDEN_FUNC(_restgpr_27_x) lwz 27,-20(11)
+CFI_RESTORE (27)
+HIDDEN_FUNC(_restgpr_28_x) lwz 28,-16(11)
+CFI_RESTORE (28)
+HIDDEN_FUNC(_restgpr_29_x) lwz 29,-12(11)
+CFI_RESTORE (29)
+HIDDEN_FUNC(_restgpr_30_x) lwz 30,-8(11)
+CFI_RESTORE (30)
+HIDDEN_FUNC(_restgpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+CFI_RESTORE (31)
+ mtlr 0
+CFI_RESTORE (65)
+ mr 1,11
+CFI_DEF_CFA_REGISTER (1)
+ blr
+FUNC_END(_restgpr_31_x)
+FUNC_END(_restgpr_30_x)
+FUNC_END(_restgpr_29_x)
+FUNC_END(_restgpr_28_x)
+FUNC_END(_restgpr_27_x)
+FUNC_END(_restgpr_26_x)
+FUNC_END(_restgpr_25_x)
+FUNC_END(_restgpr_24_x)
+FUNC_END(_restgpr_23_x)
+FUNC_END(_restgpr_22_x)
+FUNC_END(_restgpr_21_x)
+FUNC_END(_restgpr_20_x)
+FUNC_END(_restgpr_19_x)
+FUNC_END(_restgpr_18_x)
+FUNC_END(_restgpr_17_x)
+FUNC_END(_restgpr_16_x)
+FUNC_END(_restgpr_15_x)
+FUNC_END(_restgpr_14_x)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/crtsavfpr.S b/libgcc/config/rs6000/crtsavfpr.S
new file mode 100644
index 00000000000..3cdb25033ca
--- /dev/null
+++ b/libgcc/config/rs6000/crtsavfpr.S
@@ -0,0 +1,81 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for saving floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+CFI_STARTPROC
+HIDDEN_FUNC(_savefpr_14) stfd 14,-144(11) /* save fp registers */
+HIDDEN_FUNC(_savefpr_15) stfd 15,-136(11)
+HIDDEN_FUNC(_savefpr_16) stfd 16,-128(11)
+HIDDEN_FUNC(_savefpr_17) stfd 17,-120(11)
+HIDDEN_FUNC(_savefpr_18) stfd 18,-112(11)
+HIDDEN_FUNC(_savefpr_19) stfd 19,-104(11)
+HIDDEN_FUNC(_savefpr_20) stfd 20,-96(11)
+HIDDEN_FUNC(_savefpr_21) stfd 21,-88(11)
+HIDDEN_FUNC(_savefpr_22) stfd 22,-80(11)
+HIDDEN_FUNC(_savefpr_23) stfd 23,-72(11)
+HIDDEN_FUNC(_savefpr_24) stfd 24,-64(11)
+HIDDEN_FUNC(_savefpr_25) stfd 25,-56(11)
+HIDDEN_FUNC(_savefpr_26) stfd 26,-48(11)
+HIDDEN_FUNC(_savefpr_27) stfd 27,-40(11)
+HIDDEN_FUNC(_savefpr_28) stfd 28,-32(11)
+HIDDEN_FUNC(_savefpr_29) stfd 29,-24(11)
+HIDDEN_FUNC(_savefpr_30) stfd 30,-16(11)
+HIDDEN_FUNC(_savefpr_31) stfd 31,-8(11)
+ blr
+FUNC_END(_savefpr_31)
+FUNC_END(_savefpr_30)
+FUNC_END(_savefpr_29)
+FUNC_END(_savefpr_28)
+FUNC_END(_savefpr_27)
+FUNC_END(_savefpr_26)
+FUNC_END(_savefpr_25)
+FUNC_END(_savefpr_24)
+FUNC_END(_savefpr_23)
+FUNC_END(_savefpr_22)
+FUNC_END(_savefpr_21)
+FUNC_END(_savefpr_20)
+FUNC_END(_savefpr_19)
+FUNC_END(_savefpr_18)
+FUNC_END(_savefpr_17)
+FUNC_END(_savefpr_16)
+FUNC_END(_savefpr_15)
+FUNC_END(_savefpr_14)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/crtsavgpr.S b/libgcc/config/rs6000/crtsavgpr.S
new file mode 100644
index 00000000000..6d473963bad
--- /dev/null
+++ b/libgcc/config/rs6000/crtsavgpr.S
@@ -0,0 +1,81 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for saving integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+CFI_STARTPROC
+HIDDEN_FUNC(_savegpr_14) stw 14,-72(11) /* save gp registers */
+HIDDEN_FUNC(_savegpr_15) stw 15,-68(11)
+HIDDEN_FUNC(_savegpr_16) stw 16,-64(11)
+HIDDEN_FUNC(_savegpr_17) stw 17,-60(11)
+HIDDEN_FUNC(_savegpr_18) stw 18,-56(11)
+HIDDEN_FUNC(_savegpr_19) stw 19,-52(11)
+HIDDEN_FUNC(_savegpr_20) stw 20,-48(11)
+HIDDEN_FUNC(_savegpr_21) stw 21,-44(11)
+HIDDEN_FUNC(_savegpr_22) stw 22,-40(11)
+HIDDEN_FUNC(_savegpr_23) stw 23,-36(11)
+HIDDEN_FUNC(_savegpr_24) stw 24,-32(11)
+HIDDEN_FUNC(_savegpr_25) stw 25,-28(11)
+HIDDEN_FUNC(_savegpr_26) stw 26,-24(11)
+HIDDEN_FUNC(_savegpr_27) stw 27,-20(11)
+HIDDEN_FUNC(_savegpr_28) stw 28,-16(11)
+HIDDEN_FUNC(_savegpr_29) stw 29,-12(11)
+HIDDEN_FUNC(_savegpr_30) stw 30,-8(11)
+HIDDEN_FUNC(_savegpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_savegpr_31)
+FUNC_END(_savegpr_30)
+FUNC_END(_savegpr_29)
+FUNC_END(_savegpr_28)
+FUNC_END(_savegpr_27)
+FUNC_END(_savegpr_26)
+FUNC_END(_savegpr_25)
+FUNC_END(_savegpr_24)
+FUNC_END(_savegpr_23)
+FUNC_END(_savegpr_22)
+FUNC_END(_savegpr_21)
+FUNC_END(_savegpr_20)
+FUNC_END(_savegpr_19)
+FUNC_END(_savegpr_18)
+FUNC_END(_savegpr_17)
+FUNC_END(_savegpr_16)
+FUNC_END(_savegpr_15)
+FUNC_END(_savegpr_14)
+CFI_ENDPROC
+
+#endif
diff --git a/libgcc/config/rs6000/darwin-asm.h b/libgcc/config/rs6000/darwin-asm.h
new file mode 100644
index 00000000000..837b7a33ef8
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-asm.h
@@ -0,0 +1,51 @@
+/* Macro definitions used to support 32/64-bit code in Darwin's
+ * assembly files.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* These are donated from /usr/include/architecture/ppc . */
+
+#if defined(__ppc64__)
+#define MODE_CHOICE(x, y) y
+#else
+#define MODE_CHOICE(x, y) x
+#endif
+
+#define cmpg MODE_CHOICE(cmpw, cmpd)
+#define lg MODE_CHOICE(lwz, ld)
+#define stg MODE_CHOICE(stw, std)
+#define lgx MODE_CHOICE(lwzx, ldx)
+#define stgx MODE_CHOICE(stwx, stdx)
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define stgu MODE_CHOICE(stwu, stdu)
+#define lgux MODE_CHOICE(lwzux, ldux)
+#define stgux MODE_CHOICE(stwux, stdux)
+#define lgwa MODE_CHOICE(lwz, lwa)
+
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+#define GPR_BYTES MODE_CHOICE(4,8) /* size of a GPR in bytes */
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* position of saved
+ LR in frame */
diff --git a/libgcc/config/rs6000/darwin-fallback.c b/libgcc/config/rs6000/darwin-fallback.c
index 4591071ea74..a61ea0b9ca2 100644
--- a/libgcc/config/rs6000/darwin-fallback.c
+++ b/libgcc/config/rs6000/darwin-fallback.c
@@ -1,5 +1,5 @@
/* Fallback frame-state unwinder for Darwin.
- Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2007, 2009, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -28,6 +28,7 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#include "unwind-dw2.h"
diff --git a/libgcc/config/rs6000/darwin-fpsave.S b/libgcc/config/rs6000/darwin-fpsave.S
new file mode 100644
index 00000000000..47fdc92f860
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-fpsave.S
@@ -0,0 +1,92 @@
+/* This file contains the floating-point save and restore routines.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes).
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
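+
+/* Concretely: the entry point for fN is saveFP+((N-14)*4), since each
+   save is one 4-byte stfd; saveFP+60 thus lands on the f29 store. */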
+
+#include "darwin-asm.h"
+
+.text
+ .align 2
+
+/* saveFP saves R0 -- assumed to be the caller's LR -- to 8/16(R1). */
+
+.private_extern saveFP
+saveFP:
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stg r0,SAVED_LR_OFFSET(r1)
+ blr
+
+/* restFP restores the caller's LR from 8/16(R1). Note that the code for
+ this starts at the offset of F30 restoration, so calling this
+ routine in an attempt to restore only F31 WILL NOT WORK (it would
+ be a stupid thing to do, anyway.) */
+
+.private_extern restFP
+restFP:
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ /* <OFFSET OF F30 RESTORE> restore caller's LR */
+ lg r0,SAVED_LR_OFFSET(r1)
+ lfd f30,-16(r1)
+ /* and prepare for return to caller */
+ mtlr r0
+ lfd f31,-8(r1)
+ blr
diff --git a/libgcc/config/rs6000/darwin-gpsave.S b/libgcc/config/rs6000/darwin-gpsave.S
new file mode 100644
index 00000000000..d3c3b912d27
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-gpsave.S
@@ -0,0 +1,118 @@
+/* This file contains the GPR save and restore routines for Darwin.
+ *
+ * Copyright (C) 2011 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Contributed by Iain Sandoe <iains@gcc.gnu.org> */
+
+/* Like their FP and VEC counterparts, these routines have only one externally
+ visible entry point. Calls have to be constructed as offsets from this.
+ (i.e. callers have to jump to "saveGPR+((x-13)*4)" to save registers x..31).
+
+ Each save/load instruction is 4 bytes long (for both m32 and m64 builds).
+
+ The save/restores here are done w.r.t r11.
+
+ restGPRx restores the link reg from the stack and returns to the saved
+ address.
+
+ */
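+
+/* Worked example: to save r28..r31, jump to saveGPR+((28-13)*4), i.e.
+   saveGPR+60, and fall through the r28..r31 stores to the blr. */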
+
+#include "darwin-asm.h"
+
+ .text
+ .align 2
+
+ .private_extern saveGPR
+saveGPR:
+ stg r13,(-19 * GPR_BYTES)(r11)
+ stg r14,(-18 * GPR_BYTES)(r11)
+ stg r15,(-17 * GPR_BYTES)(r11)
+ stg r16,(-16 * GPR_BYTES)(r11)
+ stg r17,(-15 * GPR_BYTES)(r11)
+ stg r18,(-14 * GPR_BYTES)(r11)
+ stg r19,(-13 * GPR_BYTES)(r11)
+ stg r20,(-12 * GPR_BYTES)(r11)
+ stg r21,(-11 * GPR_BYTES)(r11)
+ stg r22,(-10 * GPR_BYTES)(r11)
+ stg r23,( -9 * GPR_BYTES)(r11)
+ stg r24,( -8 * GPR_BYTES)(r11)
+ stg r25,( -7 * GPR_BYTES)(r11)
+ stg r26,( -6 * GPR_BYTES)(r11)
+ stg r27,( -5 * GPR_BYTES)(r11)
+ stg r28,( -4 * GPR_BYTES)(r11)
+ stg r29,( -3 * GPR_BYTES)(r11)
+ stg r30,( -2 * GPR_BYTES)(r11)
+ stg r31,( -1 * GPR_BYTES)(r11)
+ blr
+
+/* */
+
+ .private_extern restGPR
+restGPR:
+ lg r13,(-19 * GPR_BYTES)(r11)
+ lg r14,(-18 * GPR_BYTES)(r11)
+ lg r15,(-17 * GPR_BYTES)(r11)
+ lg r16,(-16 * GPR_BYTES)(r11)
+ lg r17,(-15 * GPR_BYTES)(r11)
+ lg r18,(-14 * GPR_BYTES)(r11)
+ lg r19,(-13 * GPR_BYTES)(r11)
+ lg r20,(-12 * GPR_BYTES)(r11)
+ lg r21,(-11 * GPR_BYTES)(r11)
+ lg r22,(-10 * GPR_BYTES)(r11)
+ lg r23,( -9 * GPR_BYTES)(r11)
+ lg r24,( -8 * GPR_BYTES)(r11)
+ lg r25,( -7 * GPR_BYTES)(r11)
+ lg r26,( -6 * GPR_BYTES)(r11)
+ lg r27,( -5 * GPR_BYTES)(r11)
+ lg r28,( -4 * GPR_BYTES)(r11)
+ lg r29,( -3 * GPR_BYTES)(r11)
+ lg r30,( -2 * GPR_BYTES)(r11)
+ lg r31,( -1 * GPR_BYTES)(r11)
+ blr
+
+ .private_extern restGPRx
+restGPRx:
+ lg r13,(-19 * GPR_BYTES)(r11)
+ lg r14,(-18 * GPR_BYTES)(r11)
+ lg r15,(-17 * GPR_BYTES)(r11)
+ lg r16,(-16 * GPR_BYTES)(r11)
+ lg r17,(-15 * GPR_BYTES)(r11)
+ lg r18,(-14 * GPR_BYTES)(r11)
+ lg r19,(-13 * GPR_BYTES)(r11)
+ lg r20,(-12 * GPR_BYTES)(r11)
+ lg r21,(-11 * GPR_BYTES)(r11)
+ lg r22,(-10 * GPR_BYTES)(r11)
+ lg r23,( -9 * GPR_BYTES)(r11)
+ lg r24,( -8 * GPR_BYTES)(r11)
+ lg r25,( -7 * GPR_BYTES)(r11)
+ lg r26,( -6 * GPR_BYTES)(r11)
+ lg r27,( -5 * GPR_BYTES)(r11)
+ lg r28,( -4 * GPR_BYTES)(r11)
+ lg r29,( -3 * GPR_BYTES)(r11)
+ /* Like the FP restore, we start from the offset for r30
+ thus a restore of only r31 is not going to work. */
+ lg r0,SAVED_LR_OFFSET(r1)
+ lg r30,( -2 * GPR_BYTES)(r11)
+ mtlr r0
+ lg r31,( -1 * GPR_BYTES)(r11)
+ blr
diff --git a/libgcc/config/rs6000/darwin-tramp.S b/libgcc/config/rs6000/darwin-tramp.S
new file mode 100644
index 00000000000..5188c98ef05
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-tramp.S
@@ -0,0 +1,125 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000, 2004, 2005, 2009 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "darwin-asm.h"
+
+/* Set up trampolines. */
+
+.text
+ .align LOG2_GPR_BYTES
+Ltrampoline_initial:
+ mflr r0
+ bl 1f
+Lfunc = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with function address */
+Lchain = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with static chain */
+1: mflr r11
+ lg r12,0(r11) /* function address */
+ mtlr r0
+ mtctr r12
+ lg r11,GPR_BYTES(r11) /* static chain */
+ bctr
+
+trampoline_size = .-Ltrampoline_initial
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
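+
+/* ___trampoline_setup copies the template above into the buffer at R3,
+   patches the Lfunc/Lchain slots with R5/R6, and then flushes the data
+   and instruction caches over the copied region. */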
+
+ .globl ___trampoline_setup
+___trampoline_setup:
+ mflr r0 /* save return address */
+ bcl 20,31,LCF0 /* load up Ltrampoline_initial into r7 */
+LCF0:
+ mflr r11
+ addis r7,r11,ha16(LTRAMP-LCF0)
+ lg r7,lo16(LTRAMP-LCF0)(r7)
+ subi r7,r7,4
+ li r8,trampoline_size /* verify trampoline big enough */
+ cmpg cr1,r8,r4
+ srwi r4,r4,2 /* # words to move (insns always 4-byte) */
+ addi r9,r3,-4 /* adjust pointer for lgu */
+ mtctr r4
+ blt cr1,Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz Lmove
+
+ /* Store correct function and static chain */
+ stg r5,Lfunc(r3)
+ stg r6,Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz Lcache
+
+ /* Ensure cache-flushing has finished. */
+ sync
+ isync
+
+ /* Make the stack executable. */
+ b ___enable_execute_stack
+
+Labort:
+#ifdef __DYNAMIC__
+ bl L_abort$stub
+.data
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 2
+L_abort$stub:
+ .indirect_symbol _abort
+ mflr r0
+ bcl 20,31,L0$_abort
+L0$_abort:
+ mflr r11
+ addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
+ mtlr r0
+ lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
+ mtctr r12
+ bctr
+.data
+.lazy_symbol_pointer
+L_abort$lazy_ptr:
+ .indirect_symbol _abort
+ .g_long dyld_stub_binding_helper
+#else
+ bl _abort
+#endif
+.data
+ .align LOG2_GPR_BYTES
+LTRAMP:
+ .g_long Ltrampoline_initial
+
diff --git a/libgcc/config/rs6000/darwin-vecsave.S b/libgcc/config/rs6000/darwin-vecsave.S
new file mode 100644
index 00000000000..0a46be20c89
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-vecsave.S
@@ -0,0 +1,155 @@
+/* This file contains the vector save and restore routines.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Vector save/restore routines for Darwin. Note that each vector
+ save/restore requires 2 instructions (8 bytes).
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes). */
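+
+/* Each vector slot is 16 bytes: vN lives (N-32)*16 below the base, so
+   v20 is stored at -192 and v31 at -16; the li/stvx pair per register
+   is what makes each vector save or restore 8 bytes of code. */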
+
+ .machine ppc7400
+.text
+ .align 2
+
+.private_extern saveVEC
+saveVEC:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ blr
+
+.private_extern restVEC
+restVEC:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ blr
+
+/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
+
+.private_extern saveVEC_vr11
+saveVEC_vr11:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ mfspr r11,VRsave
+ blr
+
+/* As restVEC, but the original VRsave value is passed in R10. */
+
+.private_extern restVEC_vr10
+restVEC_vr10:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ /* restore VRsave from R10. */
+ mtspr VRsave,r10
+ blr
diff --git a/libgcc/config/rs6000/darwin-world.S b/libgcc/config/rs6000/darwin-world.S
new file mode 100644
index 00000000000..c0b1bf1a2b1
--- /dev/null
+++ b/libgcc/config/rs6000/darwin-world.S
@@ -0,0 +1,259 @@
+/* This file contains the exception-handling save_world and
+ * restore_world routines, which need to do a run-time check to see if
+ * they should save and restore the vector registers.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .machine ppc7400
+.data
+ .align 2
+
+#ifdef __DYNAMIC__
+
+.non_lazy_symbol_pointer
+L_has_vec$non_lazy_ptr:
+ .indirect_symbol __cpu_has_altivec
+#ifdef __ppc64__
+ .quad 0
+#else
+ .long 0
+#endif
+
+#else
+
+/* For static, "pretend" we have a non-lazy-pointer. */
+
+L_has_vec$non_lazy_ptr:
+ .long __cpu_has_altivec
+
+#endif
+
+
+.text
+ .align 2
+
+/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
+ (assuming you have a CPU with vector registers; we use a global var
+ provided by the System Framework to determine this.)
+
+ SAVE_WORLD takes R0 (the caller's caller's return address) and R11
+ (the stack frame size) as parameters. It returns VRsave in R0 if
+ we're on a CPU with vector regs.
+
+ With gcc3, we now need to save and restore CR as well, since gcc3's
+ scheduled prologs can cause comparisons to be moved before calls to
+ save_world!
+
+ USES: R0 R11 R12 */
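+
+/* Frame layout used below (offsets from the incoming R1): CR is stashed
+   at 4(R1), the caller's R0 at 8(R1), F14..F31 at -144..-8, R13..R31
+   from -220, VRsave at -224, and V20..V31 from -416 up to -240. */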
+
+.private_extern save_world
+save_world:
+ stw r0,8(r1)
+ mflr r0
+ bcl 20,31,Ls$pb
+Ls$pb: mflr r12
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
+ mtlr r0
+ lwz r12,0(r12)
+ /* grab CR */
+ mfcr r0
+ /* test HAS_VEC */
+ cmpwi r12,0
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stmw r13,-220(r1)
+ /* stash CR */
+ stw r0,4(r1)
+ /* set R12 pointing at Vector Reg save area */
+ addi r12,r1,-224
+ /* allocate stack frame */
+ stwux r1,r1,r11
+ /* ...but return if HAS_VEC is zero */
+ bne+ L$saveVMX
+ /* Not forgetting to restore CR. */
+ mtcr r0
+ blr
+
+L$saveVMX:
+ /* We're saving Vector regs too. */
+ /* Restore CR from R0. No More Branches! */
+ mtcr r0
+
+ /* We should really use VRSAVE to figure out which vector regs
+ we actually need to save and restore. Some other time :-/ */
+
+ li r11,-192
+ stvx v20,r11,r12
+ li r11,-176
+ stvx v21,r11,r12
+ li r11,-160
+ stvx v22,r11,r12
+ li r11,-144
+ stvx v23,r11,r12
+ li r11,-128
+ stvx v24,r11,r12
+ li r11,-112
+ stvx v25,r11,r12
+ li r11,-96
+ stvx v26,r11,r12
+ li r11,-80
+ stvx v27,r11,r12
+ li r11,-64
+ stvx v28,r11,r12
+ li r11,-48
+ stvx v29,r11,r12
+ li r11,-32
+ stvx v30,r11,r12
+ mfspr r0,VRsave
+ li r11,-16
+ stvx v31,r11,r12
+ /* VRsave lives at -224(R1) */
+ stw r0,0(r12)
+ blr
+
+
+/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
+ R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
+
+ USES: R0 R10 R11 R12 and R7 R8
+ RETURNS: C++ EH Data registers (R3 - R6.)
+
+ We now set up R7/R8 and jump to rest_world_eh_r7r8.
+
+ rest_world doesn't use the R10 stack adjust parameter, nor does it
+ pick up the R3-R6 exception handling stuff. */
+
+.private_extern rest_world
+rest_world:
+ /* Pick up previous SP */
+ lwz r11, 0(r1)
+ li r7, 0
+ lwz r8, 8(r11)
+ li r10, 0
+ b rest_world_eh_r7r8
+
+.private_extern eh_rest_world_r10
+eh_rest_world_r10:
+ /* Pick up previous SP */
+ lwz r11, 0(r1)
+ mr r7,r10
+ lwz r8, 8(r11)
+ /* pick up the C++ EH data regs (R3 - R6.) */
+ lwz r6,-420(r11)
+ lwz r5,-424(r11)
+ lwz r4,-428(r11)
+ lwz r3,-432(r11)
+
+ b rest_world_eh_r7r8
+
+/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing
+ the exception-handling epilog. R7 contains the offset to add to
+ the SP, and R8 contains the 'real' return address.
+
+ USES: R0 R11 R12 [R7/R8]
+ RETURNS: C++ EH Data registers (R3 - R6.) */
+
+rest_world_eh_r7r8:
+ bcl 20,31,Lr7r8$pb
+Lr7r8$pb: mflr r12
+ lwz r11,0(r1)
+ /* R11 := previous SP */
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12)
+ lwz r0,4(r11)
+ /* R0 := old CR */
+ lwz r12,0(r12)
+ /* R12 := HAS_VEC */
+ mtcr r0
+ cmpwi r12,0
+ lmw r13,-220(r11)
+ beq L.rest_world_fp_eh
+ /* restore VRsave and V20..V31 */
+ lwz r0,-224(r11)
+ li r12,-416
+ mtspr VRsave,r0
+ lvx v20,r11,r12
+ li r12,-400
+ lvx v21,r11,r12
+ li r12,-384
+ lvx v22,r11,r12
+ li r12,-368
+ lvx v23,r11,r12
+ li r12,-352
+ lvx v24,r11,r12
+ li r12,-336
+ lvx v25,r11,r12
+ li r12,-320
+ lvx v26,r11,r12
+ li r12,-304
+ lvx v27,r11,r12
+ li r12,-288
+ lvx v28,r11,r12
+ li r12,-272
+ lvx v29,r11,r12
+ li r12,-256
+ lvx v30,r11,r12
+ li r12,-240
+ lvx v31,r11,r12
+
+L.rest_world_fp_eh:
+ lfd f14,-144(r11)
+ lfd f15,-136(r11)
+ lfd f16,-128(r11)
+ lfd f17,-120(r11)
+ lfd f18,-112(r11)
+ lfd f19,-104(r11)
+ lfd f20,-96(r11)
+ lfd f21,-88(r11)
+ lfd f22,-80(r11)
+ lfd f23,-72(r11)
+ lfd f24,-64(r11)
+ lfd f25,-56(r11)
+ lfd f26,-48(r11)
+ lfd f27,-40(r11)
+ lfd f28,-32(r11)
+ lfd f29,-24(r11)
+ lfd f30,-16(r11)
+ /* R8 is the exception-handler's address */
+ mtctr r8
+ lfd f31,-8(r11)
+ /* set SP to original value + R7 offset */
+ add r1,r11,r7
+ bctr
diff --git a/libgcc/config/rs6000/e500crtres32gpr.S b/libgcc/config/rs6000/e500crtres32gpr.S
new file mode 100644
index 00000000000..6fbff820b88
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtres32gpr.S
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Bare" versions that simply return to their caller. */
+
+HIDDEN_FUNC(_rest32gpr_14) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_rest32gpr_31)
+FUNC_END(_rest32gpr_30)
+FUNC_END(_rest32gpr_29)
+FUNC_END(_rest32gpr_28)
+FUNC_END(_rest32gpr_27)
+FUNC_END(_rest32gpr_26)
+FUNC_END(_rest32gpr_25)
+FUNC_END(_rest32gpr_24)
+FUNC_END(_rest32gpr_23)
+FUNC_END(_rest32gpr_22)
+FUNC_END(_rest32gpr_21)
+FUNC_END(_rest32gpr_20)
+FUNC_END(_rest32gpr_19)
+FUNC_END(_rest32gpr_18)
+FUNC_END(_rest32gpr_17)
+FUNC_END(_rest32gpr_16)
+FUNC_END(_rest32gpr_15)
+FUNC_END(_rest32gpr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtres64gpr.S b/libgcc/config/rs6000/e500crtres64gpr.S
new file mode 100644
index 00000000000..5182e55392d
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtres64gpr.S
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 64-bit integer registers, called by the compiler. */
+/* "Bare" versions that return to their caller. */
+
+HIDDEN_FUNC(_rest64gpr_14) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31) evldd 31,136(11)
+ blr
+FUNC_END(_rest64gpr_31)
+FUNC_END(_rest64gpr_30)
+FUNC_END(_rest64gpr_29)
+FUNC_END(_rest64gpr_28)
+FUNC_END(_rest64gpr_27)
+FUNC_END(_rest64gpr_26)
+FUNC_END(_rest64gpr_25)
+FUNC_END(_rest64gpr_24)
+FUNC_END(_rest64gpr_23)
+FUNC_END(_rest64gpr_22)
+FUNC_END(_rest64gpr_21)
+FUNC_END(_rest64gpr_20)
+FUNC_END(_rest64gpr_19)
+FUNC_END(_rest64gpr_18)
+FUNC_END(_rest64gpr_17)
+FUNC_END(_rest64gpr_16)
+FUNC_END(_rest64gpr_15)
+FUNC_END(_rest64gpr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtres64gprctr.S b/libgcc/config/rs6000/e500crtres64gprctr.S
new file mode 100644
index 00000000000..74309d6bed6
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtres64gprctr.S
@@ -0,0 +1,90 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 64-bit integer registers where the number of
+ registers to be restored is passed in CTR, called by the compiler. */
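+
+/* Each bdz decrements CTR and exits through _rest64gpr_ctr_done when it
+   reaches zero, so the caller sets CTR to the number of registers to
+   restore before entering at the first one. */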
+
+HIDDEN_FUNC(_rest64gpr_ctr_14) evldd 14,0(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_15) evldd 15,8(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_16) evldd 16,16(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_17) evldd 17,24(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_18) evldd 18,32(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_19) evldd 19,40(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_20) evldd 20,48(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_21) evldd 21,56(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_22) evldd 22,64(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_23) evldd 23,72(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_24) evldd 24,80(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_25) evldd 25,88(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_26) evldd 26,96(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_27) evldd 27,104(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_28) evldd 28,112(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_29) evldd 29,120(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_30) evldd 30,128(11)
+ bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_31) evldd 31,136(11)
+_rest64gpr_ctr_done: blr
+FUNC_END(_rest64gpr_ctr_31)
+FUNC_END(_rest64gpr_ctr_30)
+FUNC_END(_rest64gpr_ctr_29)
+FUNC_END(_rest64gpr_ctr_28)
+FUNC_END(_rest64gpr_ctr_27)
+FUNC_END(_rest64gpr_ctr_26)
+FUNC_END(_rest64gpr_ctr_25)
+FUNC_END(_rest64gpr_ctr_24)
+FUNC_END(_rest64gpr_ctr_23)
+FUNC_END(_rest64gpr_ctr_22)
+FUNC_END(_rest64gpr_ctr_21)
+FUNC_END(_rest64gpr_ctr_20)
+FUNC_END(_rest64gpr_ctr_19)
+FUNC_END(_rest64gpr_ctr_18)
+FUNC_END(_rest64gpr_ctr_17)
+FUNC_END(_rest64gpr_ctr_16)
+FUNC_END(_rest64gpr_ctr_15)
+FUNC_END(_rest64gpr_ctr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtrest32gpr.S b/libgcc/config/rs6000/e500crtrest32gpr.S
new file mode 100644
index 00000000000..4e61010dcff
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtrest32gpr.S
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Tail" versions that perform a tail call. */
+
+HIDDEN_FUNC(_rest32gpr_14_t) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15_t) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16_t) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17_t) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18_t) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19_t) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20_t) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21_t) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22_t) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23_t) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24_t) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25_t) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26_t) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27_t) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28_t) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29_t) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30_t) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31_t) lwz 31,-4(11)
+ lwz 0,4(11)
+ mr 1,11
+ blr
+FUNC_END(_rest32gpr_31_t)
+FUNC_END(_rest32gpr_30_t)
+FUNC_END(_rest32gpr_29_t)
+FUNC_END(_rest32gpr_28_t)
+FUNC_END(_rest32gpr_27_t)
+FUNC_END(_rest32gpr_26_t)
+FUNC_END(_rest32gpr_25_t)
+FUNC_END(_rest32gpr_24_t)
+FUNC_END(_rest32gpr_23_t)
+FUNC_END(_rest32gpr_22_t)
+FUNC_END(_rest32gpr_21_t)
+FUNC_END(_rest32gpr_20_t)
+FUNC_END(_rest32gpr_19_t)
+FUNC_END(_rest32gpr_18_t)
+FUNC_END(_rest32gpr_17_t)
+FUNC_END(_rest32gpr_16_t)
+FUNC_END(_rest32gpr_15_t)
+FUNC_END(_rest32gpr_14_t)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtrest64gpr.S b/libgcc/config/rs6000/e500crtrest64gpr.S
new file mode 100644
index 00000000000..090786fdc71
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtrest64gpr.S
@@ -0,0 +1,74 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* "Tail" versions that perform a tail call. */
+
+HIDDEN_FUNC(_rest64gpr_14_t) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15_t) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16_t) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17_t) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18_t) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19_t) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20_t) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21_t) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22_t) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23_t) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24_t) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25_t) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26_t) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27_t) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28_t) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29_t) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30_t) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31_t) lwz 0,148(11)
+ evldd 31,136(11)
+ addi 1,11,144
+ blr
+FUNC_END(_rest64gpr_31_t)
+FUNC_END(_rest64gpr_30_t)
+FUNC_END(_rest64gpr_29_t)
+FUNC_END(_rest64gpr_28_t)
+FUNC_END(_rest64gpr_27_t)
+FUNC_END(_rest64gpr_26_t)
+FUNC_END(_rest64gpr_25_t)
+FUNC_END(_rest64gpr_24_t)
+FUNC_END(_rest64gpr_23_t)
+FUNC_END(_rest64gpr_22_t)
+FUNC_END(_rest64gpr_21_t)
+FUNC_END(_rest64gpr_20_t)
+FUNC_END(_rest64gpr_19_t)
+FUNC_END(_rest64gpr_18_t)
+FUNC_END(_rest64gpr_17_t)
+FUNC_END(_rest64gpr_16_t)
+FUNC_END(_rest64gpr_15_t)
+FUNC_END(_rest64gpr_14_t)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtresx32gpr.S b/libgcc/config/rs6000/e500crtresx32gpr.S
new file mode 100644
index 00000000000..0b35245df42
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtresx32gpr.S
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Exit" versions that return to the caller's caller. */
+
+HIDDEN_FUNC(_rest32gpr_14_x) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15_x) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16_x) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17_x) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18_x) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19_x) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20_x) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21_x) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22_x) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23_x) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24_x) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25_x) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26_x) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27_x) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28_x) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29_x) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30_x) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+ mr 1,11
+ mtlr 0
+ blr
+FUNC_END(_rest32gpr_31_x)
+FUNC_END(_rest32gpr_30_x)
+FUNC_END(_rest32gpr_29_x)
+FUNC_END(_rest32gpr_28_x)
+FUNC_END(_rest32gpr_27_x)
+FUNC_END(_rest32gpr_26_x)
+FUNC_END(_rest32gpr_25_x)
+FUNC_END(_rest32gpr_24_x)
+FUNC_END(_rest32gpr_23_x)
+FUNC_END(_rest32gpr_22_x)
+FUNC_END(_rest32gpr_21_x)
+FUNC_END(_rest32gpr_20_x)
+FUNC_END(_rest32gpr_19_x)
+FUNC_END(_rest32gpr_18_x)
+FUNC_END(_rest32gpr_17_x)
+FUNC_END(_rest32gpr_16_x)
+FUNC_END(_rest32gpr_15_x)
+FUNC_END(_rest32gpr_14_x)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtresx64gpr.S b/libgcc/config/rs6000/e500crtresx64gpr.S
new file mode 100644
index 00000000000..ce2a6cfa2aa
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtresx64gpr.S
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* "Exit" versions that return to their caller's caller. */
+
+HIDDEN_FUNC(_rest64gpr_14_x) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15_x) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16_x) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17_x) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18_x) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19_x) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20_x) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21_x) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22_x) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23_x) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24_x) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25_x) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26_x) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27_x) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28_x) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29_x) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30_x) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31_x) lwz 0,148(11)
+ evldd 31,136(11)
+ addi 1,11,144
+ mtlr 0
+ blr
+FUNC_END(_rest64gpr_31_x)
+FUNC_END(_rest64gpr_30_x)
+FUNC_END(_rest64gpr_29_x)
+FUNC_END(_rest64gpr_28_x)
+FUNC_END(_rest64gpr_27_x)
+FUNC_END(_rest64gpr_26_x)
+FUNC_END(_rest64gpr_25_x)
+FUNC_END(_rest64gpr_24_x)
+FUNC_END(_rest64gpr_23_x)
+FUNC_END(_rest64gpr_22_x)
+FUNC_END(_rest64gpr_21_x)
+FUNC_END(_rest64gpr_20_x)
+FUNC_END(_rest64gpr_19_x)
+FUNC_END(_rest64gpr_18_x)
+FUNC_END(_rest64gpr_17_x)
+FUNC_END(_rest64gpr_16_x)
+FUNC_END(_rest64gpr_15_x)
+FUNC_END(_rest64gpr_14_x)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsav32gpr.S b/libgcc/config/rs6000/e500crtsav32gpr.S
new file mode 100644
index 00000000000..c891030507e
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsav32gpr.S
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 32-bit integer registers, called by the compiler. */
+/* "Bare" versions that simply return to their caller. */
+
+HIDDEN_FUNC(_save32gpr_14) stw 14,-72(11)
+HIDDEN_FUNC(_save32gpr_15) stw 15,-68(11)
+HIDDEN_FUNC(_save32gpr_16) stw 16,-64(11)
+HIDDEN_FUNC(_save32gpr_17) stw 17,-60(11)
+HIDDEN_FUNC(_save32gpr_18) stw 18,-56(11)
+HIDDEN_FUNC(_save32gpr_19) stw 19,-52(11)
+HIDDEN_FUNC(_save32gpr_20) stw 20,-48(11)
+HIDDEN_FUNC(_save32gpr_21) stw 21,-44(11)
+HIDDEN_FUNC(_save32gpr_22) stw 22,-40(11)
+HIDDEN_FUNC(_save32gpr_23) stw 23,-36(11)
+HIDDEN_FUNC(_save32gpr_24) stw 24,-32(11)
+HIDDEN_FUNC(_save32gpr_25) stw 25,-28(11)
+HIDDEN_FUNC(_save32gpr_26) stw 26,-24(11)
+HIDDEN_FUNC(_save32gpr_27) stw 27,-20(11)
+HIDDEN_FUNC(_save32gpr_28) stw 28,-16(11)
+HIDDEN_FUNC(_save32gpr_29) stw 29,-12(11)
+HIDDEN_FUNC(_save32gpr_30) stw 30,-8(11)
+HIDDEN_FUNC(_save32gpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_save32gpr_31)
+FUNC_END(_save32gpr_30)
+FUNC_END(_save32gpr_29)
+FUNC_END(_save32gpr_28)
+FUNC_END(_save32gpr_27)
+FUNC_END(_save32gpr_26)
+FUNC_END(_save32gpr_25)
+FUNC_END(_save32gpr_24)
+FUNC_END(_save32gpr_23)
+FUNC_END(_save32gpr_22)
+FUNC_END(_save32gpr_21)
+FUNC_END(_save32gpr_20)
+FUNC_END(_save32gpr_19)
+FUNC_END(_save32gpr_18)
+FUNC_END(_save32gpr_17)
+FUNC_END(_save32gpr_16)
+FUNC_END(_save32gpr_15)
+FUNC_END(_save32gpr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsav64gpr.S b/libgcc/config/rs6000/e500crtsav64gpr.S
new file mode 100644
index 00000000000..2a5d3e475fd
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsav64gpr.S
@@ -0,0 +1,72 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers, called by the compiler. */
+
+HIDDEN_FUNC(_save64gpr_14) evstdd 14,0(11)
+HIDDEN_FUNC(_save64gpr_15) evstdd 15,8(11)
+HIDDEN_FUNC(_save64gpr_16) evstdd 16,16(11)
+HIDDEN_FUNC(_save64gpr_17) evstdd 17,24(11)
+HIDDEN_FUNC(_save64gpr_18) evstdd 18,32(11)
+HIDDEN_FUNC(_save64gpr_19) evstdd 19,40(11)
+HIDDEN_FUNC(_save64gpr_20) evstdd 20,48(11)
+HIDDEN_FUNC(_save64gpr_21) evstdd 21,56(11)
+HIDDEN_FUNC(_save64gpr_22) evstdd 22,64(11)
+HIDDEN_FUNC(_save64gpr_23) evstdd 23,72(11)
+HIDDEN_FUNC(_save64gpr_24) evstdd 24,80(11)
+HIDDEN_FUNC(_save64gpr_25) evstdd 25,88(11)
+HIDDEN_FUNC(_save64gpr_26) evstdd 26,96(11)
+HIDDEN_FUNC(_save64gpr_27) evstdd 27,104(11)
+HIDDEN_FUNC(_save64gpr_28) evstdd 28,112(11)
+HIDDEN_FUNC(_save64gpr_29) evstdd 29,120(11)
+HIDDEN_FUNC(_save64gpr_30) evstdd 30,128(11)
+HIDDEN_FUNC(_save64gpr_31) evstdd 31,136(11)
+ blr
+FUNC_END(_save64gpr_31)
+FUNC_END(_save64gpr_30)
+FUNC_END(_save64gpr_29)
+FUNC_END(_save64gpr_28)
+FUNC_END(_save64gpr_27)
+FUNC_END(_save64gpr_26)
+FUNC_END(_save64gpr_25)
+FUNC_END(_save64gpr_24)
+FUNC_END(_save64gpr_23)
+FUNC_END(_save64gpr_22)
+FUNC_END(_save64gpr_21)
+FUNC_END(_save64gpr_20)
+FUNC_END(_save64gpr_19)
+FUNC_END(_save64gpr_18)
+FUNC_END(_save64gpr_17)
+FUNC_END(_save64gpr_16)
+FUNC_END(_save64gpr_15)
+FUNC_END(_save64gpr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsav64gprctr.S b/libgcc/config/rs6000/e500crtsav64gprctr.S
new file mode 100644
index 00000000000..dd0bdf3c89a
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsav64gprctr.S
@@ -0,0 +1,91 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers where the number of
+ registers to be saved is passed in CTR, called by the compiler. */
+/* "Bare" versions that return to their caller. */
+
+HIDDEN_FUNC(_save64gpr_ctr_14) evstdd 14,0(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_15) evstdd 15,8(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_16) evstdd 16,16(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_17) evstdd 17,24(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_18) evstdd 18,32(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_19) evstdd 19,40(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_20) evstdd 20,48(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_21) evstdd 21,56(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_22) evstdd 22,64(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_23) evstdd 23,72(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_24) evstdd 24,80(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_25) evstdd 25,88(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_26) evstdd 26,96(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_27) evstdd 27,104(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_28) evstdd 28,112(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_29) evstdd 29,120(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_30) evstdd 30,128(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_31) evstdd 31,136(11)
+_save64gpr_ctr_done: blr
+FUNC_END(_save64gpr_ctr_31)
+FUNC_END(_save64gpr_ctr_30)
+FUNC_END(_save64gpr_ctr_29)
+FUNC_END(_save64gpr_ctr_28)
+FUNC_END(_save64gpr_ctr_27)
+FUNC_END(_save64gpr_ctr_26)
+FUNC_END(_save64gpr_ctr_25)
+FUNC_END(_save64gpr_ctr_24)
+FUNC_END(_save64gpr_ctr_23)
+FUNC_END(_save64gpr_ctr_22)
+FUNC_END(_save64gpr_ctr_21)
+FUNC_END(_save64gpr_ctr_20)
+FUNC_END(_save64gpr_ctr_19)
+FUNC_END(_save64gpr_ctr_18)
+FUNC_END(_save64gpr_ctr_17)
+FUNC_END(_save64gpr_ctr_16)
+FUNC_END(_save64gpr_ctr_15)
+FUNC_END(_save64gpr_ctr_14)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsavg32gpr.S b/libgcc/config/rs6000/e500crtsavg32gpr.S
new file mode 100644
index 00000000000..d14088e0dec
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsavg32gpr.S
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 32-bit integer registers, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save32gpr_14_g) stw 14,-72(11)
+HIDDEN_FUNC(_save32gpr_15_g) stw 15,-68(11)
+HIDDEN_FUNC(_save32gpr_16_g) stw 16,-64(11)
+HIDDEN_FUNC(_save32gpr_17_g) stw 17,-60(11)
+HIDDEN_FUNC(_save32gpr_18_g) stw 18,-56(11)
+HIDDEN_FUNC(_save32gpr_19_g) stw 19,-52(11)
+HIDDEN_FUNC(_save32gpr_20_g) stw 20,-48(11)
+HIDDEN_FUNC(_save32gpr_21_g) stw 21,-44(11)
+HIDDEN_FUNC(_save32gpr_22_g) stw 22,-40(11)
+HIDDEN_FUNC(_save32gpr_23_g) stw 23,-36(11)
+HIDDEN_FUNC(_save32gpr_24_g) stw 24,-32(11)
+HIDDEN_FUNC(_save32gpr_25_g) stw 25,-28(11)
+HIDDEN_FUNC(_save32gpr_26_g) stw 26,-24(11)
+HIDDEN_FUNC(_save32gpr_27_g) stw 27,-20(11)
+HIDDEN_FUNC(_save32gpr_28_g) stw 28,-16(11)
+HIDDEN_FUNC(_save32gpr_29_g) stw 29,-12(11)
+HIDDEN_FUNC(_save32gpr_30_g) stw 30,-8(11)
+HIDDEN_FUNC(_save32gpr_31_g) stw 31,-4(11)
+ b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save32gpr_31_g)
+FUNC_END(_save32gpr_30_g)
+FUNC_END(_save32gpr_29_g)
+FUNC_END(_save32gpr_28_g)
+FUNC_END(_save32gpr_27_g)
+FUNC_END(_save32gpr_26_g)
+FUNC_END(_save32gpr_25_g)
+FUNC_END(_save32gpr_24_g)
+FUNC_END(_save32gpr_23_g)
+FUNC_END(_save32gpr_22_g)
+FUNC_END(_save32gpr_21_g)
+FUNC_END(_save32gpr_20_g)
+FUNC_END(_save32gpr_19_g)
+FUNC_END(_save32gpr_18_g)
+FUNC_END(_save32gpr_17_g)
+FUNC_END(_save32gpr_16_g)
+FUNC_END(_save32gpr_15_g)
+FUNC_END(_save32gpr_14_g)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsavg64gpr.S b/libgcc/config/rs6000/e500crtsavg64gpr.S
new file mode 100644
index 00000000000..cbad75bc053
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsavg64gpr.S
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save64gpr_14_g) evstdd 14,0(11)
+HIDDEN_FUNC(_save64gpr_15_g) evstdd 15,8(11)
+HIDDEN_FUNC(_save64gpr_16_g) evstdd 16,16(11)
+HIDDEN_FUNC(_save64gpr_17_g) evstdd 17,24(11)
+HIDDEN_FUNC(_save64gpr_18_g) evstdd 18,32(11)
+HIDDEN_FUNC(_save64gpr_19_g) evstdd 19,40(11)
+HIDDEN_FUNC(_save64gpr_20_g) evstdd 20,48(11)
+HIDDEN_FUNC(_save64gpr_21_g) evstdd 21,56(11)
+HIDDEN_FUNC(_save64gpr_22_g) evstdd 22,64(11)
+HIDDEN_FUNC(_save64gpr_23_g) evstdd 23,72(11)
+HIDDEN_FUNC(_save64gpr_24_g) evstdd 24,80(11)
+HIDDEN_FUNC(_save64gpr_25_g) evstdd 25,88(11)
+HIDDEN_FUNC(_save64gpr_26_g) evstdd 26,96(11)
+HIDDEN_FUNC(_save64gpr_27_g) evstdd 27,104(11)
+HIDDEN_FUNC(_save64gpr_28_g) evstdd 28,112(11)
+HIDDEN_FUNC(_save64gpr_29_g) evstdd 29,120(11)
+HIDDEN_FUNC(_save64gpr_30_g) evstdd 30,128(11)
+HIDDEN_FUNC(_save64gpr_31_g) evstdd 31,136(11)
+ b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save64gpr_31_g)
+FUNC_END(_save64gpr_30_g)
+FUNC_END(_save64gpr_29_g)
+FUNC_END(_save64gpr_28_g)
+FUNC_END(_save64gpr_27_g)
+FUNC_END(_save64gpr_26_g)
+FUNC_END(_save64gpr_25_g)
+FUNC_END(_save64gpr_24_g)
+FUNC_END(_save64gpr_23_g)
+FUNC_END(_save64gpr_22_g)
+FUNC_END(_save64gpr_21_g)
+FUNC_END(_save64gpr_20_g)
+FUNC_END(_save64gpr_19_g)
+FUNC_END(_save64gpr_18_g)
+FUNC_END(_save64gpr_17_g)
+FUNC_END(_save64gpr_16_g)
+FUNC_END(_save64gpr_15_g)
+FUNC_END(_save64gpr_14_g)
+
+#endif
diff --git a/libgcc/config/rs6000/e500crtsavg64gprctr.S b/libgcc/config/rs6000/e500crtsavg64gprctr.S
new file mode 100644
index 00000000000..238df4e8319
--- /dev/null
+++ b/libgcc/config/rs6000/e500crtsavg64gprctr.S
@@ -0,0 +1,90 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save64gpr_ctr_14_g) evstdd 14,0(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_15_g) evstdd 15,8(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_16_g) evstdd 16,16(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_17_g) evstdd 17,24(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_18_g) evstdd 18,32(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_19_g) evstdd 19,40(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_20_g) evstdd 20,48(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_21_g) evstdd 21,56(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_22_g) evstdd 22,64(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_23_g) evstdd 23,72(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_24_g) evstdd 24,80(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_25_g) evstdd 25,88(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_26_g) evstdd 26,96(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_27_g) evstdd 27,104(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_28_g) evstdd 28,112(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_29_g) evstdd 29,120(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_30_g) evstdd 30,128(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_31_g) evstdd 31,136(11)
+_save64gpr_ctr_g_done: b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save64gpr_ctr_31_g)
+FUNC_END(_save64gpr_ctr_30_g)
+FUNC_END(_save64gpr_ctr_29_g)
+FUNC_END(_save64gpr_ctr_28_g)
+FUNC_END(_save64gpr_ctr_27_g)
+FUNC_END(_save64gpr_ctr_26_g)
+FUNC_END(_save64gpr_ctr_25_g)
+FUNC_END(_save64gpr_ctr_24_g)
+FUNC_END(_save64gpr_ctr_23_g)
+FUNC_END(_save64gpr_ctr_22_g)
+FUNC_END(_save64gpr_ctr_21_g)
+FUNC_END(_save64gpr_ctr_20_g)
+FUNC_END(_save64gpr_ctr_19_g)
+FUNC_END(_save64gpr_ctr_18_g)
+FUNC_END(_save64gpr_ctr_17_g)
+FUNC_END(_save64gpr_ctr_16_g)
+FUNC_END(_save64gpr_ctr_15_g)
+FUNC_END(_save64gpr_ctr_14_g)
+
+#endif
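
The _ctr_ variants implement the same chain, but CTR bounds how many registers are actually stored: each bdz decrements CTR and leaves the chain once it reaches zero. A sketch that saves only r14..r17 (hypothetical; the caller loads the register count into CTR first):

	li 0,4				/* number of registers to save (r14..r17) */
	mtctr 0
	addi 11,1,8			/* r11 = base of the save area (r14 slot) */
	bl _save64gpr_ctr_14_g		/* the fourth bdz leaves the chain after r17 */
	mflr 30				/* GOT address, as with the plain _g entries */
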
diff --git a/libgcc/config/rs6000/eabi-ci.S b/libgcc/config/rs6000/eabi-ci.S
new file mode 100644
index 00000000000..696f33d394f
--- /dev/null
+++ b/libgcc/config/rs6000/eabi-ci.S
@@ -0,0 +1,113 @@
+/* crti.s for eabi
+ Copyright (C) 1996, 2000, 2008, 2009 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file just supplies labeled starting points for the .got* and other
+ special sections. It is linked in before all other modules. */
+
+ .ident "GNU C crti.s"
+
+#include <ppc-asm.h>
+
+#ifndef __powerpc64__
+ .section ".got","aw"
+ .globl __GOT_START__
+ .type __GOT_START__,@object
+__GOT_START__:
+
+ .section ".got1","aw"
+ .globl __GOT1_START__
+ .type __GOT1_START__,@object
+__GOT1_START__:
+
+ .section ".got2","aw"
+ .globl __GOT2_START__
+ .type __GOT2_START__,@object
+__GOT2_START__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_START__
+ .type __FIXUP_START__,@object
+__FIXUP_START__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_START__
+ .type __SDATA_START__,@object
+ .weak _SDA_BASE_
+ .type _SDA_BASE_,@object
+__SDATA_START__:
+_SDA_BASE_:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_START__
+ .type __SBSS_START__,@object
+__SBSS_START__:
+
+ .section ".sdata2","a"
+ .weak _SDA2_BASE_
+ .type _SDA2_BASE_,@object
+ .globl __SDATA2_START__
+ .type __SDATA2_START__,@object
+__SDATA2_START__:
+_SDA2_BASE_:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_START__
+ .type __SBSS2_START__,@object
+__SBSS2_START__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_START__
+ .type __EXCEPT_START__,@object
+__EXCEPT_START__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_BEGIN__
+ .type __EH_FRAME_BEGIN__,@object
+__EH_FRAME_BEGIN__:
+
+/* Head of __init function used for static constructors. */
+ .section ".init","ax"
+ .align 2
+FUNC_START(__init)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
+
+/* Head of __fini function used for static destructors. */
+ .section ".fini","ax"
+ .align 2
+FUNC_START(__fini)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
+#endif
diff --git a/libgcc/config/rs6000/eabi-cn.S b/libgcc/config/rs6000/eabi-cn.S
new file mode 100644
index 00000000000..68774097c7c
--- /dev/null
+++ b/libgcc/config/rs6000/eabi-cn.S
@@ -0,0 +1,104 @@
+/* crtn.s for eabi
+ Copyright (C) 1996, 2000, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file just supplies labeled ending points for the .got* and other
+ special sections. It is linked in last, after all other modules. */
+
+ .ident "GNU C crtn.s"
+
+#ifndef __powerpc64__
+ .section ".got","aw"
+ .globl __GOT_END__
+ .type __GOT_END__,@object
+__GOT_END__:
+
+ .section ".got1","aw"
+ .globl __GOT1_END__
+ .type __GOT1_END__,@object
+__GOT1_END__:
+
+ .section ".got2","aw"
+ .globl __GOT2_END__
+ .type __GOT2_END__,@object
+__GOT2_END__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_END__
+ .type __FIXUP_END__,@object
+__FIXUP_END__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+ .section ".dtors","aw"
+ .weak __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_END__
+ .type __SDATA_END__,@object
+__SDATA_END__:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_END__
+ .type __SBSS_END__,@object
+__SBSS_END__:
+
+ .section ".sdata2","a"
+ .globl __SDATA2_END__
+ .type __SDATA2_END__,@object
+__SDATA2_END__:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_END__
+ .type __SBSS2_END__,@object
+__SBSS2_END__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_END__
+ .type __EXCEPT_END__,@object
+__EXCEPT_END__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_END__
+ .type __EH_FRAME_END__,@object
+__EH_FRAME_END__:
+ .long 0
+
+/* Tail of __init function used for static constructors. */
+ .section ".init","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
+
+/* Tail of __fini function used for static destructors. */
+ .section ".fini","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
+#endif
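
eabi-ci.S and eabi-cn.S contribute only the opening and closing instructions of __init and __fini; whatever the linker accumulates in the .init and .fini sections between them becomes the function body. Read together, the linked __init is roughly:

__init:	stwu 1,-16(1)			/* head, from eabi-ci.S */
	mflr 0
	stw 0,20(1)
	...				/* .init fragments from crtbegin.o, user objects, crtend.o */
	lwz 0,20(1)			/* tail, from eabi-cn.S */
	mtlr 0
	addi 1,1,16
	blr
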
diff --git a/libgcc/config/rs6000/eabi.S b/libgcc/config/rs6000/eabi.S
new file mode 100644
index 00000000000..292d88e5016
--- /dev/null
+++ b/libgcc/config/rs6000/eabi.S
@@ -0,0 +1,289 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initialization needed for the eabi environment. */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifndef __powerpc64__
+
+ .section ".got2","aw"
+ .align 2
+.LCTOC1 = . /* +32768 */
+
+/* Table of addresses */
+.Ltable = .-.LCTOC1
+ .long .LCTOC1 /* address we are really at */
+
+.Lsda = .-.LCTOC1
+ .long _SDA_BASE_ /* address of the first small data area */
+
+.Lsdas = .-.LCTOC1
+ .long __SDATA_START__ /* start of .sdata/.sbss section */
+
+.Lsdae = .-.LCTOC1
+ .long __SBSS_END__ /* end of .sdata/.sbss section */
+
+.Lsda2 = .-.LCTOC1
+ .long _SDA2_BASE_ /* address of the second small data area */
+
+.Lsda2s = .-.LCTOC1
+ .long __SDATA2_START__ /* start of .sdata2/.sbss2 section */
+
+.Lsda2e = .-.LCTOC1
+ .long __SBSS2_END__ /* end of .sdata2/.sbss2 section */
+
+#ifdef _RELOCATABLE
+.Lgots = .-.LCTOC1
+ .long __GOT_START__ /* Global offset table start */
+
+.Lgotm1 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_-4 /* end of GOT ptrs before the blrl + 3 reserved words */
+
+.Lgotm2 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_+12 /* start of GOT ptrs after the blrl + 3 reserved words */
+
+.Lgote = .-.LCTOC1
+ .long __GOT_END__ /* Global offset table end */
+
+.Lgot2s = .-.LCTOC1
+ .long __GOT2_START__ /* -mrelocatable GOT pointers start */
+
+.Lgot2e = .-.LCTOC1
+ .long __GOT2_END__ /* -mrelocatable GOT pointers end */
+
+.Lfixups = .-.LCTOC1
+ .long __FIXUP_START__ /* start of .fixup section */
+
+.Lfixupe = .-.LCTOC1
+ .long __FIXUP_END__ /* end of .fixup section */
+
+.Lctors = .-.LCTOC1
+ .long __CTOR_LIST__ /* start of .ctor section */
+
+.Lctore = .-.LCTOC1
+ .long __CTOR_END__ /* end of .ctor section */
+
+.Ldtors = .-.LCTOC1
+ .long __DTOR_LIST__ /* start of .dtor section */
+
+.Ldtore = .-.LCTOC1
+ .long __DTOR_END__ /* end of .dtor section */
+
+.Lexcepts = .-.LCTOC1
+ .long __EXCEPT_START__ /* start of .gcc_except_table section */
+
+.Lexcepte = .-.LCTOC1
+ .long __EXCEPT_END__ /* end of .gcc_except_table section */
+
+.Linit = .-.LCTOC1
+ .long .Linit_p /* address of variable to say we've been called */
+
+ .text
+ .align 2
+.Lptr:
+ .long .LCTOC1-.Laddr /* PC relative pointer to .got2 */
+#endif
+
+ .data
+ .align 2
+.Linit_p:
+ .long 0
+
+ .text
+
+FUNC_START(__eabi)
+
+/* Eliminate -mrelocatable code if not -mrelocatable, so that this file can
+ be assembled with assemblers other than GAS. */
+
+#ifndef _RELOCATABLE
+ addis 10,0,.Linit_p@ha /* init flag */
+ addis 11,0,.LCTOC1@ha /* load address of .LCTOC1 */
+ lwz 9,.Linit_p@l(10) /* init flag */
+ addi 11,11,.LCTOC1@l
+ cmplwi 2,9,0 /* init flag != 0? */
+ bnelr 2 /* return now, if we've been called already */
+ stw 1,.Linit_p@l(10) /* store a nonzero value in the done flag */
+
+#else /* -mrelocatable */
+ mflr 0
+ bl .Laddr /* get current address */
+.Laddr:
+ mflr 12 /* real address of .Laddr */
+ lwz 11,(.Lptr-.Laddr)(12) /* linker generated address of .LCTOC1 */
+ add 11,11,12 /* correct to real pointer */
+ lwz 12,.Ltable(11) /* get linker's idea of where .Laddr is */
+ lwz 10,.Linit(11) /* address of init flag */
+ subf. 12,12,11 /* calculate difference */
+ lwzx 9,10,12 /* done flag */
+ cmplwi 2,9,0 /* init flag != 0? */
+ mtlr 0 /* restore in case branch was taken */
+ bnelr 2 /* return now, if we've been called already */
+ stwx 1,10,12 /* store a nonzero value in the done flag */
+ beq+ 0,.Lsdata /* skip if we don't need to relocate */
+
+/* We need to relocate the .got2 pointers. */
+
+ lwz 3,.Lgot2s(11) /* GOT2 pointers start */
+ lwz 4,.Lgot2e(11) /* GOT2 pointers end */
+ add 3,12,3 /* adjust pointers */
+ add 4,12,4
+ bl FUNC_NAME(__eabi_convert) /* convert pointers in .got2 section */
+
+/* Fixup the .ctor section for static constructors */
+
+ lwz 3,.Lctors(11) /* constructors pointers start */
+ lwz 4,.Lctore(11) /* constructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert constructors */
+
+/* Fixup the .dtor section for static destructors */
+
+ lwz 3,.Ldtors(11) /* destructors pointers start */
+ lwz 4,.Ldtore(11) /* destructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert destructors */
+
+/* Fixup the .gcc_except_table section for G++ exceptions */
+
+ lwz 3,.Lexcepts(11) /* exception table pointers start */
+ lwz 4,.Lexcepte(11) /* exception table pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert exceptions */
+
+/* Fixup the addresses in the GOT below _GLOBAL_OFFSET_TABLE_-4 */
+
+ lwz 3,.Lgots(11) /* GOT table pointers start */
+ lwz 4,.Lgotm1(11) /* GOT table pointers below _GLOBAL_OFFSET_TABLE_-4 */
+ bl FUNC_NAME(__eabi_convert) /* convert lower GOT */
+
+/* Fixup the addresses in the GOT above _GLOBAL_OFFSET_TABLE_+12 */
+
+ lwz 3,.Lgotm2(11) /* GOT table pointers above _GLOBAL_OFFSET_TABLE_+12 */
+ lwz 4,.Lgote(11) /* GOT table pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert upper GOT */
+
+/* Fixup any user initialized pointers now (the compiler drops pointers to */
+/* each of the relocs that it does in the .fixup section). */
+
+.Lfix:
+ lwz 3,.Lfixups(11) /* fixup pointers start */
+ lwz 4,.Lfixupe(11) /* fixup pointers end */
+ bl FUNC_NAME(__eabi_uconvert) /* convert user initialized pointers */
+
+.Lsdata:
+ mtlr 0 /* restore link register */
+#endif /* _RELOCATABLE */
+
+/* Only load up register 13 if there is a .sdata and/or .sbss section */
+ lwz 3,.Lsdas(11) /* start of .sdata/.sbss section */
+ lwz 4,.Lsdae(11) /* end of .sdata/.sbss section */
+ cmpw 1,3,4 /* .sdata/.sbss section non-empty? */
+ beq- 1,.Lsda2l /* skip loading r13 */
+
+ lwz 13,.Lsda(11) /* load r13 with _SDA_BASE_ address */
+
+/* Only load up register 2 if there is a .sdata2 and/or .sbss2 section */
+
+.Lsda2l:
+ lwz 3,.Lsda2s(11) /* start of .sdata2/.sbss2 section */
+ lwz 4,.Lsda2e(11) /* end of .sdata2/.sbss2 section */
+ cmpw 1,3,4 /* .sdata2/.sbss2 section non-empty? */
+ beq+ 1,.Ldone /* skip loading r2 */
+
+ lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */
+
+/* Done adjusting pointers; return by way of running the C++ global constructors. */
+
+.Ldone:
+ b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */
+FUNC_END(__eabi)
+
+/* Special subroutine to convert a bunch of pointers directly.
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+#ifdef _RELOCATABLE
+FUNC_START(__eabi_convert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lcvt:
+ lwzu 6,4(3) /* pointer to convert */
+ cmpwi 0,6,0
+ beq- .Lcvt2 /* if pointer is null, don't convert */
+
+ add 6,6,12 /* convert pointer */
+ stw 6,0(3)
+.Lcvt2:
+ bdnz+ .Lcvt
+ blr
+
+FUNC_END(__eabi_convert)
+
+/* Special subroutine to convert the pointers the user has initialized. The
+ compiler has placed the address of the initialized pointer into the .fixup
+ section.
+
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+
+FUNC_START(__eabi_uconvert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lucvt:
+ lwzu 6,4(3) /* next pointer to pointer to convert */
+ add 6,6,12 /* adjust pointer */
+ lwz 7,0(6) /* get the pointer it points to */
+ stw 6,0(3) /* store adjusted pointer */
+ add 7,7,12 /* adjust */
+ stw 7,0(6)
+ bdnz+ .Lucvt
+ blr
+
+FUNC_END(__eabi_uconvert)
+#endif
+#endif
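
In C terms, __eabi_convert adds the relocation delta (passed in r12) to every non-NULL word of a table, while __eabi_uconvert follows one more level of indirection: each .fixup word is the unrelocated address of a user-initialized pointer, and both the word and the pointer it names get adjusted. A rough C rendering, for illustration only — the real routines are the asm above, with arguments in r3/r4/r12:

typedef unsigned long word;

/* __eabi_convert: add DELTA to every non-NULL word in [LO, HI).  */
static void
eabi_convert (word *lo, word *hi, word delta)
{
  word *p;
  for (p = lo; p < hi; p++)
    if (*p != 0)
      *p += delta;
}

/* __eabi_uconvert: each word in [LO, HI) is the unrelocated address of a
   user-initialized pointer; relocate the word, then the pointer it names.  */
static void
eabi_uconvert (word *lo, word *hi, word delta)
{
  word *p;
  for (p = lo; p < hi; p++)
    {
      word *q = (word *) (*p + delta);	/* relocated address of the pointer */
      *p = (word) q;
      *q += delta;			/* relocate the pointer's value */
    }
}
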
diff --git a/libgcc/config/rs6000/gthr-aix.h b/libgcc/config/rs6000/gthr-aix.h
new file mode 100644
index 00000000000..3681af4257e
--- /dev/null
+++ b/libgcc/config/rs6000/gthr-aix.h
@@ -0,0 +1,35 @@
+/* Threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+/* Copyright (C) 2000, 2009, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_AIX_H
+#define GCC_GTHR_AIX_H
+
+#ifdef _THREAD_SAFE
+#include "gthr-posix.h"
+#else
+#include "gthr-single.h"
+#endif
+
+#endif /* GCC_GTHR_AIX_H */
diff --git a/libgcc/config/rs6000/libgcc-darwin.10.4.ver b/libgcc/config/rs6000/libgcc-darwin.10.4.ver
new file mode 100644
index 00000000000..0c6f7c23156
--- /dev/null
+++ b/libgcc/config/rs6000/libgcc-darwin.10.4.ver
@@ -0,0 +1,93 @@
+# Copyright (C) 2005 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdi3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldi3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/libgcc/config/rs6000/libgcc-darwin.10.5.ver b/libgcc/config/rs6000/libgcc-darwin.10.5.ver
new file mode 100644
index 00000000000..c2f08924fd7
--- /dev/null
+++ b/libgcc/config/rs6000/libgcc-darwin.10.5.ver
@@ -0,0 +1,106 @@
+# Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divtc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___floatundidf
+___floatundisf
+___floatunditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___multc3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powitf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/libgcc/config/rs6000/libgcc-ppc-glibc.ver b/libgcc/config/rs6000/libgcc-glibc.ver
index 8862c14cb3d..8862c14cb3d 100644
--- a/libgcc/config/rs6000/libgcc-ppc-glibc.ver
+++ b/libgcc/config/rs6000/libgcc-glibc.ver
diff --git a/libgcc/config/rs6000/libgcc-ppc64.ver b/libgcc/config/rs6000/libgcc-ibm-ldouble.ver
index b27b4b49249..b27b4b49249 100644
--- a/libgcc/config/rs6000/libgcc-ppc64.ver
+++ b/libgcc/config/rs6000/libgcc-ibm-ldouble.ver
diff --git a/libgcc/config/rs6000/sol-ci.S b/libgcc/config/rs6000/sol-ci.S
new file mode 100644
index 00000000000..7c2fbae9747
--- /dev/null
+++ b/libgcc/config/rs6000/sol-ci.S
@@ -0,0 +1,94 @@
+# crti.s for sysv4
+
+# Copyright (C) 1996, 2008, 2009 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just supplies labeled starting points for the .got* and other
+# special sections. It is linked in before all other modules.
+
+ .ident "GNU C scrti.s"
+
+#ifndef __powerpc64__
+# Start of .text
+ .section ".text"
+ .globl _ex_text0
+_ex_text0:
+
+# Exception range
+ .section ".exception_ranges","aw"
+ .globl _ex_range0
+_ex_range0:
+
+# List of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+# List of C++ destructors
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+# Head of _init function used for static constructors
+ .section ".init","ax"
+ .align 2
+ .globl _init
+ .type _init,@function
+_init: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# lwz %r3,_ex_shared0@got(%r31)
+# lwz %r4,-8(%r3) # _ex_register or 0
+# cmpi %cr0,%r4,0
+# beq .Lno_reg
+# mtlr %r4
+# blrl
+#.Lno_reg:
+
+# Head of _fini function used for static destructors
+ .section ".fini","ax"
+ .align 2
+ .globl _fini
+ .type _fini,@function
+_fini: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# _environ and its evil twin environ, pointing to the environment
+ .section ".sdata","aw"
+ .align 2
+ .globl _environ
+ .space 4
+ .weak environ
+ .set environ,_environ
+#endif
diff --git a/libgcc/config/rs6000/sol-cn.S b/libgcc/config/rs6000/sol-cn.S
new file mode 100644
index 00000000000..4aeacaf2cff
--- /dev/null
+++ b/libgcc/config/rs6000/sol-cn.S
@@ -0,0 +1,72 @@
+# crtn.s for sysv4
+
+# Copyright (C) 1996, 2007, 2008, 2009 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just supplies labeled ending points for the .got* and other
+# special sections. It is linked in last, after all other modules.
+
+ .ident "GNU C scrtn.s"
+
+#ifndef __powerpc64__
+# Default versions of exception handling register/deregister
+ .weak _ex_register
+ .weak _ex_deregister
+ .set _ex_register,0
+ .set _ex_deregister,0
+
+# End list of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+# End list of C++ destructors
+ .section ".dtors","aw"
+ .weak __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".text"
+ .globl _ex_text1
+_ex_text1:
+
+ .section ".exception_ranges","aw"
+ .globl _ex_range1
+_ex_range1:
+
+# Tail of _init used for static constructors
+ .section ".init","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
+
+# Tail of _fini used for static destructors
+ .section ".fini","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
+#endif
diff --git a/libgcc/config/rs6000/t-crtstuff b/libgcc/config/rs6000/t-crtstuff
new file mode 100644
index 00000000000..7422d383754
--- /dev/null
+++ b/libgcc/config/rs6000/t-crtstuff
@@ -0,0 +1,3 @@
+# If .sdata is enabled __CTOR_{LIST,END}__ go into .sdata instead of
+# .ctors.
+CRTSTUFF_T_CFLAGS = -msdata=none
diff --git a/libgcc/config/rs6000/t-darwin b/libgcc/config/rs6000/t-darwin
index 4464fd7d6e6..abb41fc9bce 100644
--- a/libgcc/config/rs6000/t-darwin
+++ b/libgcc/config/rs6000/t-darwin
@@ -3,8 +3,21 @@ DARWIN_EXTRA_CRT_BUILD_CFLAGS = -mlongcall -mmacosx-version-min=10.4
crt2.o: $(srcdir)/config/rs6000/darwin-crt2.c
$(crt_compile) $(DARWIN_EXTRA_CRT_BUILD_CFLAGS) -c $<
-LIB2ADD += $(srcdir)/config/rs6000/ppc64-fp.c
+LIB2ADD = $(srcdir)/config/rs6000/darwin-tramp.S \
+ $(srcdir)/config/darwin-64.c \
+ $(srcdir)/config/rs6000/darwin-fpsave.S \
+ $(srcdir)/config/rs6000/darwin-gpsave.S \
+ $(srcdir)/config/rs6000/darwin-world.S \
+ $(srcdir)/config/rs6000/ppc64-fp.c
-LIB2ADDEH += $(srcdir)/config/rs6000/darwin-fallback.c
+LIB2ADD_ST = \
+ $(srcdir)/config/rs6000/darwin-vecsave.S
+
+# The .S files above are designed to run on all processors, even though
+# they use AltiVec instructions.
+# -Wa is used because -force_cpusubtype_ALL doesn't work with -dynamiclib.
+# -mmacosx-version-min=10.4 is used to provide compatibility for code from
+# earlier OSX versions.
+HOST_LIBGCC2_CFLAGS += -Wa,-force_cpusubtype_ALL -mmacosx-version-min=10.4
-SHLIB_VERPFX = $(gcc_srcdir)/config/rs6000/darwin-libgcc
+LIB2ADDEH += $(srcdir)/config/rs6000/darwin-fallback.c
diff --git a/libgcc/config/rs6000/t-darwin64 b/libgcc/config/rs6000/t-darwin64
new file mode 100644
index 00000000000..eea0671f3b5
--- /dev/null
+++ b/libgcc/config/rs6000/t-darwin64
@@ -0,0 +1,6 @@
+LIB2_SIDITI_CONV_FUNCS = yes
+
+LIB2ADD = $(srcdir)/config/rs6000/darwin-tramp.S \
+ $(srcdir)/config/darwin-64.c \
+ $(srcdir)/config/rs6000/darwin-world.S
+
diff --git a/libgcc/config/rs6000/t-ibm-ldouble b/libgcc/config/rs6000/t-ibm-ldouble
index 3f7a2d847e2..b13278498ec 100644
--- a/libgcc/config/rs6000/t-ibm-ldouble
+++ b/libgcc/config/rs6000/t-ibm-ldouble
@@ -3,4 +3,4 @@ LIB2ADD += $(srcdir)/config/rs6000/ibm-ldouble.c
HOST_LIBGCC2_CFLAGS += -mlong-double-128
-SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ibm-ldouble.ver
diff --git a/libgcc/config/rs6000/t-ldbl128 b/libgcc/config/rs6000/t-ldbl128
deleted file mode 100644
index ecc3581b1a0..00000000000
--- a/libgcc/config/rs6000/t-ldbl128
+++ /dev/null
@@ -1,3 +0,0 @@
-SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc-glibc.ver
-
-HOST_LIBGCC2_CFLAGS += -mlong-double-128
diff --git a/libgcc/config/rs6000/t-linux b/libgcc/config/rs6000/t-linux
new file mode 100644
index 00000000000..dcf7ffc255e
--- /dev/null
+++ b/libgcc/config/rs6000/t-linux
@@ -0,0 +1,3 @@
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-glibc.ver
+
+HOST_LIBGCC2_CFLAGS += -mlong-double-128
diff --git a/libgcc/config/rs6000/t-linux64 b/libgcc/config/rs6000/t-linux64
index 7b08315abc0..2b60f1a1f43 100644
--- a/libgcc/config/rs6000/t-linux64
+++ b/libgcc/config/rs6000/t-linux64
@@ -1,2 +1,4 @@
+HOST_LIBGCC2_CFLAGS += -mno-minimal-toc
+
softfp_wrap_start := '\#ifndef __powerpc64__'
softfp_wrap_end := '\#endif'
diff --git a/libgcc/config/rs6000/t-lynx b/libgcc/config/rs6000/t-lynx
new file mode 100644
index 00000000000..af7f5982b9c
--- /dev/null
+++ b/libgcc/config/rs6000/t-lynx
@@ -0,0 +1 @@
+LIB2ADD = $(srcdir)/config/rs6000/tramp.S
diff --git a/libgcc/config/rs6000/t-netbsd b/libgcc/config/rs6000/t-netbsd
new file mode 100644
index 00000000000..3b4ba32a215
--- /dev/null
+++ b/libgcc/config/rs6000/t-netbsd
@@ -0,0 +1,9 @@
+LIB2ADD = $(srcdir)/config/rs6000/tramp.S
+
+LIB2ADD_ST = \
+ $(srcdir)/config/rs6000/crtsavfpr.S \
+ $(srcdir)/config/rs6000/crtresfpr.S \
+ $(srcdir)/config/rs6000/crtsavgpr.S \
+ $(srcdir)/config/rs6000/crtresgpr.S \
+ $(srcdir)/config/rs6000/crtresxfpr.S \
+ $(srcdir)/config/rs6000/crtresxgpr.S
diff --git a/libgcc/config/rs6000/t-ppccomm b/libgcc/config/rs6000/t-ppccomm
index f75bee22737..e9233688268 100644
--- a/libgcc/config/rs6000/t-ppccomm
+++ b/libgcc/config/rs6000/t-ppccomm
@@ -1,107 +1,41 @@
-LIB2ADD += $(srcdir)/config/rs6000/ibm-ldouble.c
-
-LIB2ADD_ST += crtsavfpr.S crtresfpr.S \
- crtsavgpr.S crtresgpr.S \
- crtresxfpr.S crtresxgpr.S \
- e500crtres32gpr.S \
- e500crtres64gpr.S \
- e500crtres64gprctr.S \
- e500crtrest32gpr.S \
- e500crtrest64gpr.S \
- e500crtresx32gpr.S \
- e500crtresx64gpr.S \
- e500crtsav32gpr.S \
- e500crtsav64gpr.S \
- e500crtsav64gprctr.S \
- e500crtsavg32gpr.S \
- e500crtsavg64gpr.S \
- e500crtsavg64gprctr.S
-
-EXTRA_PARTS += crtbegin$(objext) crtend$(objext) \
- crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext) \
- ecrti$(objext) ecrtn$(objext) ncrti$(objext) ncrtn$(objext)
+LIB2ADD += $(srcdir)/config/rs6000/ibm-ldouble.c \
+ $(srcdir)/config/rs6000/tramp.S
+
+# These can't end up in shared libgcc
+LIB2ADD_ST += \
+ $(srcdir)/config/rs6000/crtsavfpr.S \
+ $(srcdir)/config/rs6000/crtresfpr.S \
+ $(srcdir)/config/rs6000/crtsavgpr.S \
+ $(srcdir)/config/rs6000/crtresgpr.S \
+ $(srcdir)/config/rs6000/crtresxfpr.S \
+ $(srcdir)/config/rs6000/crtresxgpr.S \
+ $(srcdir)/config/rs6000/e500crtres32gpr.S \
+ $(srcdir)/config/rs6000/e500crtres64gpr.S \
+ $(srcdir)/config/rs6000/e500crtres64gprctr.S \
+ $(srcdir)/config/rs6000/e500crtrest32gpr.S \
+ $(srcdir)/config/rs6000/e500crtrest64gpr.S \
+ $(srcdir)/config/rs6000/e500crtresx32gpr.S \
+ $(srcdir)/config/rs6000/e500crtresx64gpr.S \
+ $(srcdir)/config/rs6000/e500crtsav32gpr.S \
+ $(srcdir)/config/rs6000/e500crtsav64gpr.S \
+ $(srcdir)/config/rs6000/e500crtsav64gprctr.S \
+ $(srcdir)/config/rs6000/e500crtsavg32gpr.S \
+ $(srcdir)/config/rs6000/e500crtsavg64gpr.S \
+ $(srcdir)/config/rs6000/e500crtsavg64gprctr.S \
+ $(srcdir)/config/rs6000/eabi.S
# We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and
# end labels to all of the special sections used when we link using gcc.
# Assemble startup files.
-ecrti.S: $(gcc_srcdir)/config/rs6000/eabi-ci.asm
- cat $(gcc_srcdir)/config/rs6000/eabi-ci.asm >ecrti.S
-
-ecrtn.S: $(gcc_srcdir)/config/rs6000/eabi-cn.asm
- cat $(gcc_srcdir)/config/rs6000/eabi-cn.asm >ecrtn.S
-
-ncrti.S: $(gcc_srcdir)/config/rs6000/sol-ci.asm
- cat $(gcc_srcdir)/config/rs6000/sol-ci.asm >ncrti.S
-
-ncrtn.S: $(gcc_srcdir)/config/rs6000/sol-cn.asm
- cat $(gcc_srcdir)/config/rs6000/sol-cn.asm >ncrtn.S
-
-crtsavfpr.S: $(gcc_srcdir)/config/rs6000/crtsavfpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S
-
-crtresfpr.S: $(gcc_srcdir)/config/rs6000/crtresfpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S
-
-crtsavgpr.S: $(gcc_srcdir)/config/rs6000/crtsavgpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S
-
-crtresgpr.S: $(gcc_srcdir)/config/rs6000/crtresgpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S
-
-crtresxfpr.S: $(gcc_srcdir)/config/rs6000/crtresxfpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S
-
-crtresxgpr.S: $(gcc_srcdir)/config/rs6000/crtresxgpr.asm
- cat $(gcc_srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S
-
-e500crtres32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtres32gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtres32gpr.asm >e500crtres32gpr.S
-
-e500crtres64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtres64gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtres64gpr.asm >e500crtres64gpr.S
-
-e500crtres64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtres64gprctr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtres64gprctr.asm >e500crtres64gprctr.S
-
-e500crtrest32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtrest32gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtrest32gpr.asm >e500crtrest32gpr.S
-
-e500crtrest64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtrest64gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtrest64gpr.asm >e500crtrest64gpr.S
-
-e500crtresx32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtresx32gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtresx32gpr.asm >e500crtresx32gpr.S
-
-e500crtresx64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtresx64gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtresx64gpr.asm >e500crtresx64gpr.S
-
-e500crtsav32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsav32gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsav32gpr.asm >e500crtsav32gpr.S
-
-e500crtsav64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsav64gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsav64gpr.asm >e500crtsav64gpr.S
-
-e500crtsav64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtsav64gprctr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsav64gprctr.asm >e500crtsav64gprctr.S
-
-e500crtsavg32gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg32gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsavg32gpr.asm >e500crtsavg32gpr.S
-
-e500crtsavg64gpr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg64gpr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsavg64gpr.asm >e500crtsavg64gpr.S
-
-e500crtsavg64gprctr.S: $(gcc_srcdir)/config/rs6000/e500crtsavg64gprctr.asm
- cat $(gcc_srcdir)/config/rs6000/e500crtsavg64gprctr.asm >e500crtsavg64gprctr.S
-
-ecrti$(objext): ecrti.S
- $(crt_compile) -c ecrti.S
+ecrti$(objext): $(srcdir)/config/rs6000/eabi-ci.S
+ $(crt_compile) -c $<
-ecrtn$(objext): ecrtn.S
- $(crt_compile) -c ecrtn.S
+ecrtn$(objext): $(srcdir)/config/rs6000/eabi-cn.S
+ $(crt_compile) -c $<
-ncrti$(objext): ncrti.S
- $(crt_compile) -c ncrti.S
+ncrti$(objext): $(srcdir)/config/rs6000/sol-ci.S
+ $(crt_compile) -c $<
-ncrtn$(objext): ncrtn.S
- $(crt_compile) -c ncrtn.S
+ncrtn$(objext): $(srcdir)/config/rs6000/sol-cn.S
+ $(crt_compile) -c $<
diff --git a/libgcc/config/rs6000/t-slibgcc-aix b/libgcc/config/rs6000/t-slibgcc-aix
new file mode 100644
index 00000000000..a0fdd133926
--- /dev/null
+++ b/libgcc/config/rs6000/t-slibgcc-aix
@@ -0,0 +1,44 @@
+# Copyright (C) 2002, 2003, 2004, 2005, 2006,
+# 2008, 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build a shared libgcc library.
+SHLIB_EXT = .a
+SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
+ @multilib_flags@ @shlib_objs@ -lc \
+ `case @multilib_dir@ in \
+ *pthread*) echo -L$(TARGET_SYSTEM_ROOT)/usr/lib/threads -lpthreads -lc_r $(TARGET_SYSTEM_ROOT)/usr/lib/libc.a ;; \
+ *) echo -lc ;; esac` ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
+ rm -f @multilib_dir@/shr.o
+SHLIB_INSTALL = \
+ $(mkinstalldirs) $(DESTDIR)$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $(DESTDIR)$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MAPFILES = libgcc-std.ver
+SHLIB_NM_FLAGS = -Bpg -X32_64
+
+# Allow both 32-bit and 64-bit objects in archives.
+AR_FLAGS_FOR_TARGET = -X32_64
diff --git a/libgcc/config/rs6000/tramp.S b/libgcc/config/rs6000/tramp.S
new file mode 100644
index 00000000000..133b98840f1
--- /dev/null
+++ b/libgcc/config/rs6000/tramp.S
@@ -0,0 +1,107 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000, 2007, 2008, 2009 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Set up trampolines. */
+
+ .section ".text"
+#include "ppc-asm.h"
+#include "config.h"
+
+#ifndef __powerpc64__
+ .type trampoline_initial,@object
+ .align 2
+trampoline_initial:
+ mflr r0
+ bcl 20,31,1f
+.Lfunc = .-trampoline_initial
+ .long 0 /* will be replaced with function address */
+.Lchain = .-trampoline_initial
+ .long 0 /* will be replaced with static chain */
+1: mflr r11
+ mtlr r0
+ lwz r0,0(r11) /* function address */
+ lwz r11,4(r11) /* static chain */
+ mtctr r0
+ bctr
+
+trampoline_size = .-trampoline_initial
+ .size trampoline_initial,trampoline_size
+
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+FUNC_START(__trampoline_setup)
+ mflr r0 /* save return address */
+ bcl 20,31,.LCF0 /* load up __trampoline_initial into r7 */
+.LCF0:
+ mflr r11
+ addi r7,r11,trampoline_initial-4-.LCF0 /* trampoline address -4 */
+
+ li r8,trampoline_size /* verify that the trampoline is big enough */
+ cmpw cr1,r8,r4
+ srwi r4,r4,2 /* # words to move */
+ addi r9,r3,-4 /* adjust pointer for lwzu */
+ mtctr r4
+ blt cr1,.Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+.Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz .Lmove
+
+ /* Store correct function and static chain */
+ stw r5,.Lfunc(r3)
+ stw r6,.Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+.Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz .Lcache
+
+ /* Finally synchronize things & return */
+ sync
+ isync
+ blr
+
+.Labort:
+#if (defined __PIC__ || defined __pic__) && defined HAVE_AS_REL16
+ bcl 20,31,1f
+1: mflr r30
+ addis r30,r30,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r30,r30,_GLOBAL_OFFSET_TABLE_-1b@l
+#endif
+ bl JUMP_TARGET(abort)
+FUNC_END(__trampoline_setup)
+
+#endif
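
__trampoline_setup is invoked by compiler-generated code when the address of a nested function is taken: r3 points at a stack slot, r4 gives its length, r5 holds the target function, and r6 the static chain. The routine copies trampoline_initial into the slot, patches the two .long words, and flushes both caches. A hypothetical call site for the 32-bit layout above (ten words, 40 bytes; the symbol "nested" and the use of r31 for the chain are illustrative):

	addi r3,r1,16			/* r3 = trampoline slot on the stack */
	li r4,40			/* r4 = slot length in bytes */
	lis r5,nested@ha
	addi r5,r5,nested@l		/* r5 = address of the nested function */
	mr r6,r31			/* r6 = static chain pointer */
	bl __trampoline_setup
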
diff --git a/libgcc/config/s390/gthr-tpf.h b/libgcc/config/s390/gthr-tpf.h
new file mode 100644
index 00000000000..fb23e91cfcd
--- /dev/null
+++ b/libgcc/config/s390/gthr-tpf.h
@@ -0,0 +1,229 @@
+/* Threads compatibility routines for libgcc2 and libobjc.
+ Compile this one with gcc.
+ Copyright (C) 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* TPF needs its own version of gthr-*.h because TPF always links to
+ the thread library. However, for performance reasons we still do not
+ want to issue thread API calls unless we have checked that we are
+ running as a thread. */
+
+#ifndef GCC_GTHR_TPF_H
+#define GCC_GTHR_TPF_H
+
+/* POSIX threads specific definitions.
+ Easy, since the interface is just a one-to-one mapping. */
+
+#define __GTHREADS 1
+
+/* Some implementations of <pthread.h> require this to be defined. */
+#ifndef _REENTRANT
+#define _REENTRANT 1
+#endif
+
+#include <pthread.h>
+#include <unistd.h>
+
+typedef pthread_key_t __gthread_key_t;
+typedef pthread_once_t __gthread_once_t;
+typedef pthread_mutex_t __gthread_mutex_t;
+typedef pthread_mutex_t __gthread_recursive_mutex_t;
+
+#if defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
+#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER
+#elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
+#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+#endif
+
+#define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+#define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+
+#define NOTATHREAD 00
+#define ECBBASEPTR (unsigned long int) *(unsigned int *)0x00000514u
+#define ECBPG2PTR ECBBASEPTR + 0x1000
+#define CE2THRCPTR *((unsigned char *)(ECBPG2PTR + 16))
+#define __tpf_pthread_active() (CE2THRCPTR != NOTATHREAD)
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+# define __gthrw(name) \
+ static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name)));
+# define __gthrw_(name) __gthrw_ ## name
+#else
+# define __gthrw(name)
+# define __gthrw_(name) name
+#endif
+
+__gthrw(pthread_once)
+__gthrw(pthread_key_create)
+__gthrw(pthread_key_delete)
+__gthrw(pthread_getspecific)
+__gthrw(pthread_setspecific)
+__gthrw(pthread_create)
+
+__gthrw(pthread_mutex_lock)
+__gthrw(pthread_mutex_trylock)
+__gthrw(pthread_mutex_unlock)
+__gthrw(pthread_mutexattr_init)
+__gthrw(pthread_mutexattr_settype)
+__gthrw(pthread_mutexattr_destroy)
+__gthrw(pthread_mutex_init)
+__gthrw(pthread_mutex_destroy)
+
+static inline int
+__gthread_active_p (void)
+{
+ return 1;
+}
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_once) (__once, __func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_key_create) (__key, __dtor);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_key_delete) (__key);
+ else
+ return -1;
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_getspecific) (__key);
+ else
+ return NULL;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_setspecific) (__key, __ptr);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_mutex_destroy) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_mutex_lock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_mutex_trylock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthrw_(pthread_mutex_unlock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthread_mutex_lock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthread_mutex_trylock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ return __gthread_mutex_unlock (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__tpf_pthread_active ())
+ {
+ pthread_mutexattr_t __attr;
+ int __r;
+
+ __r = __gthrw_(pthread_mutexattr_init) (&__attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_settype) (&__attr,
+ PTHREAD_MUTEX_RECURSIVE);
+ if (!__r)
+ __r = __gthrw_(pthread_mutex_init) (__mutex, &__attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_destroy) (&__attr);
+ return __r;
+ }
+ return 0;
+}
+
+
+#endif /* ! GCC_GTHR_TPF_H */
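
Callers in libgcc and libobjc use these wrappers like any other gthr port; the TPF check is invisible to them, since each wrapper collapses to a no-op (or an error return) when __tpf_pthread_active() is false. A hypothetical caller, for illustration:

#include "gthr-tpf.h"

static __gthread_once_t init_once = __GTHREAD_ONCE_INIT;
static __gthread_mutex_t lock = __GTHREAD_MUTEX_INIT;

static void init_state (void) { /* one-time setup */ }

void
touch_state (void)
{
  /* Calls pthread_once only when running as a TPF thread;
     otherwise returns -1 without doing anything.  */
  __gthread_once (&init_once, init_state);

  __gthread_mutex_lock (&lock);		/* no-op when not threaded */
  /* ... critical section ... */
  __gthread_mutex_unlock (&lock);
}
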
diff --git a/libgcc/config/s390/t-crtstuff b/libgcc/config/s390/t-crtstuff
index 92e87b2da7e..f90e35b7220 100644
--- a/libgcc/config/s390/t-crtstuff
+++ b/libgcc/config/s390/t-crtstuff
@@ -2,5 +2,4 @@
# because then __FRAME_END__ might not be the last thing in .eh_frame
# section.
CRTSTUFF_T_CFLAGS += -fno-asynchronous-unwind-tables
-CRTSTUFF_T_CFLAGS_S += -fno-asynchronous-unwind-tables
-
+CRTSTUFF_T_CFLAGS_S += -fno-asynchronous-unwind-tables -fPIC
diff --git a/libgcc/config/s390/t-tpf b/libgcc/config/s390/t-tpf
deleted file mode 100644
index 9d416acc12d..00000000000
--- a/libgcc/config/s390/t-tpf
+++ /dev/null
@@ -1,2 +0,0 @@
-# Compile libgcc2.a with pic.
-HOST_LIBGCC2_CFLAGS += -fPIC
diff --git a/libgcc/config/score/crti.S b/libgcc/config/score/crti.S
new file mode 100644
index 00000000000..8e7c78b978b
--- /dev/null
+++ b/libgcc/config/score/crti.S
@@ -0,0 +1,131 @@
+# crti.S for Sunplus S+CORE
+#
+# Copyright (C) 2005, 2009, 2010, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file makes a stack frame for the contents of the .init and
+# .fini sections.
+.extern _stack
+
+#ifndef __pic__
+.section .init, "ax", @progbits
+ .weak _start
+ .ent _start
+ .frame r0, 0, r3, 0
+ .mask 0x00000000, 0
+_start:
+ la r28, _gp
+ la r8, __bss_start
+ la r9, __bss_end__
+ sub! r9, r8
+ srli! r9, 2
+ addi r9, -1
+ mtsr r9, sr0
+ li r9, 0
+1:
+ sw r9, [r8]+, 4
+ bcnz 1b
+ la r0, _stack
+ jl _init
+ la r4, _end
+ jl _init_argv
+ jl exit
+ .end _start
+
+ .weak _init_argv
+ .ent
+ .frame r0, 0, r3, 0
+ .mask 0x00000000, 0
+_init_argv:
+ ldiu! r4, 0
+ ldiu! r5, 0
+ j main
+ .end _init_argv
+
+ .globl _init
+ .type _init, %function
+_init:
+ addi r0, -32
+ sw r3, [r0, 20]
+
+ .section .fini, "ax", @progbits
+ .globl _fini
+ .type _fini, %function
+_fini:
+ addi r0, -32
+ sw r3, [r0, 20]
+#else
+.section .init, "ax", @progbits
+ .set pic
+ .weak _start
+ .ent _start
+ .frame r0, 0, r3, 0
+ .mask 0x00000000, 0
+_start:
+ mv r29, r3
+ bl 0f
+0:
+ .cpload r3
+ mv r3, r29
+ la r8, __bss_start
+ la r9, __bss_end__
+ sub! r9, r8
+ srli! r9, 2
+ addi r9, -1
+ mtsr r9, sr0
+ li r9, 0
+1:
+ sw r9, [r8]+, 4
+ bcnz 1b
+ la r0, _stack
+ bl _init
+ la r4, _end
+ la r29, _init_argv
+ brl r29
+ la r29, exit
+ brl r29
+ .end _start
+
+ .weak _init_argv
+ .ent _init_argv
+ .frame r0, 0, r3, 0
+ .mask 0x00000000, 0
+_init_argv:
+ ldiu! r4, 0
+ ldiu! r5, 0
+ la r29, main
+ brl r29
+ .end _init_argv
+
+ .globl _init
+ .type _init, %function
+_init:
+ addi r0, -32
+ sw r3, [r0, 20]
+
+ .section .fini, "ax", @progbits
+ .globl _fini
+ .type _fini, %function
+_fini:
+ addi r0, -32
+ sw r3, [r0, 20]
+
+#endif
diff --git a/libgcc/config/score/crtn.S b/libgcc/config/score/crtn.S
new file mode 100644
index 00000000000..adf4eb08400
--- /dev/null
+++ b/libgcc/config/score/crtn.S
@@ -0,0 +1,50 @@
+# crtn.S for Sunplus S+CORE
+
+# Copyright (C) 2005, 2009, 2010, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file makes sure that the .init and .fini sections do in
+# fact return.
+
+#ifndef __pic__
+.section .init, "ax", @progbits
+ lw r3, [r0, 20]
+ addi r0, 32
+ br r3
+
+.section .fini, "ax", @progbits
+ lw r3, [r0, 20]
+ addi r0, 32
+ br r3
+#else
+ .set pic
+.section .init, "ax", @progbits
+ lw r3, [r0, 20]
+ addi r0, 32
+ br r3
+
+ .set pic
+.section .fini, "ax", @progbits
+ lw r3, [r0, 20]
+ addi r0, 32
+ br r3
+#endif
+
diff --git a/libgcc/config/sh/crt1.S b/libgcc/config/sh/crt1.S
new file mode 100644
index 00000000000..ce09a09458f
--- /dev/null
+++ b/libgcc/config/sh/crt1.S
@@ -0,0 +1,1369 @@
+/* Copyright (C) 2000, 2001, 2003, 2004, 2005, 2006, 2009, 2011
+ Free Software Foundation, Inc.
+ This file was pretty much copied from newlib.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifdef MMU_SUPPORT
+ /* Section used for exception/timer interrupt stack area */
+ .section .data.vbr.stack,"aw"
+ .align 4
+ .global __ST_VBR
+__ST_VBR:
+ .zero 1024 * 2 /* ; 2k for VBR handlers */
+/* Label at the highest stack address where the stack grows from */
+__timer_stack:
+#endif /* MMU_SUPPORT */
+
+ /* ;----------------------------------------
+ Normal newlib crt1.S */
+
+#ifdef __SH5__
+ .section .data,"aw"
+ .global ___data
+___data:
+
+ .section .rodata,"a"
+ .global ___rodata
+___rodata:
+
+#define ICCR_BASE 0x01600000
+#define OCCR_BASE 0x01e00000
+#define MMUIR_BASE 0x00000000
+#define MMUDR_BASE 0x00800000
+
+#define PTE_ENABLED 1
+#define PTE_DISABLED 0
+
+#define PTE_SHARED (1 << 1)
+#define PTE_NOT_SHARED 0
+
+#define PTE_CB_UNCACHEABLE 0
+#define PTE_CB_DEVICE 1
+#define PTE_CB_CACHEABLE_WB 2
+#define PTE_CB_CACHEABLE_WT 3
+
+#define PTE_SZ_4KB (0 << 3)
+#define PTE_SZ_64KB (1 << 3)
+#define PTE_SZ_1MB (2 << 3)
+#define PTE_SZ_512MB (3 << 3)
+
+#define PTE_PRR (1 << 6)
+#define PTE_PRX (1 << 7)
+#define PTE_PRW (1 << 8)
+#define PTE_PRU (1 << 9)
+
+#define SR_MMU_BIT 31
+#define SR_BL_BIT 28
+
+#define ALIGN_4KB (0xfff)
+#define ALIGN_1MB (0xfffff)
+#define ALIGN_512MB (0x1fffffff)
+
+#define DYNACON_BASE 0x0f000000
+#define DM_CB_DLINK_BASE 0x0c000000
+#define DM_DB_DLINK_BASE 0x0b000000
+
+#define FEMI_AREA_0 0x00000000
+#define FEMI_AREA_1 0x04000000
+#define FEMI_AREA_2 0x05000000
+#define FEMI_AREA_3 0x06000000
+#define FEMI_AREA_4 0x07000000
+#define FEMI_CB 0x08000000
+
+#define EMI_BASE 0x80000000
+
+#define DMA_BASE 0x0e000000
+
+#define CPU_BASE 0x0d000000
+
+#define PERIPH_BASE 0x09000000
+#define DMAC_BASE 0x0e000000
+#define INTC_BASE 0x0a000000
+#define CPRC_BASE 0x0a010000
+#define TMU_BASE 0x0a020000
+#define SCIF_BASE 0x0a030000
+#define RTC_BASE 0x0a040000
+
+
+
+#define LOAD_CONST32(val, reg) \
+ movi ((val) >> 16) & 65535, reg; \
+ shori (val) & 65535, reg
+
+#define LOAD_PTEH_VAL(sym, align, bits, scratch_reg, reg) \
+ LOAD_ADDR (sym, reg); \
+ LOAD_CONST32 ((align), scratch_reg); \
+ andc reg, scratch_reg, reg; \
+ LOAD_CONST32 ((bits), scratch_reg); \
+ or reg, scratch_reg, reg
+
+#define LOAD_PTEL_VAL(sym, align, bits, scratch_reg, reg) \
+ LOAD_ADDR (sym, reg); \
+ LOAD_CONST32 ((align), scratch_reg); \
+ andc reg, scratch_reg, reg; \
+ LOAD_CONST32 ((bits), scratch_reg); \
+ or reg, scratch_reg, reg
+
+#define SET_PTE(pte_addr_reg, pteh_val_reg, ptel_val_reg) \
+ putcfg pte_addr_reg, 0, r63; \
+ putcfg pte_addr_reg, 1, ptel_val_reg; \
+ putcfg pte_addr_reg, 0, pteh_val_reg
+
+#if __SH5__ == 64
+ .section .text,"ax"
+#define LOAD_ADDR(sym, reg) \
+ movi (sym >> 48) & 65535, reg; \
+ shori (sym >> 32) & 65535, reg; \
+ shori (sym >> 16) & 65535, reg; \
+ shori sym & 65535, reg
+#else
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+#define LOAD_ADDR(sym, reg) \
+ movi (sym >> 16) & 65535, reg; \
+ shori sym & 65535, reg
+#endif
+ .global start
+start:
+ LOAD_ADDR (_stack, r15)
+
+#ifdef MMU_SUPPORT
+ ! Set up the VM using the MMU and caches
+
+ ! .vm_ep is first instruction to execute
+ ! after VM initialization
+ pt/l .vm_ep, tr1
+
+ ! Configure instruction cache (ICCR)
+ movi 3, r2
+ movi 0, r3
+ LOAD_ADDR (ICCR_BASE, r1)
+ putcfg r1, 0, r2
+ putcfg r1, 1, r3
+
+ ! movi 7, r2 ! write through
+ ! Configure operand cache (OCCR)
+ LOAD_ADDR (OCCR_BASE, r1)
+ putcfg r1, 0, r2
+ putcfg r1, 1, r3
+
+ ! Disable all PTE translations
+ LOAD_ADDR (MMUIR_BASE, r1)
+ LOAD_ADDR (MMUDR_BASE, r2)
+ movi 64, r3
+ pt/l .disable_ptes_loop, tr0
+.disable_ptes_loop:
+ putcfg r1, 0, r63
+ putcfg r2, 0, r63
+ addi r1, 16, r1
+ addi r2, 16, r2
+ addi r3, -1, r3
+ bgt r3, r63, tr0
+
+ LOAD_ADDR (MMUIR_BASE, r1)
+
+ ! FEMI instruction mappings
+ ! Area 0 - 1Mb cacheable at 0x00000000
+ ! Area 1 - None
+ ! Area 2 - 1Mb cacheable at 0x05000000
+ ! - 1Mb cacheable at 0x05100000
+ ! Area 3 - None
+ ! Area 4 - None
+
+ ! Map a 1Mb page for instructions at 0x00000000
+ LOAD_PTEH_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1Mb page for instructions at 0x05000000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1Mb page for instructions at 0x05100000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 512M page for instructions at EMI base
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (EMI_BASE, ALIGN_512MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (EMI_BASE, ALIGN_512MB, PTE_CB_CACHEABLE_WB | PTE_SZ_512MB | PTE_PRX | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for instructions at DM_DB_DLINK_BASE
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRX | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ LOAD_ADDR (MMUDR_BASE, r1)
+
+ ! FEMI data mappings
+ ! Area 0 - 1Mb cacheable at 0x00000000
+ ! Area 1 - 1Mb device at 0x04000000
+ ! Area 2 - 1Mb cacheable at 0x05000000
+ ! - 1Mb cacheable at 0x05100000
+ ! Area 3 - None
+ ! Area 4 - None
+ ! CB - 1Mb device at 0x08000000
+
+ ! Map a 1Mb page for data at 0x00000000
+ LOAD_PTEH_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1Mb page for data at 0x04000000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (FEMI_AREA_1, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_AREA_1, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1Mb page for data at 0x05000000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1Mb page for data at 0x05100000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for registers at 0x08000000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (FEMI_CB, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (FEMI_CB, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 512M page for data at EMI
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (EMI_BASE, ALIGN_512MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (EMI_BASE, ALIGN_512MB, PTE_CB_CACHEABLE_WB | PTE_SZ_512MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for DYNACON at DYNACON_BASE
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DYNACON_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DYNACON_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for instructions at DM_DB_DLINK_BASE
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for data at DM_DB_DLINK_BASE+0x1000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_DB_DLINK_BASE+0x1000), ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_DB_DLINK_BASE+0x1000), ALIGN_4KB, PTE_CB_UNCACHEABLE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for the stack at DM_DB_DLINK_BASE+0x2000
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_DB_DLINK_BASE+0x2000), ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_DB_DLINK_BASE+0x2000), ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK
+ ! 0x0c000000 - 0x0c0fffff
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DM_CB_DLINK_BASE, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DM_CB_DLINK_BASE, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK
+ ! 0x0c100000 - 0x0c1fffff
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x100000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK
+ ! 0x0c200000 - 0x0c2fffff
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x200000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x200000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK
+ ! 0x0c400000 - 0x0c4fffff
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x400000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x400000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK
+ ! 0x0c800000 - 0x0c8fffff
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x800000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x800000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map a 4K page for DMA control registers
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DMA_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DMA_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map lots of 4K pages for peripherals
+
+ ! /* peripheral */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (PERIPH_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (PERIPH_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* dmac */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (DMAC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (DMAC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* intc */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (INTC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (INTC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* rtc */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (RTC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (RTC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* tmu */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (TMU_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (TMU_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* scif */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (SCIF_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (SCIF_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ ! /* cprc */
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (CPRC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (CPRC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Map CPU WPC registers
+ addi r1, 16, r1
+ LOAD_PTEH_VAL (CPU_BASE, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL (CPU_BASE, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+ addi r1, 16, r1
+
+ LOAD_PTEH_VAL ((CPU_BASE+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((CPU_BASE+0x100000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((CPU_BASE+0x200000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((CPU_BASE+0x200000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ addi r1, 16, r1
+ LOAD_PTEH_VAL ((CPU_BASE+0x400000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2)
+ LOAD_PTEL_VAL ((CPU_BASE+0x400000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3)
+ SET_PTE (r1, r2, r3)
+
+ ! Switch over to virtual addressing and enable the caches
+ getcon sr, r1
+ movi 1, r2
+ shlli r2, SR_BL_BIT, r2
+ or r1, r2, r1
+ putcon r1, ssr
+ getcon sr, r1
+ movi 1, r2
+ shlli r2, SR_MMU_BIT, r2
+ or r1, r2, r1
+ putcon r1, ssr
+ gettr tr1, r1
+ putcon r1, spc
+ synco
+ rte
+
+ ! VM entry point. From now on, we are in VM mode.
+.vm_ep:
+
+ ! Install the trap handler, by seeding vbr with the
+ ! correct value, and by assigning sr.bl = 0.
+
+ LOAD_ADDR (vbr_start, r1)
+ putcon r1, vbr
+ movi ~(1<<28), r1
+ getcon sr, r2
+ and r1, r2, r2
+ putcon r2, sr
+#endif /* MMU_SUPPORT */
+
+ pt/l .Lzero_bss_loop, tr0
+ pt/l _init, tr5
+ pt/l ___setup_argv_and_call_main, tr6
+ pt/l _exit, tr7
+
+ ! zero out bss
+ LOAD_ADDR (_edata, r0)
+ LOAD_ADDR (_end, r1)
+.Lzero_bss_loop:
+ stx.q r0, r63, r63
+ addi r0, 8, r0
+ bgt/l r1, r0, tr0
+
+ LOAD_ADDR (___data, r26)
+ LOAD_ADDR (___rodata, r27)
+
+#ifdef __SH_FPU_ANY__
+ getcon sr, r0
+ ! enable the FP unit, by resetting SR.FD
+ ! also zero out SR.FR, SR.SZ and SR.PR, as mandated by the ABI
+ movi 0, r1
+ shori 0xf000, r1
+ andc r0, r1, r0
+ putcon r0, sr
+#if __SH5__ == 32
+ pt/l ___set_fpscr, tr0
+ movi 0, r4
+ blink tr0, r18
+#endif
+#endif
+
+ ! arrange for exit to call fini
+ pt/l _atexit, tr1
+ LOAD_ADDR (_fini, r2)
+ blink tr1, r18
+
+ ! call init
+ blink tr5, r18
+
+ ! call the mainline
+ blink tr6, r18
+
+ ! call exit
+ blink tr7, r18
+ ! We should never return from _exit, but in case we do, we would enter
+ ! the following tight loop.  This avoids executing any data that might follow.
+limbo:
+ pt/l limbo, tr0
+ blink tr0, r63
+
+#ifdef MMU_SUPPORT
+ ! All these traps are handled in the same place.
+ .balign 256
+vbr_start:
+ pt/l handler, tr0 ! tr0 trashed.
+ blink tr0, r63
+ .balign 256
+vbr_100:
+ pt/l handler, tr0 ! tr0 trashed.
+ blink tr0, r63
+vbr_100_end:
+ .balign 256
+vbr_200:
+ pt/l handler, tr0 ! tr0 trashed.
+ blink tr0, r63
+ .balign 256
+vbr_300:
+ pt/l handler, tr0 ! tr0 trashed.
+ blink tr0, r63
+ .balign 256
+vbr_400: ! Should be at vbr+0x400
+handler:
+ /* If the trap handler is there call it */
+ LOAD_ADDR (__superh_trap_handler, r2)
+ pta chandler,tr2
+ beq r2, r63, tr2 /* If zero, ie not present branch around to chandler */
+ /* Now call the trap handler with as much of the context unchanged as possible.
+ Move trapping address into R18 to make it look like the trap point */
+ getcon spc, r18
+ pt/l __superh_trap_handler, tr0
+ blink tr0, r7
+chandler:
+ getcon spc, r62
+ getcon expevt, r2
+ pt/l _exit, tr0
+ blink tr0, r63
+
+ /* Simulated trap handler */
+ .section .text..SHmedia32,"ax"
+gcc2_compiled.:
+ .section .debug_abbrev
+.Ldebug_abbrev0:
+ .section .text..SHmedia32
+.Ltext0:
+ .section .debug_info
+.Ldebug_info0:
+ .section .debug_line
+.Ldebug_line0:
+ .section .text..SHmedia32,"ax"
+ .align 5
+ .global __superh_trap_handler
+ .type __superh_trap_handler,@function
+__superh_trap_handler:
+.LFB1:
+ ptabs r18, tr0
+ addi.l r15, -8, r15
+ st.l r15, 4, r14
+ addi.l r15, -8, r15
+ add.l r15, r63, r14
+ st.l r14, 0, r2
+ ptabs r7, tr0
+ addi.l r14, 8, r14
+ add.l r14, r63, r15
+ ld.l r15, 4, r14
+ addi.l r15, 8, r15
+ blink tr0, r63
+.LFE1:
+.Lfe1:
+ .size __superh_trap_handler,.Lfe1-__superh_trap_handler
+
+ .section .text..SHmedia32
+.Letext0:
+
+ .section .debug_info
+ .ualong 0xa7
+ .uaword 0x2
+ .ualong .Ldebug_abbrev0
+ .byte 0x4
+ .byte 0x1
+ .ualong .Ldebug_line0
+ .ualong .Letext0
+ .ualong .Ltext0
+ .string "trap_handler.c"
+
+ .string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+ .string "GNU C 2.97-sh5-010522"
+
+ .byte 0x1
+ .byte 0x2
+ .ualong 0x9a
+ .byte 0x1
+ .string "_superh_trap_handler"
+
+ .byte 0x1
+ .byte 0x2
+ .byte 0x1
+ .ualong .LFB1
+ .ualong .LFE1
+ .byte 0x1
+ .byte 0x5e
+ .byte 0x3
+ .string "trap_reason"
+
+ .byte 0x1
+ .byte 0x1
+ .ualong 0x9a
+ .byte 0x2
+ .byte 0x91
+ .byte 0x0
+ .byte 0x0
+ .byte 0x4
+ .string "unsigned int"
+
+ .byte 0x4
+ .byte 0x7
+ .byte 0x0
+
+ .section .debug_abbrev
+ .byte 0x1
+ .byte 0x11
+ .byte 0x1
+ .byte 0x10
+ .byte 0x6
+ .byte 0x12
+ .byte 0x1
+ .byte 0x11
+ .byte 0x1
+ .byte 0x3
+ .byte 0x8
+ .byte 0x1b
+ .byte 0x8
+ .byte 0x25
+ .byte 0x8
+ .byte 0x13
+ .byte 0xb
+ .byte 0,0
+ .byte 0x2
+ .byte 0x2e
+ .byte 0x1
+ .byte 0x1
+ .byte 0x13
+ .byte 0x3f
+ .byte 0xc
+ .byte 0x3
+ .byte 0x8
+ .byte 0x3a
+ .byte 0xb
+ .byte 0x3b
+ .byte 0xb
+ .byte 0x27
+ .byte 0xc
+ .byte 0x11
+ .byte 0x1
+ .byte 0x12
+ .byte 0x1
+ .byte 0x40
+ .byte 0xa
+ .byte 0,0
+ .byte 0x3
+ .byte 0x5
+ .byte 0x0
+ .byte 0x3
+ .byte 0x8
+ .byte 0x3a
+ .byte 0xb
+ .byte 0x3b
+ .byte 0xb
+ .byte 0x49
+ .byte 0x13
+ .byte 0x2
+ .byte 0xa
+ .byte 0,0
+ .byte 0x4
+ .byte 0x24
+ .byte 0x0
+ .byte 0x3
+ .byte 0x8
+ .byte 0xb
+ .byte 0xb
+ .byte 0x3e
+ .byte 0xb
+ .byte 0,0
+ .byte 0
+
+ .section .debug_pubnames
+ .ualong 0x27
+ .uaword 0x2
+ .ualong .Ldebug_info0
+ .ualong 0xab
+ .ualong 0x5b
+ .string "_superh_trap_handler"
+
+ .ualong 0x0
+
+ .section .debug_aranges
+ .ualong 0x1c
+ .uaword 0x2
+ .ualong .Ldebug_info0
+ .byte 0x4
+ .byte 0x0
+ .uaword 0x0,0
+ .ualong .Ltext0
+ .ualong .Letext0-.Ltext0
+ .ualong 0x0
+ .ualong 0x0
+ .ident "GCC: (GNU) 2.97-sh5-010522"
+#endif /* MMU_SUPPORT */
+#else /* ! __SH5__ */
+
+ ! make a place to keep any previous value of the vbr register
+ ! this will only have a value if it has been set by redboot (for example)
+ .section .bss
+old_vbr:
+ .long 0
+#ifdef PROFILE
+profiling_enabled:
+ .long 0
+#endif
+
+
+ .section .text
+ .global start
+ .import ___rtos_profiler_start_timer
+ .weak ___rtos_profiler_start_timer
+start:
+ mov.l stack_k,r15
+
+#if defined (__SH3__) || (defined (__SH_FPU_ANY__) && ! defined (__SH2A__)) || defined (__SH4_NOFPU__)
+#define VBR_SETUP
+ ! before zeroing the bss ...
+ ! if the vbr is already set to vbr_start, then the program has been restarted
+ ! (i.e. it is not the first time the program has been run since reset);
+ ! reset the vbr to its old value before old_vbr (in bss) is wiped, which
+ ! ensures that the later code does not create a circular vbr chain
+ stc vbr, r1
+ mov.l vbr_start_k, r2
+ cmp/eq r1, r2
+ bf 0f
+ ! reset the old vbr value
+ mov.l old_vbr_k, r1
+ mov.l @r1, r2
+ ldc r2, vbr
+0:
+#endif /* VBR_SETUP */
+
+ ! zero out bss
+ mov.l edata_k,r0
+ mov.l end_k,r1
+ mov #0,r2
+start_l:
+ mov.l r2,@r0
+ add #4,r0
+ cmp/ge r0,r1
+ bt start_l
+
+#if defined (__SH_FPU_ANY__)
+ mov.l set_fpscr_k, r1
+ mov #4,r4
+ jsr @r1
+ shll16 r4 ! Set DN bit (flush denormal inputs to zero)
+ lds r3,fpscr ! Switch to default precision
+#endif /* defined (__SH_FPU_ANY__) */
+
+#ifdef VBR_SETUP
+ ! save the existing contents of the vbr
+ ! there will only be a prior value when using something like redboot
+ ! otherwise it will be zero
+ stc vbr, r1
+ mov.l old_vbr_k, r2
+ mov.l r1, @r2
+ ! setup vbr
+ mov.l vbr_start_k, r1
+ ldc r1,vbr
+#endif /* VBR_SETUP */
+
+ ! if an rtos is exporting a timer start fn,
+ ! then pick up an SR which does not enable ints
+ ! (the rtos will take care of this)
+ mov.l rtos_start_fn, r0
+ mov.l sr_initial_bare, r1
+ tst r0, r0
+ bt set_sr
+
+ mov.l sr_initial_rtos, r1
+
+set_sr:
+ ! Set status register (sr)
+ ldc r1, sr
+
+ ! arrange for exit to call fini
+ mov.l atexit_k,r0
+ mov.l fini_k,r4
+ jsr @r0
+ nop
+
+#ifdef PROFILE
+ ! arrange for exit to call _mcleanup (via stop_profiling)
+ mova stop_profiling,r0
+ mov.l atexit_k,r1
+ jsr @r1
+ mov r0, r4
+
+ ! Call profiler startup code
+ mov.l monstartup_k, r0
+ mov.l start_k, r4
+ mov.l etext_k, r5
+ jsr @r0
+ nop
+
+ ! enable profiling trap
+ ! until now any trap 33s will have been ignored
+ ! This means that all library functions called before this point
+ ! (directly or indirectly) may have the profiling trap at the start.
+ ! Therefore, only mcount itself may not have the extra header.
+ mov.l profiling_enabled_k2, r0
+ mov #1, r1
+ mov.l r1, @r0
+#endif /* PROFILE */
+
+ ! call init
+ mov.l init_k,r0
+ jsr @r0
+ nop
+
+ ! call the mainline
+ mov.l main_k,r0
+ jsr @r0
+ nop
+
+ ! call exit
+ mov r0,r4
+ mov.l exit_k,r0
+ jsr @r0
+ nop
+
+ .balign 4
+#ifdef PROFILE
+stop_profiling:
+ # stop mcount counting
+ mov.l profiling_enabled_k2, r0
+ mov #0, r1
+ mov.l r1, @r0
+
+ # call mcleanup
+ mov.l mcleanup_k, r0
+ jmp @r0
+ nop
+
+ .balign 4
+mcleanup_k:
+ .long __mcleanup
+monstartup_k:
+ .long ___monstartup
+profiling_enabled_k2:
+ .long profiling_enabled
+start_k:
+ .long _start
+etext_k:
+ .long __etext
+#endif /* PROFILE */
+
+ .align 2
+#if defined (__SH_FPU_ANY__)
+set_fpscr_k:
+ .long ___set_fpscr
+#endif /* defined (__SH_FPU_ANY__) */
+
+stack_k:
+ .long _stack
+edata_k:
+ .long _edata
+end_k:
+ .long _end
+main_k:
+ .long ___setup_argv_and_call_main
+exit_k:
+ .long _exit
+atexit_k:
+ .long _atexit
+init_k:
+ .long _init
+fini_k:
+ .long _fini
+#ifdef VBR_SETUP
+old_vbr_k:
+ .long old_vbr
+vbr_start_k:
+ .long vbr_start
+#endif /* VBR_SETUP */
+
+sr_initial_rtos:
+ ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
+ ! Whether profiling or not, keep interrupts masked;
+ ! the RTOS will enable them if required.
+ .long 0x600000f1
+
+rtos_start_fn:
+ .long ___rtos_profiler_start_timer
+
+#ifdef PROFILE
+sr_initial_bare:
+ ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
+ ! For a bare machine, we need to enable interrupts to get profiling working
+ .long 0x60000001
+#else
+
+sr_initial_bare:
+ ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
+ ! Keep interrupts disabled - the application will enable as required.
+ .long 0x600000f1
+#endif
+
+ ! supplied for backward compatibility only, in case of linking
+ ! code whose main() was compiled with an older version of GCC.
+ .global ___main
+___main:
+ rts
+ nop
+#ifdef VBR_SETUP
+! Exception handlers
+ .section .text.vbr, "ax"
+vbr_start:
+
+ .org 0x100
+vbr_100:
+#ifdef PROFILE
+ ! Note on register usage.
+ ! We use r0..r3 as scratch in this code.  If we are here due to a trapa
+ ! for profiling, then this is OK, as we are just before executing any
+ ! function code.
+ ! The other registers, r4..r7, we save explicitly on the stack.
+ ! The remaining registers are saved by normal ABI conventions, and we
+ ! assert that we do not use floating point registers.
+ mov.l expevt_k1, r1
+ mov.l @r1, r1
+ mov.l event_mask, r0
+ and r0,r1
+ mov.l trapcode_k, r2
+ cmp/eq r1,r2
+ bt 1f
+ bra handler_100 ! if not a trapa, go to default handler
+ nop
+1:
+ mov.l trapa_k, r0
+ mov.l @r0, r0
+ shlr2 r0 ! trapa code is shifted by 2.
+ cmp/eq #33, r0
+ bt 2f
+ bra handler_100
+ nop
+2:
+
+ ! If here then it looks like we have trap #33
+ ! Now we need to call mcount with the following convention
+ ! Save and restore r4..r7
+ mov.l r4,@-r15
+ mov.l r5,@-r15
+ mov.l r6,@-r15
+ mov.l r7,@-r15
+ sts.l pr,@-r15
+
+ ! r4 is frompc.
+ ! r5 is selfpc
+ ! r0 is the branch back address.
+ ! The code sequence emitted by gcc for the profiling trap is
+ ! .align 2
+ ! trapa #33
+ ! .align 2
+ ! .long lab where lab is planted by the compiler.  This is the address
+ ! of a datum that needs to be incremented.
+ sts pr, r4 ! frompc
+ stc spc, r5 ! selfpc
+ mov #2, r2
+ not r2, r2 ! pattern to align to 4
+ and r2, r5 ! r5 now has aligned address
+! add #4, r5 ! r5 now has address of address
+ mov r5, r2 ! Remember it.
+! mov.l @r5, r5 ! r5 has value of label (lab in above example)
+ add #8, r2
+ ldc r2, spc ! our return address avoiding address word
+
+ ! only call mcount if profiling is enabled
+ mov.l profiling_enabled_k, r0
+ mov.l @r0, r0
+ cmp/eq #0, r0
+ bt 3f
+ ! call mcount
+ mov.l mcount_k, r2
+ jsr @r2
+ nop
+3:
+ lds.l @r15+,pr
+ mov.l @r15+,r7
+ mov.l @r15+,r6
+ mov.l @r15+,r5
+ mov.l @r15+,r4
+ rte
+ nop
+ .balign 4
+event_mask:
+ .long 0xfff
+trapcode_k:
+ .long 0x160
+expevt_k1:
+ .long 0xff000024 ! Address of expevt
+trapa_k:
+ .long 0xff000020
+mcount_k:
+ .long __call_mcount
+profiling_enabled_k:
+ .long profiling_enabled
+#endif
+ ! Non-profiling case.
+handler_100:
+ mov.l 2f, r0 ! load the old vbr setting (if any)
+ mov.l @r0, r0
+ cmp/eq #0, r0
+ bf 1f
+ ! no previous vbr - jump to own generic handler
+ bra handler
+ nop
+1: ! there was a previous handler - chain them
+ add #0x7f, r0 ! 0x7f
+ add #0x7f, r0 ! 0xfe
+ add #0x2, r0 ! add 0x100 without corrupting another register
+ jmp @r0
+ nop
+ .balign 4
+2:
+ .long old_vbr
+
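The chaining convention used by these vectors can be summarized in C. The sketch below is an editorial interpretation of the assembly, not code from this patch: a vector at offset OFF either falls through to this file's generic handler or re-enters the previously installed vbr table at the same offset.

   #include <stdint.h>

   typedef void (*vector_fn) (void);

   /* Hypothetical model of handler_100 / vbr_400 / vbr_600; OFF is the
      vector's offset within the vbr table (0x100, 0x400 or 0x600).  */
   void chain_or_handle (uint32_t old_vbr_value, uint32_t off,
                         vector_fn own_handler)
   {
     if (old_vbr_value == 0)
       own_handler ();                          /* no previous vbr: handle it here */
     else
       ((vector_fn) (old_vbr_value + off)) ();  /* chain to the previous table */
   }

The add-#0x7f sequences in the assembly implement the old_vbr_value + off addition without spilling a second register, since immediates on SH are limited to 8 bits.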
+ .org 0x400
+vbr_400: ! Should be at vbr+0x400
+ mov.l 2f, r0 ! load the old vbr setting (if any)
+ mov.l @r0, r0
+ cmp/eq #0, r0
+ ! no previous vbr - jump to own generic handler
+ bt handler
+ ! there was a previous handler - chain them
+ rotcr r0
+ rotcr r0
+ add #0x7f, r0 ! 0x1fc
+ add #0x7f, r0 ! 0x3f8
+ add #0x02, r0 ! 0x400
+ rotcl r0
+ rotcl r0 ! Add 0x400 without corrupting another register
+ jmp @r0
+ nop
+ .balign 4
+2:
+ .long old_vbr
+handler:
+ /* If the trap handler is there call it */
+ mov.l superh_trap_handler_k, r0
+ cmp/eq #0, r0 ! True if zero.
+ bf 3f
+ bra chandler
+ nop
+3:
+ ! The handler is available; call it.
+ /* Now call the trap handler with as much of the context unchanged as possible.
+ Move trapping address into PR to make it look like the trap point */
+ stc spc, r1
+ lds r1, pr
+ mov.l expevt_k, r4
+ mov.l @r4, r4 ! r4 is value of expevt, first parameter.
+ mov r1, r5 ! Remember trapping pc.
+ mov r1, r6 ! Remember trapping pc.
+ mov.l chandler_k, r1
+ mov.l superh_trap_handler_k, r2
+ ! jmp to trap handler to avoid disturbing pr.
+ jmp @r2
+ nop
+
+ .org 0x600
+vbr_600:
+#ifdef PROFILE
+ ! Should be at vbr+0x600
+ ! Now we are in the land of interrupts so need to save more state.
+ ! Save register state
+ mov.l interrupt_stack_k, r15 ! r15 has been saved to sgr.
+ mov.l r0,@-r15
+ mov.l r1,@-r15
+ mov.l r2,@-r15
+ mov.l r3,@-r15
+ mov.l r4,@-r15
+ mov.l r5,@-r15
+ mov.l r6,@-r15
+ mov.l r7,@-r15
+ sts.l pr,@-r15
+ sts.l mach,@-r15
+ sts.l macl,@-r15
+#if defined(__SH_FPU_ANY__)
+ ! Save fpul and fpscr, save fr0-fr7 in 64 bit mode
+ ! and set the pervading precision for the timer_handler
+ mov #0,r0
+ sts.l fpul,@-r15
+ sts.l fpscr,@-r15
+ lds r0,fpscr ! Clear fpscr
+ fmov fr0,@-r15
+ fmov fr1,@-r15
+ fmov fr2,@-r15
+ fmov fr3,@-r15
+ mov.l pervading_precision_k,r0
+ fmov fr4,@-r15
+ fmov fr5,@-r15
+ mov.l @r0,r0
+ fmov fr6,@-r15
+ fmov fr7,@-r15
+ lds r0,fpscr
+#endif /* __SH_FPU_ANY__ */
+ ! Pass interrupted pc to timer_handler as first parameter (r4).
+ stc spc, r4
+ mov.l timer_handler_k, r0
+ jsr @r0
+ nop
+#if defined(__SH_FPU_ANY__)
+ mov #0,r0
+ lds r0,fpscr ! Clear the fpscr
+ fmov @r15+,fr7
+ fmov @r15+,fr6
+ fmov @r15+,fr5
+ fmov @r15+,fr4
+ fmov @r15+,fr3
+ fmov @r15+,fr2
+ fmov @r15+,fr1
+ fmov @r15+,fr0
+ lds.l @r15+,fpscr
+ lds.l @r15+,fpul
+#endif /* __SH_FPU_ANY__ */
+ lds.l @r15+,macl
+ lds.l @r15+,mach
+ lds.l @r15+,pr
+ mov.l @r15+,r7
+ mov.l @r15+,r6
+ mov.l @r15+,r5
+ mov.l @r15+,r4
+ mov.l @r15+,r3
+ mov.l @r15+,r2
+ mov.l @r15+,r1
+ mov.l @r15+,r0
+ stc sgr, r15 ! Restore r15, destroyed by this sequence.
+ rte
+ nop
+#if defined(__SH_FPU_ANY__)
+ .balign 4
+pervading_precision_k:
+#define CONCAT1(A,B) A##B
+#define CONCAT(A,B) CONCAT1(A,B)
+ .long CONCAT(__USER_LABEL_PREFIX__,__fpscr_values)+4
+#endif
+#else
+ mov.l 2f, r0 ! Load the old vbr setting (if any).
+ mov.l @r0, r0
+ cmp/eq #0, r0
+ ! no previous vbr - jump to own handler
+ bt chandler
+ ! there was a previous handler - chain them
+ rotcr r0
+ rotcr r0
+ add #0x7f, r0 ! 0x1fc
+ add #0x7f, r0 ! 0x3f8
+ add #0x7f, r0 ! 0x5f4
+ add #0x03, r0 ! 0x600
+ rotcl r0
+ rotcl r0 ! Add 0x600 without corrupting another register
+ jmp @r0
+ nop
+ .balign 4
+2:
+ .long old_vbr
+#endif /* PROFILE code */
+chandler:
+ mov.l expevt_k, r4
+ mov.l @r4, r4 ! r4 is value of expevt hence making this the return code
+ mov.l handler_exit_k,r0
+ jsr @r0
+ nop
+ ! We should never return from _exit, but in case we do, we would enter
+ ! the following tight loop.
+limbo:
+ bra limbo
+ nop
+ .balign 4
+#ifdef PROFILE
+interrupt_stack_k:
+ .long __timer_stack ! The high end of the stack
+timer_handler_k:
+ .long __profil_counter
+#endif
+expevt_k:
+ .long 0xff000024 ! Address of expevt
+chandler_k:
+ .long chandler
+superh_trap_handler_k:
+ .long __superh_trap_handler
+handler_exit_k:
+ .long _exit
+ .align 2
+! Simulated compile of trap handler.
+ .section .debug_abbrev,"",@progbits
+.Ldebug_abbrev0:
+ .section .debug_info,"",@progbits
+.Ldebug_info0:
+ .section .debug_line,"",@progbits
+.Ldebug_line0:
+ .text
+.Ltext0:
+ .align 5
+ .type __superh_trap_handler,@function
+__superh_trap_handler:
+.LFB1:
+ mov.l r14,@-r15
+.LCFI0:
+ add #-4,r15
+.LCFI1:
+ mov r15,r14
+.LCFI2:
+ mov.l r4,@r14
+ lds r1, pr
+ add #4,r14
+ mov r14,r15
+ mov.l @r15+,r14
+ rts
+ nop
+.LFE1:
+.Lfe1:
+ .size __superh_trap_handler,.Lfe1-__superh_trap_handler
+ .section .debug_frame,"",@progbits
+.Lframe0:
+ .ualong .LECIE0-.LSCIE0
+.LSCIE0:
+ .ualong 0xffffffff
+ .byte 0x1
+ .string ""
+ .uleb128 0x1
+ .sleb128 -4
+ .byte 0x11
+ .byte 0xc
+ .uleb128 0xf
+ .uleb128 0x0
+ .align 2
+.LECIE0:
+.LSFDE0:
+ .ualong .LEFDE0-.LASFDE0
+.LASFDE0:
+ .ualong .Lframe0
+ .ualong .LFB1
+ .ualong .LFE1-.LFB1
+ .byte 0x4
+ .ualong .LCFI0-.LFB1
+ .byte 0xe
+ .uleb128 0x4
+ .byte 0x4
+ .ualong .LCFI1-.LCFI0
+ .byte 0xe
+ .uleb128 0x8
+ .byte 0x8e
+ .uleb128 0x1
+ .byte 0x4
+ .ualong .LCFI2-.LCFI1
+ .byte 0xd
+ .uleb128 0xe
+ .align 2
+.LEFDE0:
+ .text
+.Letext0:
+ .section .debug_info
+ .ualong 0xb3
+ .uaword 0x2
+ .ualong .Ldebug_abbrev0
+ .byte 0x4
+ .uleb128 0x1
+ .ualong .Ldebug_line0
+ .ualong .Letext0
+ .ualong .Ltext0
+ .string "trap_handler.c"
+ .string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ .string "GNU C 3.2 20020529 (experimental)"
+ .byte 0x1
+ .uleb128 0x2
+ .ualong 0xa6
+ .byte 0x1
+ .string "_superh_trap_handler"
+ .byte 0x1
+ .byte 0x2
+ .byte 0x1
+ .ualong .LFB1
+ .ualong .LFE1
+ .byte 0x1
+ .byte 0x5e
+ .uleb128 0x3
+ .string "trap_reason"
+ .byte 0x1
+ .byte 0x1
+ .ualong 0xa6
+ .byte 0x2
+ .byte 0x91
+ .sleb128 0
+ .byte 0x0
+ .uleb128 0x4
+ .string "unsigned int"
+ .byte 0x4
+ .byte 0x7
+ .byte 0x0
+ .section .debug_abbrev
+ .uleb128 0x1
+ .uleb128 0x11
+ .byte 0x1
+ .uleb128 0x10
+ .uleb128 0x6
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x3
+ .uleb128 0x8
+ .uleb128 0x1b
+ .uleb128 0x8
+ .uleb128 0x25
+ .uleb128 0x8
+ .uleb128 0x13
+ .uleb128 0xb
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x2
+ .uleb128 0x2e
+ .byte 0x1
+ .uleb128 0x1
+ .uleb128 0x13
+ .uleb128 0x3f
+ .uleb128 0xc
+ .uleb128 0x3
+ .uleb128 0x8
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x27
+ .uleb128 0xc
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x40
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x5
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x8
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x49
+ .uleb128 0x13
+ .uleb128 0x2
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x4
+ .uleb128 0x24
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x8
+ .uleb128 0xb
+ .uleb128 0xb
+ .uleb128 0x3e
+ .uleb128 0xb
+ .byte 0x0
+ .byte 0x0
+ .byte 0x0
+ .section .debug_pubnames,"",@progbits
+ .ualong 0x27
+ .uaword 0x2
+ .ualong .Ldebug_info0
+ .ualong 0xb7
+ .ualong 0x67
+ .string "_superh_trap_handler"
+ .ualong 0x0
+ .section .debug_aranges,"",@progbits
+ .ualong 0x1c
+ .uaword 0x2
+ .ualong .Ldebug_info0
+ .byte 0x4
+ .byte 0x0
+ .uaword 0x0
+ .uaword 0x0
+ .ualong .Ltext0
+ .ualong .Letext0-.Ltext0
+ .ualong 0x0
+ .ualong 0x0
+#endif /* VBR_SETUP */
+#endif /* ! __SH5__ */
diff --git a/libgcc/config/sh/crti.S b/libgcc/config/sh/crti.S
new file mode 100644
index 00000000000..a8e4f8bed22
--- /dev/null
+++ b/libgcc/config/sh/crti.S
@@ -0,0 +1,125 @@
+/* Copyright (C) 2000, 2001, 2009, 2011 Free Software Foundation, Inc.
+ This file was adapted from glibc sources.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* The code in sections .init and .fini is supposed to be a single
+ regular function. The function in .init is called directly from
+ start in crt1.S. The function in .fini is atexit()ed in crt1.S
+ too.
+
+ crti.S contributes the prologue of a function to these sections,
+ and crtn.S supplies the epilogue. STARTFILE_SPEC should list
+ crti.o before any other object files that might add code to .init
+ or .fini sections, and ENDFILE_SPEC should list crtn.o after any
+ such object files. */
+
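In C terms, the contract above might look like the sketch below. This is illustrative only: the function names are taken from this patch (the assembly symbols carry an extra leading underscore), and the real sequence is the assembly in crt1.S.

   extern void _init (void);  /* crti.o prologue + .init fragments + crtn.o epilogue */
   extern void _fini (void);  /* assembled the same way from the .fini sections */
   extern int  atexit (void (*) (void));
   extern int  __setup_argv_and_call_main (void);
   extern void _exit (int);

   /* Rough call order implemented by start in crt1.S.  */
   void start_sketch (void)
   {
     atexit (_fini);                         /* arrange for exit to run .fini */
     _init ();                               /* run the linked-together .init body */
     _exit (__setup_argv_and_call_main ());  /* main, then exit */
   }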
+ .section .init
+/* The alignment below can't be smaller, otherwise the mova below
+ breaks. Yes, we might align just the label, but then we'd be
+ exchanging an alignment here for one there, since the code fragment
+ below ensures 4-byte alignment on __ELF__. */
+#ifdef __ELF__
+ .p2align 2
+#else
+ .p2align 1
+#endif
+ .global _init
+_init:
+#if __SHMEDIA__
+ addi r15, -16, r15
+ st.q r15, 8, r14
+ st.q r15, 0, r18
+ add r15, r63, r14
+#elif __SH5__ && ! __SHMEDIA__
+ mov r15,r0
+ add #-8,r15
+ mov.l r14,@-r0
+ sts.l pr,@-r0
+ mov r15,r14
+ nop
+#else
+#ifdef __ELF__
+ mov.l r12,@-r15
+ mova 0f,r0
+ mov.l 0f,r12
+#endif
+ mov.l r14,@-r15
+#ifdef __ELF__
+ add r0,r12
+#endif
+ sts.l pr,@-r15
+#ifdef __ELF__
+ bra 1f
+#endif
+ mov r15,r14
+#ifdef __ELF__
+0: .long _GLOBAL_OFFSET_TABLE_
+1:
+#endif
+#endif /* __SHMEDIA__ */
+
+ .section .fini
+/* The alignment below can't be smaller, otherwise the mova below
+ breaks. Yes, we might align just the label, but then we'd be
+ exchanging an alignment here for one there, since the code fragment
+ below ensures 4-byte alignment on __ELF__. */
+#ifdef __ELF__
+ .p2align 2
+#else
+ .p2align 1
+#endif
+ .global _fini
+_fini:
+#if __SHMEDIA__
+ addi r15, -16, r15
+ st.q r15, 8, r14
+ st.q r15, 0, r18
+ add r15, r63, r14
+#elif __SH5__ && ! __SHMEDIA__
+ mov r15,r0
+ add #-8,r15
+ mov.l r14,@-r0
+ sts.l pr,@-r0
+ mov r15,r14
+ nop
+#else
+#ifdef __ELF__
+ mov.l r12,@-r15
+ mova 0f,r0
+ mov.l 0f,r12
+#endif
+ mov.l r14,@-r15
+#ifdef __ELF__
+ add r0,r12
+#endif
+ sts.l pr,@-r15
+#ifdef __ELF__
+ bra 1f
+#endif
+ mov r15,r14
+#ifdef __ELF__
+0: .long _GLOBAL_OFFSET_TABLE_
+1:
+#endif
+#endif /* __SHMEDIA__ */
diff --git a/libgcc/config/sh/crtn.S b/libgcc/config/sh/crtn.S
new file mode 100644
index 00000000000..d2f58c01b1b
--- /dev/null
+++ b/libgcc/config/sh/crtn.S
@@ -0,0 +1,77 @@
+/* Copyright (C) 2000, 2001, 2009, 2011 Free Software Foundation, Inc.
+ This file was adapted from glibc sources.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* See an explanation about .init and .fini in crti.S. */
+
+ .section .init
+#if __SHMEDIA__
+ add r14, r63, r15
+ ld.q r15, 0, r18
+ ptabs r18, tr0
+ ld.q r15, 8, r14
+ addi r15, 16, r15
+ blink tr0, r63
+#elif __SH5__ && ! __SHMEDIA__
+ mov r14,r15
+ lds.l @r14+,pr
+ mov.l @r14,r14
+ rts
+ add #8,r15
+#else
+ mov r14,r15
+ lds.l @r15+,pr
+ mov.l @r15+,r14
+ rts
+#ifdef __ELF__
+ mov.l @r15+,r12
+#else
+ nop
+#endif
+#endif /* __SHMEDIA__ */
+
+ .section .fini
+#if __SHMEDIA__
+ add r14, r63, r15
+ ld.q r15, 0, r18
+ ptabs r18, tr0
+ ld.q r15, 8, r14
+ addi r15, 16, r15
+ blink tr0, r63
+#elif __SH5__ && ! __SHMEDIA__
+ mov r14,r15
+ lds.l @r14+,pr
+ mov.l @r14,r14
+ rts
+ add #8,r15
+#else
+ mov r14,r15
+ lds.l @r15+,pr
+ mov.l @r15+,r14
+ rts
+#ifdef __ELF__
+ mov.l @r15+,r12
+#else
+ nop
+#endif
+#endif /* __SHMEDIA__ */
diff --git a/libgcc/config/sh/lib1funcs-4-300.S b/libgcc/config/sh/lib1funcs-4-300.S
new file mode 100644
index 00000000000..b131877f121
--- /dev/null
+++ b/libgcc/config/sh/lib1funcs-4-300.S
@@ -0,0 +1,936 @@
+/* Copyright (C) 2004, 2006, 2009 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* libgcc routines for the STMicroelectronics ST40-300 CPU.
+ Contributed by Jörn Rennecke <joern.rennecke@st.com>. */
+
+#include "lib1funcs.h"
+
+#if !__SHMEDIA__
+#ifdef L_div_table
+#if defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__)
+/* This code uses shld, thus is not suitable for SH1 / SH2. */
+
+/* Signed / unsigned division without use of FPU, optimized for SH4-300.
+ Uses a lookup table for divisors in the range -128 .. +127, and
+ div1 with case distinction for larger divisors in three more ranges.
+ The code is lumped together with the table to allow the use of mova. */
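As a rough C model of the small-divisor path that follows (an editorial interpretation of LOCAL(udiv_le128) and the tables near LOCAL(div_table_inv), not a line-for-line translation): the quotient comes from one 32x32->64 multiply by a normalized reciprocal, a 33-bit add for the implicit leading 1, and a shift, instead of 32 div1 steps.

   #include <stdint.h>

   extern const uint32_t div_table_inv[128];  /* normalized 1/d, implicit leading 1 */
   extern const int8_t   div_table_clz[128];  /* per-divisor right-shift counts (<= 0) */

   /* Unsigned case, 1 <= d <= 127.  For powers of two the inv entry is 0
      and mach keeps the preloaded dividend; the conditional models that.  */
   uint32_t udiv_small_model (uint32_t n, uint32_t d)
   {
     uint32_t hi = div_table_inv[d]
                   ? (uint32_t) (((uint64_t) div_table_inv[d] * n) >> 32)
                   : n;
     uint64_t sum = (uint64_t) hi + n;    /* the implicit leading 1 adds n */
     return (uint32_t) (sum >> 1) >> -div_table_clz[d];
   }

For example, n = 18 and d = 6 give hi = 6, sum = 24, and (24 >> 1) >> 2 = 3; the addc/rotcr pair in the assembly plays the role of the 33-bit sum here.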
+#ifdef __LITTLE_ENDIAN__
+#define L_LSB 0
+#define L_LSWMSB 1
+#define L_MSWLSB 2
+#else
+#define L_LSB 3
+#define L_LSWMSB 2
+#define L_MSWLSB 1
+#endif
+
+ .global GLOBAL(udivsi3_i4i)
+ .global GLOBAL(sdivsi3_i4i)
+ FUNC(GLOBAL(udivsi3_i4i))
+ FUNC(GLOBAL(sdivsi3_i4i))
+
+ .balign 4
+LOCAL(div_ge8m): ! 10 cycles up to here
+ rotcr r1 ! signed shift must use original sign from r4
+ div0s r5,r4
+ mov #24,r7
+ shld r7,r6
+ shad r0,r1
+ rotcl r6
+ div1 r5,r1
+ swap.w r5,r0 ! detect -0x80000000 : 0x800000
+ rotcl r6
+ swap.w r4,r7
+ div1 r5,r1
+ swap.b r7,r7
+ rotcl r6
+ or r7,r0
+ div1 r5,r1
+ swap.w r0,r7
+ rotcl r6
+ or r7,r0
+ div1 r5,r1
+ add #-0x80,r0
+ rotcl r6
+ extu.w r0,r0
+ div1 r5,r1
+ neg r0,r0
+ rotcl r6
+ swap.w r0,r0
+ div1 r5,r1
+ mov.l @r15+,r7
+ and r6,r0
+ rotcl r6
+ div1 r5,r1
+ shll2 r0
+ rotcl r6
+ exts.b r0,r0
+ div1 r5,r1
+ swap.w r0,r0
+ exts.w r0,r1
+ exts.b r6,r0
+ mov.l @r15+,r6
+ rotcl r0
+ rts
+ sub r1,r0
+ ! 31 cycles up to here
+
+ .balign 4
+LOCAL(udiv_ge64k): ! 3 cycles up to here
+ mov r4,r0
+ shlr8 r0
+ div0u
+ cmp/hi r0,r5
+ bt LOCAL(udiv_r8)
+ mov.l r5,@-r15
+ shll8 r5
+ ! 7 cycles up to here
+ .rept 8
+ div1 r5,r0
+ .endr
+ extu.b r4,r1 ! 15 cycles up to here
+ extu.b r0,r6
+ xor r1,r0
+ xor r6,r0
+ swap.b r6,r6
+ .rept 8
+ div1 r5,r0
+ .endr ! 25 cycles up to here
+ extu.b r0,r0
+ mov.l @r15+,r5
+ or r6,r0
+ mov.l @r15+,r6
+ rts
+ rotcl r0 ! 28 cycles up to here
+
+ .balign 4
+LOCAL(udiv_r8): ! 6 cycles up to here
+ mov.l r4,@-r15
+ shll16 r4
+ shll8 r4
+ !
+ shll r4
+ mov r0,r1
+ div1 r5,r1
+ mov r4,r0
+ rotcl r0
+ mov.l @r15+,r4
+ div1 r5,r1
+ ! 12 cycles up to here
+ .rept 6
+ rotcl r0; div1 r5,r1
+ .endr
+ mov.l @r15+,r6 ! 24 cycles up to here
+ rts
+ rotcl r0
+
+ .balign 4
+LOCAL(div_ge32k): ! 6 cycles up to here
+ mov.l r7,@-r15
+ swap.w r5,r6
+ exts.b r6,r7
+ exts.w r6,r6
+ cmp/eq r6,r7
+ extu.b r1,r6
+ bf/s LOCAL(div_ge8m)
+ cmp/hi r1,r4 ! copy sign bit of r4 into T
+ rotcr r1 ! signed shift must use original sign from r4
+ div0s r5,r4
+ shad r0,r1
+ shll8 r5
+ div1 r5,r1
+ mov r5,r7 ! detect r4 == 0x80000000 && r5 == 0x8000(00)
+ div1 r5,r1
+ shlr8 r7
+ div1 r5,r1
+ swap.w r4,r0
+ div1 r5,r1
+ swap.b r0,r0
+ div1 r5,r1
+ or r0,r7
+ div1 r5,r1
+ add #-80,r7
+ div1 r5,r1
+ swap.w r7,r0
+ div1 r5,r1
+ or r0,r7
+ extu.b r1,r0
+ xor r6,r1
+ xor r0,r1
+ exts.b r0,r0
+ div1 r5,r1
+ extu.w r7,r7
+ div1 r5,r1
+ neg r7,r7 ! upper 16 bits of r7 == 0 if r4 == 0x80000000 && r5 == 0x8000
+ div1 r5,r1
+ and r0,r7
+ div1 r5,r1
+ swap.w r7,r7 ! 26 cycles up to here.
+ div1 r5,r1
+ shll8 r0
+ div1 r5,r1
+ exts.w r7,r7
+ div1 r5,r1
+ add r0,r0
+ div1 r5,r1
+ sub r7,r0
+ extu.b r1,r1
+ mov.l @r15+,r7
+ rotcl r1
+ mov.l @r15+,r6
+ add r1,r0
+ mov #-8,r1
+ rts
+ shad r1,r5 ! 34 cycles up to here
+
+ .balign 4
+GLOBAL(udivsi3_i4i):
+ mov.l r6,@-r15
+ extu.w r5,r6
+ cmp/eq r5,r6
+ mov #0x7f,r0
+ bf LOCAL(udiv_ge64k)
+ cmp/hi r0,r5
+ bf LOCAL(udiv_le128)
+ mov r4,r1
+ shlr8 r1
+ div0u
+ shlr r1
+ shll16 r6
+ div1 r6,r1
+ extu.b r4,r0 ! 7 cycles up to here
+ .rept 8
+ div1 r6,r1
+ .endr ! 15 cycles up to here
+ xor r1,r0 ! xor dividend with result lsb
+ .rept 6
+ div1 r6,r1
+ .endr
+ mov.l r7,@-r15 ! 21 cycles up to here
+ div1 r6,r1
+ extu.b r0,r7
+ div1 r6,r1
+ shll8 r7
+ extu.w r1,r0
+ xor r7,r1 ! replace lsb of result with lsb of dividend
+ div1 r6,r1
+ mov #0,r7
+ div1 r6,r1
+ !
+ div1 r6,r1
+ bra LOCAL(div_end)
+ div1 r6,r1 ! 28 cycles up to here
+
+ /* This is link-compatible with a GLOBAL(sdivsi3) call,
+ but we effectively clobber only r1, macl and mach */
+ /* Because negative quotients are calculated as one's complements,
+ -0x80000000 divided by the smallest positive number of a number
+ range (0x80, 0x8000, 0x800000) causes saturation in the one's
+ complement representation, and we have to suppress the
+ one's -> two's complement adjustment. Since positive numbers
+ don't get such an adjustment, it's OK to also compute one's -> two's
+ complement adjustment suppression for a dividend of 0. */
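As a worked illustration of the convention just described (a hedged C model, not the assembly's bit-exact sequence): a negative quotient q leaves the division loop as its one's complement ~(-q), which equals q - 1 in two's complement, so the generic fix-up adds 1; the saturating cases above are exactly the ones where that fix-up must be suppressed.

   #include <stdint.h>

   /* ones = ~(-q) = q - 1 for a negative quotient q; adding 1 recovers q.
      SUPPRESS models the saturating special cases (and a zero dividend),
      where the +1 must be skipped.  */
   int32_t ones_to_twos (int32_t ones, int suppress)
   {
     return suppress ? ones : ones + 1;
   }

   /* Example: q = -42 comes out of the loop as ~42 = -43; the fix-up
      then yields -42.  */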
+ .balign 4
+GLOBAL(sdivsi3_i4i):
+ mov.l r6,@-r15
+ exts.b r5,r6
+ cmp/eq r5,r6
+ mov #-1,r1
+ bt/s LOCAL(div_le128)
+ cmp/pz r4
+ addc r4,r1
+ exts.w r5,r6
+ cmp/eq r5,r6
+ mov #-7,r0
+ bf/s LOCAL(div_ge32k)
+ cmp/hi r1,r4 ! copy sign bit of r4 into T
+ rotcr r1
+ shll16 r6 ! 7 cycles up to here
+ shad r0,r1
+ div0s r5,r4
+ div1 r6,r1
+ mov.l r7,@-r15
+ div1 r6,r1
+ mov r4,r0 ! re-compute adjusted dividend
+ div1 r6,r1
+ mov #-31,r7
+ div1 r6,r1
+ shad r7,r0
+ div1 r6,r1
+ add r4,r0 ! adjusted dividend
+ div1 r6,r1
+ mov.l r8,@-r15
+ div1 r6,r1
+ swap.w r4,r8 ! detect special case r4 = 0x80000000, r5 = 0x80
+ div1 r6,r1
+ swap.b r8,r8
+ xor r1,r0 ! xor dividend with result lsb
+ div1 r6,r1
+ div1 r6,r1
+ or r5,r8
+ div1 r6,r1
+ add #-0x80,r8 ! r8 is 0 iff there is a match
+ div1 r6,r1
+ swap.w r8,r7 ! or upper 16 bits...
+ div1 r6,r1
+ or r7,r8 !...into lower 16 bits
+ div1 r6,r1
+ extu.w r8,r8
+ div1 r6,r1
+ extu.b r0,r7
+ div1 r6,r1
+ shll8 r7
+ exts.w r1,r0
+ xor r7,r1 ! replace lsb of result with lsb of dividend
+ div1 r6,r1
+ neg r8,r8 ! upper 16 bits of r8 are now 0xffff iff we want the end adjustment
+ div1 r6,r1
+ and r0,r8
+ div1 r6,r1
+ swap.w r8,r7
+ div1 r6,r1
+ mov.l @r15+,r8 ! 58 insns, 29 cycles up to here
+LOCAL(div_end):
+ div1 r6,r1
+ shll8 r0
+ div1 r6,r1
+ exts.w r7,r7
+ div1 r6,r1
+ add r0,r0
+ div1 r6,r1
+ sub r7,r0
+ extu.b r1,r1
+ mov.l @r15+,r7
+ rotcl r1
+ mov.l @r15+,r6
+ rts
+ add r1,r0
+
+ .balign 4
+LOCAL(udiv_le128): ! 4 cycles up to here (or 7 for mispredict)
+ mova LOCAL(div_table_inv),r0
+ shll2 r6
+ mov.l @(r0,r6),r1
+ mova LOCAL(div_table_clz),r0
+ lds r4,mach
+ !
+ !
+ !
+ tst r1,r1
+ !
+ bt 0f
+ dmulu.l r1,r4
+0: mov.b @(r0,r5),r1
+ clrt
+ !
+ !
+ sts mach,r0
+ addc r4,r0
+ rotcr r0
+ mov.l @r15+,r6
+ rts
+ shld r1,r0
+
+ .balign 4
+LOCAL(div_le128): ! 3 cycles up to here (or 6 for mispredict)
+ mova LOCAL(div_table_inv),r0
+ shll2 r6
+ mov.l @(r0,r6),r1
+ mova LOCAL(div_table_clz),r0
+ neg r4,r6
+ bf 0f
+ mov r4,r6
+0: lds r6,mach
+ tst r1,r1
+ bt 0f
+ dmulu.l r1,r6
+0: div0s r4,r5
+ mov.b @(r0,r5),r1
+ bt/s LOCAL(le128_neg)
+ clrt
+ !
+ sts mach,r0
+ addc r6,r0
+ rotcr r0
+ mov.l @r15+,r6
+ rts
+ shld r1,r0
+
+/* Could trap divide by zero for the cost of one cycle more mispredict penalty:
+...
+ dmulu.l r1,r6
+0: div0s r4,r5
+ bt/s LOCAL(le128_neg)
+ tst r5,r5
+ bt LOCAL(div_by_zero)
+ mov.b @(r0,r5),r1
+ sts mach,r0
+ addc r6,r0
+...
+LOCAL(div_by_zero):
+ trapa #
+ .balign 4
+LOCAL(le128_neg):
+ bt LOCAL(div_by_zero)
+ mov.b @(r0,r5),r1
+ sts mach,r0
+ addc r6,r0
+... */
+
+ .balign 4
+LOCAL(le128_neg):
+ sts mach,r0
+ addc r6,r0
+ rotcr r0
+ mov.l @r15+,r6
+ shad r1,r0
+ rts
+ neg r0,r0
+ ENDFUNC(GLOBAL(udivsi3_i4i))
+ ENDFUNC(GLOBAL(sdivsi3_i4i))
+
+/* This table has been generated by divtab-sh4.c. */
+ .balign 4
+ .byte -7
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -1
+ .byte -1
+ .byte 0
+LOCAL(div_table_clz):
+ .byte 0
+ .byte 0
+ .byte -1
+ .byte -1
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+/* 1/-128 .. 1/127, normalized. There is an implicit leading 1 in bit 32,
+ or in bit 33 for powers of two. */
+ .balign 4
+ .long 0x0
+ .long 0x2040811
+ .long 0x4104105
+ .long 0x624DD30
+ .long 0x8421085
+ .long 0xA6810A7
+ .long 0xC9714FC
+ .long 0xECF56BF
+ .long 0x11111112
+ .long 0x135C8114
+ .long 0x15B1E5F8
+ .long 0x18118119
+ .long 0x1A7B9612
+ .long 0x1CF06ADB
+ .long 0x1F7047DD
+ .long 0x21FB7813
+ .long 0x24924925
+ .long 0x27350B89
+ .long 0x29E4129F
+ .long 0x2C9FB4D9
+ .long 0x2F684BDB
+ .long 0x323E34A3
+ .long 0x3521CFB3
+ .long 0x38138139
+ .long 0x3B13B13C
+ .long 0x3E22CBCF
+ .long 0x41414142
+ .long 0x446F8657
+ .long 0x47AE147B
+ .long 0x4AFD6A06
+ .long 0x4E5E0A73
+ .long 0x51D07EAF
+ .long 0x55555556
+ .long 0x58ED2309
+ .long 0x5C9882BA
+ .long 0x60581606
+ .long 0x642C8591
+ .long 0x68168169
+ .long 0x6C16C16D
+ .long 0x702E05C1
+ .long 0x745D1746
+ .long 0x78A4C818
+ .long 0x7D05F418
+ .long 0x81818182
+ .long 0x86186187
+ .long 0x8ACB90F7
+ .long 0x8F9C18FA
+ .long 0x948B0FCE
+ .long 0x9999999A
+ .long 0x9EC8E952
+ .long 0xA41A41A5
+ .long 0xA98EF607
+ .long 0xAF286BCB
+ .long 0xB4E81B4F
+ .long 0xBACF914D
+ .long 0xC0E07039
+ .long 0xC71C71C8
+ .long 0xCD856891
+ .long 0xD41D41D5
+ .long 0xDAE6076C
+ .long 0xE1E1E1E2
+ .long 0xE9131AC0
+ .long 0xF07C1F08
+ .long 0xF81F81F9
+ .long 0x0
+ .long 0x4104105
+ .long 0x8421085
+ .long 0xC9714FC
+ .long 0x11111112
+ .long 0x15B1E5F8
+ .long 0x1A7B9612
+ .long 0x1F7047DD
+ .long 0x24924925
+ .long 0x29E4129F
+ .long 0x2F684BDB
+ .long 0x3521CFB3
+ .long 0x3B13B13C
+ .long 0x41414142
+ .long 0x47AE147B
+ .long 0x4E5E0A73
+ .long 0x55555556
+ .long 0x5C9882BA
+ .long 0x642C8591
+ .long 0x6C16C16D
+ .long 0x745D1746
+ .long 0x7D05F418
+ .long 0x86186187
+ .long 0x8F9C18FA
+ .long 0x9999999A
+ .long 0xA41A41A5
+ .long 0xAF286BCB
+ .long 0xBACF914D
+ .long 0xC71C71C8
+ .long 0xD41D41D5
+ .long 0xE1E1E1E2
+ .long 0xF07C1F08
+ .long 0x0
+ .long 0x8421085
+ .long 0x11111112
+ .long 0x1A7B9612
+ .long 0x24924925
+ .long 0x2F684BDB
+ .long 0x3B13B13C
+ .long 0x47AE147B
+ .long 0x55555556
+ .long 0x642C8591
+ .long 0x745D1746
+ .long 0x86186187
+ .long 0x9999999A
+ .long 0xAF286BCB
+ .long 0xC71C71C8
+ .long 0xE1E1E1E2
+ .long 0x0
+ .long 0x11111112
+ .long 0x24924925
+ .long 0x3B13B13C
+ .long 0x55555556
+ .long 0x745D1746
+ .long 0x9999999A
+ .long 0xC71C71C8
+ .long 0x0
+ .long 0x24924925
+ .long 0x55555556
+ .long 0x9999999A
+ .long 0x0
+ .long 0x55555556
+ .long 0x0
+ .long 0x0
+LOCAL(div_table_inv):
+ .long 0x0
+ .long 0x0
+ .long 0x0
+ .long 0x55555556
+ .long 0x0
+ .long 0x9999999A
+ .long 0x55555556
+ .long 0x24924925
+ .long 0x0
+ .long 0xC71C71C8
+ .long 0x9999999A
+ .long 0x745D1746
+ .long 0x55555556
+ .long 0x3B13B13C
+ .long 0x24924925
+ .long 0x11111112
+ .long 0x0
+ .long 0xE1E1E1E2
+ .long 0xC71C71C8
+ .long 0xAF286BCB
+ .long 0x9999999A
+ .long 0x86186187
+ .long 0x745D1746
+ .long 0x642C8591
+ .long 0x55555556
+ .long 0x47AE147B
+ .long 0x3B13B13C
+ .long 0x2F684BDB
+ .long 0x24924925
+ .long 0x1A7B9612
+ .long 0x11111112
+ .long 0x8421085
+ .long 0x0
+ .long 0xF07C1F08
+ .long 0xE1E1E1E2
+ .long 0xD41D41D5
+ .long 0xC71C71C8
+ .long 0xBACF914D
+ .long 0xAF286BCB
+ .long 0xA41A41A5
+ .long 0x9999999A
+ .long 0x8F9C18FA
+ .long 0x86186187
+ .long 0x7D05F418
+ .long 0x745D1746
+ .long 0x6C16C16D
+ .long 0x642C8591
+ .long 0x5C9882BA
+ .long 0x55555556
+ .long 0x4E5E0A73
+ .long 0x47AE147B
+ .long 0x41414142
+ .long 0x3B13B13C
+ .long 0x3521CFB3
+ .long 0x2F684BDB
+ .long 0x29E4129F
+ .long 0x24924925
+ .long 0x1F7047DD
+ .long 0x1A7B9612
+ .long 0x15B1E5F8
+ .long 0x11111112
+ .long 0xC9714FC
+ .long 0x8421085
+ .long 0x4104105
+ .long 0x0
+ .long 0xF81F81F9
+ .long 0xF07C1F08
+ .long 0xE9131AC0
+ .long 0xE1E1E1E2
+ .long 0xDAE6076C
+ .long 0xD41D41D5
+ .long 0xCD856891
+ .long 0xC71C71C8
+ .long 0xC0E07039
+ .long 0xBACF914D
+ .long 0xB4E81B4F
+ .long 0xAF286BCB
+ .long 0xA98EF607
+ .long 0xA41A41A5
+ .long 0x9EC8E952
+ .long 0x9999999A
+ .long 0x948B0FCE
+ .long 0x8F9C18FA
+ .long 0x8ACB90F7
+ .long 0x86186187
+ .long 0x81818182
+ .long 0x7D05F418
+ .long 0x78A4C818
+ .long 0x745D1746
+ .long 0x702E05C1
+ .long 0x6C16C16D
+ .long 0x68168169
+ .long 0x642C8591
+ .long 0x60581606
+ .long 0x5C9882BA
+ .long 0x58ED2309
+ .long 0x55555556
+ .long 0x51D07EAF
+ .long 0x4E5E0A73
+ .long 0x4AFD6A06
+ .long 0x47AE147B
+ .long 0x446F8657
+ .long 0x41414142
+ .long 0x3E22CBCF
+ .long 0x3B13B13C
+ .long 0x38138139
+ .long 0x3521CFB3
+ .long 0x323E34A3
+ .long 0x2F684BDB
+ .long 0x2C9FB4D9
+ .long 0x29E4129F
+ .long 0x27350B89
+ .long 0x24924925
+ .long 0x21FB7813
+ .long 0x1F7047DD
+ .long 0x1CF06ADB
+ .long 0x1A7B9612
+ .long 0x18118119
+ .long 0x15B1E5F8
+ .long 0x135C8114
+ .long 0x11111112
+ .long 0xECF56BF
+ .long 0xC9714FC
+ .long 0xA6810A7
+ .long 0x8421085
+ .long 0x624DD30
+ .long 0x4104105
+ .long 0x2040811
+ /* maximum error: 0.987342, scaled: 0.921875 */
+
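divtab-sh4.c itself is not part of this patch, but given the format note above the table (implicit leading 1 in bit 32, or bit 33 for powers of two), a non-power-of-two entry can plausibly be re-derived as below; treat this as an editorial sketch, not the actual generator.

   #include <stdint.h>

   /* Candidate reconstruction of an inverse-table entry for 2 < d < 128,
      d not a power of two; e.g. d = 3 gives ceil(2^34/3) - 2^32
      = 0x55555556, matching the table.  */
   uint32_t inv_entry (unsigned d)
   {
     unsigned k = 31 - __builtin_clz (d);            /* floor(log2(d)) */
     uint64_t c = ((1ull << (33 + k)) + d - 1) / d;  /* ceil(2^(33+k)/d) */
     return (uint32_t) (c - (1ull << 32));           /* drop the implicit leading 1 */
   }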
+#endif /* SH3 / SH4 */
+
+#endif /* L_div_table */
+#endif /* !__SHMEDIA__ */
diff --git a/libgcc/config/sh/lib1funcs-Os-4-200.S b/libgcc/config/sh/lib1funcs-Os-4-200.S
new file mode 100644
index 00000000000..aae57ccd36c
--- /dev/null
+++ b/libgcc/config/sh/lib1funcs-Os-4-200.S
@@ -0,0 +1,322 @@
+/* Copyright (C) 2006, 2009 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Moderately Space-optimized libgcc routines for the Renesas SH /
+ STMicroelectronics ST40 CPUs.
+ Contributed by Jörn Rennecke <joern.rennecke@st.com>. */
+
+#include "lib1funcs.h"
+
+#if !__SHMEDIA__
+#ifdef L_udivsi3_i4i
+
+/* 88 bytes; sh4-200 cycle counts:
+ divisor >= 2G: 11 cycles
+ dividend < 2G: 48 cycles
+ dividend >= 2G: divisor != 1: 54 cycles
+ dividend >= 2G, divisor == 1: 22 cycles */
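A loose C rendering of the double-precision strategy that follows (an assumption from reading the assembly, not a translation): float interprets its input as signed, so a dividend >= 2^31 is fixed up by adding 2^32 (the .double constant after L1), a divisor >= 2^31 reduces to a single compare, and d == 1 is short-circuited because the signed truncation could not represent quotients >= 2^31.

   #include <stdint.h>

   uint32_t udiv_fpu_model (uint32_t n, uint32_t d)
   {
     if ((int32_t) d < 0)
       return n >= d;                 /* huge divisor: quotient is 0 or 1 */
     double dn = (double) (int32_t) n;
     if ((int32_t) n < 0)
       {
         if (d == 1)
           return n;                  /* div-by-1 shortcut */
         dn += 4294967296.0;          /* undo the signed interpretation */
       }
     return (uint32_t) (dn / (double) (int32_t) d);
   }

A 53-bit mantissa makes the quotient exact for all 32-bit operands, which is why no correction step follows the fdiv in the assembly.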
+#if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__)
+!! args in r4 and r5, result in r0, clobber r1
+
+ .global GLOBAL(udivsi3_i4i)
+ FUNC(GLOBAL(udivsi3_i4i))
+GLOBAL(udivsi3_i4i):
+ mova L1,r0
+ cmp/pz r5
+ sts fpscr,r1
+ lds.l @r0+,fpscr
+ sts.l fpul,@-r15
+ bf LOCAL(huge_divisor)
+ mov.l r1,@-r15
+ lds r4,fpul
+ cmp/pz r4
+#ifdef FMOVD_WORKS
+ fmov.d dr0,@-r15
+ float fpul,dr0
+ fmov.d dr2,@-r15
+ bt LOCAL(dividend_adjusted)
+ mov #1,r1
+ fmov.d @r0,dr2
+ cmp/eq r1,r5
+ bt LOCAL(div_by_1)
+ fadd dr2,dr0
+LOCAL(dividend_adjusted):
+ lds r5,fpul
+ float fpul,dr2
+ fdiv dr2,dr0
+LOCAL(div_by_1):
+ fmov.d @r15+,dr2
+ ftrc dr0,fpul
+ fmov.d @r15+,dr0
+#else /* !FMOVD_WORKS */
+ fmov.s DR01,@-r15
+ mov #1,r1
+ fmov.s DR00,@-r15
+ float fpul,dr0
+ fmov.s DR21,@-r15
+ bt/s LOCAL(dividend_adjusted)
+ fmov.s DR20,@-r15
+ cmp/eq r1,r5
+ bt LOCAL(div_by_1)
+ fmov.s @r0+,DR20
+ fmov.s @r0,DR21
+ fadd dr2,dr0
+LOCAL(dividend_adjusted):
+ lds r5,fpul
+ float fpul,dr2
+ fdiv dr2,dr0
+LOCAL(div_by_1):
+ fmov.s @r15+,DR20
+ fmov.s @r15+,DR21
+ ftrc dr0,fpul
+ fmov.s @r15+,DR00
+ fmov.s @r15+,DR01
+#endif /* !FMOVD_WORKS */
+ lds.l @r15+,fpscr
+ sts fpul,r0
+ rts
+ lds.l @r15+,fpul
+
+#ifdef FMOVD_WORKS
+ .p2align 3 ! make double below 8 byte aligned.
+#endif
+LOCAL(huge_divisor):
+ lds r1,fpscr
+ add #4,r15
+ cmp/hs r5,r4
+ rts
+ movt r0
+
+ .p2align 2
+L1:
+#ifndef FMOVD_WORKS
+ .long 0x80000
+#else
+ .long 0x180000
+#endif
+ .double 4294967296
+
+ ENDFUNC(GLOBAL(udivsi3_i4i))
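+
+/* For reference, a C sketch of the algorithm above (illustrative only,
+   not part of the build; assumes IEEE double and 32-bit unsigned int).
+   The FPU "float" instruction reads the dividend as signed, so a
+   dividend >= 2^31 is compensated by adding the 2^32 constant kept
+   after L1:
+
+unsigned int
+udivsi3_i4i_ref (unsigned int x, unsigned int y)
+{
+  if (y & 0x80000000)		// huge divisor: quotient is 0 or 1
+    return x >= y;
+  double dx = (double) (int) x;
+  if ((int) x < 0)
+    {
+      if (y == 1)		// special-cased only on this slow path
+	return x;
+      dx += 4294967296.0;	// undo the signed reading of x
+    }
+  return (unsigned int) (dx / (double) y);
+}
+*/
+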
+#elif !defined (__sh1__) /* !__SH_FPU_DOUBLE__ */
+
+#if 0
+/* With 36 bytes, the following would probably be the most compact
+ implementation, but with 139 cycles on an sh4-200, it is extremely slow. */
+GLOBAL(udivsi3_i4i):
+ mov.l r2,@-r15
+ mov #0,r1
+ div0u
+ mov r1,r2
+ mov.l r3,@-r15
+ mov r1,r3
+ sett
+ mov r4,r0
+LOCAL(loop):
+ rotcr r2
+ ;
+ bt/s LOCAL(end)
+ cmp/gt r2,r3
+ rotcl r0
+ bra LOCAL(loop)
+ div1 r5,r1
+LOCAL(end):
+ rotcl r0
+ mov.l @r15+,r3
+ rts
+ mov.l @r15+,r2
+#endif /* 0 */
+
+/* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i
+ sh4-200 run times:
+ udiv small divisor: 55 cycles
+ udiv large divisor: 52 cycles
+ sdiv small divisor, positive result: 59 cycles
+ sdiv large divisor, positive result: 56 cycles
+ sdiv small divisor, negative result: 65 cycles (*)
+ sdiv large divisor, negative result: 62 cycles (*)
+ (*): r2 is restored in the rts delay slot and has a lingering latency
+ of two more cycles. */
+ .balign 4
+ .global GLOBAL(udivsi3_i4i)
+ FUNC(GLOBAL(udivsi3_i4i))
+ FUNC(GLOBAL(sdivsi3_i4i))
+GLOBAL(udivsi3_i4i):
+ sts pr,r1
+ mov.l r4,@-r15
+ extu.w r5,r0
+ cmp/eq r5,r0
+ swap.w r4,r0
+ shlr16 r4
+ bf/s LOCAL(large_divisor)
+ div0u
+ mov.l r5,@-r15
+ shll16 r5
+LOCAL(sdiv_small_divisor):
+ div1 r5,r4
+ bsr LOCAL(div6)
+ div1 r5,r4
+ div1 r5,r4
+ bsr LOCAL(div6)
+ div1 r5,r4
+ xtrct r4,r0
+ xtrct r0,r4
+ bsr LOCAL(div7)
+ swap.w r4,r4
+ div1 r5,r4
+ bsr LOCAL(div7)
+ div1 r5,r4
+ xtrct r4,r0
+ mov.l @r15+,r5
+ swap.w r0,r0
+ mov.l @r15+,r4
+ jmp @r1
+ rotcl r0
+LOCAL(div7):
+ div1 r5,r4
+LOCAL(div6):
+ div1 r5,r4; div1 r5,r4; div1 r5,r4
+ div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
+
+LOCAL(divx3):
+ rotcl r0
+ div1 r5,r4
+ rotcl r0
+ div1 r5,r4
+ rotcl r0
+ rts
+ div1 r5,r4
+
+LOCAL(large_divisor):
+ mov.l r5,@-r15
+LOCAL(sdiv_large_divisor):
+ xor r4,r0
+ .rept 4
+ rotcl r0
+ bsr LOCAL(divx3)
+ div1 r5,r4
+ .endr
+ mov.l @r15+,r5
+ mov.l @r15+,r4
+ jmp @r1
+ rotcl r0
+ ENDFUNC(GLOBAL(udivsi3_i4i))
+
+ .global GLOBAL(sdivsi3_i4i)
+GLOBAL(sdivsi3_i4i):
+ mov.l r4,@-r15
+ cmp/pz r5
+ mov.l r5,@-r15
+ bt/s LOCAL(pos_divisor)
+ cmp/pz r4
+ neg r5,r5
+ extu.w r5,r0
+ bt/s LOCAL(neg_result)
+ cmp/eq r5,r0
+ neg r4,r4
+LOCAL(pos_result):
+ swap.w r4,r0
+ bra LOCAL(sdiv_check_divisor)
+ sts pr,r1
+LOCAL(pos_divisor):
+ extu.w r5,r0
+ bt/s LOCAL(pos_result)
+ cmp/eq r5,r0
+ neg r4,r4
+LOCAL(neg_result):
+ mova LOCAL(negate_result),r0
+ ;
+ mov r0,r1
+ swap.w r4,r0
+ lds r2,macl
+ sts pr,r2
+LOCAL(sdiv_check_divisor):
+ shlr16 r4
+ bf/s LOCAL(sdiv_large_divisor)
+ div0u
+ bra LOCAL(sdiv_small_divisor)
+ shll16 r5
+ .balign 4
+LOCAL(negate_result):
+ neg r0,r0
+ jmp @r2
+ sts macl,r2
+ ENDFUNC(GLOBAL(sdivsi3_i4i))
+#endif /* !__SH_FPU_DOUBLE__ */
+#endif /* L_udivsi3_i4i */
+
+#ifdef L_sdivsi3_i4i
+#if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__)
+/* 48 bytes, 45 cycles on sh4-200 */
+!! args in r4 and r5, result in r0, clobber r1
+
+ .global GLOBAL(sdivsi3_i4i)
+ FUNC(GLOBAL(sdivsi3_i4i))
+GLOBAL(sdivsi3_i4i):
+ sts.l fpscr,@-r15
+ sts fpul,r1
+ mova L1,r0
+ lds.l @r0+,fpscr
+ lds r4,fpul
+#ifdef FMOVD_WORKS
+ fmov.d dr0,@-r15
+ float fpul,dr0
+ lds r5,fpul
+ fmov.d dr2,@-r15
+#else
+ fmov.s DR01,@-r15
+ fmov.s DR00,@-r15
+ float fpul,dr0
+ lds r5,fpul
+ fmov.s DR21,@-r15
+ fmov.s DR20,@-r15
+#endif
+ float fpul,dr2
+ fdiv dr2,dr0
+#ifdef FMOVD_WORKS
+ fmov.d @r15+,dr2
+#else
+ fmov.s @r15+,DR20
+ fmov.s @r15+,DR21
+#endif
+ ftrc dr0,fpul
+#ifdef FMOVD_WORKS
+ fmov.d @r15+,dr0
+#else
+ fmov.s @r15+,DR00
+ fmov.s @r15+,DR01
+#endif
+ lds.l @r15+,fpscr
+ sts fpul,r0
+ rts
+ lds r1,fpul
+
+ .p2align 2
+L1:
+#ifndef FMOVD_WORKS
+ .long 0x80000
+#else
+ .long 0x180000
+#endif
+
+ ENDFUNC(GLOBAL(sdivsi3_i4i))
+#endif /* __SH_FPU_DOUBLE__ */
+#endif /* L_sdivsi3_i4i */
+#endif /* !__SHMEDIA__ */
diff --git a/libgcc/config/sh/lib1funcs.S b/libgcc/config/sh/lib1funcs.S
new file mode 100644
index 00000000000..2f0ca16cd91
--- /dev/null
+++ b/libgcc/config/sh/lib1funcs.S
@@ -0,0 +1,3933 @@
+/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2009
+ Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+!! libgcc routines for the Renesas / SuperH SH CPUs.
+!! Contributed by Steve Chamberlain <sac@cygnus.com>.
+
+!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
+!! recoded in assembly by Toshiyasu Morita <tm@netcom.com>.
+
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif
+
+/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
+   ELF local label prefixes by Jörn Rennecke
+   <amylaar@cygnus.com>.  */
+
+#include "lib1funcs.h"
+
+/* t-vxworks needs to build both PIC and non-PIC versions of libgcc,
+ so it is more convenient to define NO_FPSCR_VALUES here than to
+ define it on the command line. */
+#if defined __vxworks && defined __PIC__
+#define NO_FPSCR_VALUES
+#endif
+
+#if ! __SH5__
+#ifdef L_ashiftrt
+ .global GLOBAL(ashiftrt_r4_0)
+ .global GLOBAL(ashiftrt_r4_1)
+ .global GLOBAL(ashiftrt_r4_2)
+ .global GLOBAL(ashiftrt_r4_3)
+ .global GLOBAL(ashiftrt_r4_4)
+ .global GLOBAL(ashiftrt_r4_5)
+ .global GLOBAL(ashiftrt_r4_6)
+ .global GLOBAL(ashiftrt_r4_7)
+ .global GLOBAL(ashiftrt_r4_8)
+ .global GLOBAL(ashiftrt_r4_9)
+ .global GLOBAL(ashiftrt_r4_10)
+ .global GLOBAL(ashiftrt_r4_11)
+ .global GLOBAL(ashiftrt_r4_12)
+ .global GLOBAL(ashiftrt_r4_13)
+ .global GLOBAL(ashiftrt_r4_14)
+ .global GLOBAL(ashiftrt_r4_15)
+ .global GLOBAL(ashiftrt_r4_16)
+ .global GLOBAL(ashiftrt_r4_17)
+ .global GLOBAL(ashiftrt_r4_18)
+ .global GLOBAL(ashiftrt_r4_19)
+ .global GLOBAL(ashiftrt_r4_20)
+ .global GLOBAL(ashiftrt_r4_21)
+ .global GLOBAL(ashiftrt_r4_22)
+ .global GLOBAL(ashiftrt_r4_23)
+ .global GLOBAL(ashiftrt_r4_24)
+ .global GLOBAL(ashiftrt_r4_25)
+ .global GLOBAL(ashiftrt_r4_26)
+ .global GLOBAL(ashiftrt_r4_27)
+ .global GLOBAL(ashiftrt_r4_28)
+ .global GLOBAL(ashiftrt_r4_29)
+ .global GLOBAL(ashiftrt_r4_30)
+ .global GLOBAL(ashiftrt_r4_31)
+ .global GLOBAL(ashiftrt_r4_32)
+
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_0))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_1))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_2))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_3))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_4))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_5))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_6))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_7))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_8))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_9))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_10))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_11))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_12))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_13))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_14))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_15))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_16))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_17))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_18))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_19))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_20))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_21))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_22))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_23))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_24))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_25))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_26))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_27))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_28))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_29))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_30))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_31))
+ HIDDEN_FUNC(GLOBAL(ashiftrt_r4_32))
+
+ .align 1
+GLOBAL(ashiftrt_r4_32):
+GLOBAL(ashiftrt_r4_31):
+ rotcl r4
+ rts
+ subc r4,r4
+
+GLOBAL(ashiftrt_r4_30):
+ shar r4
+GLOBAL(ashiftrt_r4_29):
+ shar r4
+GLOBAL(ashiftrt_r4_28):
+ shar r4
+GLOBAL(ashiftrt_r4_27):
+ shar r4
+GLOBAL(ashiftrt_r4_26):
+ shar r4
+GLOBAL(ashiftrt_r4_25):
+ shar r4
+GLOBAL(ashiftrt_r4_24):
+ shlr16 r4
+ shlr8 r4
+ rts
+ exts.b r4,r4
+
+GLOBAL(ashiftrt_r4_23):
+ shar r4
+GLOBAL(ashiftrt_r4_22):
+ shar r4
+GLOBAL(ashiftrt_r4_21):
+ shar r4
+GLOBAL(ashiftrt_r4_20):
+ shar r4
+GLOBAL(ashiftrt_r4_19):
+ shar r4
+GLOBAL(ashiftrt_r4_18):
+ shar r4
+GLOBAL(ashiftrt_r4_17):
+ shar r4
+GLOBAL(ashiftrt_r4_16):
+ shlr16 r4
+ rts
+ exts.w r4,r4
+
+GLOBAL(ashiftrt_r4_15):
+ shar r4
+GLOBAL(ashiftrt_r4_14):
+ shar r4
+GLOBAL(ashiftrt_r4_13):
+ shar r4
+GLOBAL(ashiftrt_r4_12):
+ shar r4
+GLOBAL(ashiftrt_r4_11):
+ shar r4
+GLOBAL(ashiftrt_r4_10):
+ shar r4
+GLOBAL(ashiftrt_r4_9):
+ shar r4
+GLOBAL(ashiftrt_r4_8):
+ shar r4
+GLOBAL(ashiftrt_r4_7):
+ shar r4
+GLOBAL(ashiftrt_r4_6):
+ shar r4
+GLOBAL(ashiftrt_r4_5):
+ shar r4
+GLOBAL(ashiftrt_r4_4):
+ shar r4
+GLOBAL(ashiftrt_r4_3):
+ shar r4
+GLOBAL(ashiftrt_r4_2):
+ shar r4
+GLOBAL(ashiftrt_r4_1):
+ rts
+ shar r4
+
+GLOBAL(ashiftrt_r4_0):
+ rts
+ nop
+
+ ENDFUNC(GLOBAL(ashiftrt_r4_0))
+ ENDFUNC(GLOBAL(ashiftrt_r4_1))
+ ENDFUNC(GLOBAL(ashiftrt_r4_2))
+ ENDFUNC(GLOBAL(ashiftrt_r4_3))
+ ENDFUNC(GLOBAL(ashiftrt_r4_4))
+ ENDFUNC(GLOBAL(ashiftrt_r4_5))
+ ENDFUNC(GLOBAL(ashiftrt_r4_6))
+ ENDFUNC(GLOBAL(ashiftrt_r4_7))
+ ENDFUNC(GLOBAL(ashiftrt_r4_8))
+ ENDFUNC(GLOBAL(ashiftrt_r4_9))
+ ENDFUNC(GLOBAL(ashiftrt_r4_10))
+ ENDFUNC(GLOBAL(ashiftrt_r4_11))
+ ENDFUNC(GLOBAL(ashiftrt_r4_12))
+ ENDFUNC(GLOBAL(ashiftrt_r4_13))
+ ENDFUNC(GLOBAL(ashiftrt_r4_14))
+ ENDFUNC(GLOBAL(ashiftrt_r4_15))
+ ENDFUNC(GLOBAL(ashiftrt_r4_16))
+ ENDFUNC(GLOBAL(ashiftrt_r4_17))
+ ENDFUNC(GLOBAL(ashiftrt_r4_18))
+ ENDFUNC(GLOBAL(ashiftrt_r4_19))
+ ENDFUNC(GLOBAL(ashiftrt_r4_20))
+ ENDFUNC(GLOBAL(ashiftrt_r4_21))
+ ENDFUNC(GLOBAL(ashiftrt_r4_22))
+ ENDFUNC(GLOBAL(ashiftrt_r4_23))
+ ENDFUNC(GLOBAL(ashiftrt_r4_24))
+ ENDFUNC(GLOBAL(ashiftrt_r4_25))
+ ENDFUNC(GLOBAL(ashiftrt_r4_26))
+ ENDFUNC(GLOBAL(ashiftrt_r4_27))
+ ENDFUNC(GLOBAL(ashiftrt_r4_28))
+ ENDFUNC(GLOBAL(ashiftrt_r4_29))
+ ENDFUNC(GLOBAL(ashiftrt_r4_30))
+ ENDFUNC(GLOBAL(ashiftrt_r4_31))
+ ENDFUNC(GLOBAL(ashiftrt_r4_32))
+#endif
+
+#ifdef L_ashiftrt_n
+
+!
+! GLOBAL(ashrsi3)
+!
+! Entry:
+!
+! r4: Value to shift
+! r5: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
+!
+
+ .global GLOBAL(ashrsi3)
+ HIDDEN_FUNC(GLOBAL(ashrsi3))
+ .align 2
+GLOBAL(ashrsi3):
+ mov #31,r0
+ and r0,r5
+ mova LOCAL(ashrsi3_table),r0
+ mov.b @(r0,r5),r5
+#ifdef __sh1__
+ add r5,r0
+ jmp @r0
+#else
+ braf r5
+#endif
+ mov r4,r0
+
+ .align 2
+LOCAL(ashrsi3_table):
+ .byte LOCAL(ashrsi3_0)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_1)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_2)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_3)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_4)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_5)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_6)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_7)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_8)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_9)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_10)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_11)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_12)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_13)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_14)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_15)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_16)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_17)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_18)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_19)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_20)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_21)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_22)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_23)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_24)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_25)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_26)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_27)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_28)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_29)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_30)-LOCAL(ashrsi3_table)
+ .byte LOCAL(ashrsi3_31)-LOCAL(ashrsi3_table)
+
+LOCAL(ashrsi3_31):
+ rotcl r0
+ rts
+ subc r0,r0
+
+LOCAL(ashrsi3_30):
+ shar r0
+LOCAL(ashrsi3_29):
+ shar r0
+LOCAL(ashrsi3_28):
+ shar r0
+LOCAL(ashrsi3_27):
+ shar r0
+LOCAL(ashrsi3_26):
+ shar r0
+LOCAL(ashrsi3_25):
+ shar r0
+LOCAL(ashrsi3_24):
+ shlr16 r0
+ shlr8 r0
+ rts
+ exts.b r0,r0
+
+LOCAL(ashrsi3_23):
+ shar r0
+LOCAL(ashrsi3_22):
+ shar r0
+LOCAL(ashrsi3_21):
+ shar r0
+LOCAL(ashrsi3_20):
+ shar r0
+LOCAL(ashrsi3_19):
+ shar r0
+LOCAL(ashrsi3_18):
+ shar r0
+LOCAL(ashrsi3_17):
+ shar r0
+LOCAL(ashrsi3_16):
+ shlr16 r0
+ rts
+ exts.w r0,r0
+
+LOCAL(ashrsi3_15):
+ shar r0
+LOCAL(ashrsi3_14):
+ shar r0
+LOCAL(ashrsi3_13):
+ shar r0
+LOCAL(ashrsi3_12):
+ shar r0
+LOCAL(ashrsi3_11):
+ shar r0
+LOCAL(ashrsi3_10):
+ shar r0
+LOCAL(ashrsi3_9):
+ shar r0
+LOCAL(ashrsi3_8):
+ shar r0
+LOCAL(ashrsi3_7):
+ shar r0
+LOCAL(ashrsi3_6):
+ shar r0
+LOCAL(ashrsi3_5):
+ shar r0
+LOCAL(ashrsi3_4):
+ shar r0
+LOCAL(ashrsi3_3):
+ shar r0
+LOCAL(ashrsi3_2):
+ shar r0
+LOCAL(ashrsi3_1):
+ rts
+ shar r0
+
+LOCAL(ashrsi3_0):
+ rts
+ nop
+
+ ENDFUNC(GLOBAL(ashrsi3))
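+
+/* The routine above (and ashlsi3 / lshrsi3 below) dispatches through a
+   table of byte offsets into a fall-through chain of single-bit shifts.
+   A GNU C sketch of the same idea, scaled down to 4 cases (illustrative
+   only; the real table has 32 entries and the count is taken mod 32):
+
+int
+ashr4_ref (int value, int count)
+{
+  static const void *tab[4] = { &&s0, &&s1, &&s2, &&s3 };
+  goto *tab[count & 3];		// mova / mov.b / braf above
+ s3: value >>= 1;		// fall through, like the shar chain
+ s2: value >>= 1;
+ s1: value >>= 1;
+ s0: return value;
+}
+*/
+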
+#endif
+
+#ifdef L_ashiftlt
+
+!
+! GLOBAL(ashlsi3)
+!
+! Entry:
+!
+! r4: Value to shift
+! r5: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
+!
+ .global GLOBAL(ashlsi3)
+ HIDDEN_FUNC(GLOBAL(ashlsi3))
+ .align 2
+GLOBAL(ashlsi3):
+ mov #31,r0
+ and r0,r5
+ mova LOCAL(ashlsi3_table),r0
+ mov.b @(r0,r5),r5
+#ifdef __sh1__
+ add r5,r0
+ jmp @r0
+#else
+ braf r5
+#endif
+ mov r4,r0
+
+ .align 2
+LOCAL(ashlsi3_table):
+ .byte LOCAL(ashlsi3_0)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_1)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_2)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_3)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_4)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_5)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_6)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_7)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_8)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_9)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_10)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_11)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_12)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_13)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_14)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_15)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_16)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_17)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_18)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_19)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_20)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_21)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_22)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_23)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_24)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_25)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_26)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_27)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_28)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_29)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_30)-LOCAL(ashlsi3_table)
+ .byte LOCAL(ashlsi3_31)-LOCAL(ashlsi3_table)
+
+LOCAL(ashlsi3_6):
+ shll2 r0
+LOCAL(ashlsi3_4):
+ shll2 r0
+LOCAL(ashlsi3_2):
+ rts
+ shll2 r0
+
+LOCAL(ashlsi3_7):
+ shll2 r0
+LOCAL(ashlsi3_5):
+ shll2 r0
+LOCAL(ashlsi3_3):
+ shll2 r0
+LOCAL(ashlsi3_1):
+ rts
+ shll r0
+
+LOCAL(ashlsi3_14):
+ shll2 r0
+LOCAL(ashlsi3_12):
+ shll2 r0
+LOCAL(ashlsi3_10):
+ shll2 r0
+LOCAL(ashlsi3_8):
+ rts
+ shll8 r0
+
+LOCAL(ashlsi3_15):
+ shll2 r0
+LOCAL(ashlsi3_13):
+ shll2 r0
+LOCAL(ashlsi3_11):
+ shll2 r0
+LOCAL(ashlsi3_9):
+ shll8 r0
+ rts
+ shll r0
+
+LOCAL(ashlsi3_22):
+ shll2 r0
+LOCAL(ashlsi3_20):
+ shll2 r0
+LOCAL(ashlsi3_18):
+ shll2 r0
+LOCAL(ashlsi3_16):
+ rts
+ shll16 r0
+
+LOCAL(ashlsi3_23):
+ shll2 r0
+LOCAL(ashlsi3_21):
+ shll2 r0
+LOCAL(ashlsi3_19):
+ shll2 r0
+LOCAL(ashlsi3_17):
+ shll16 r0
+ rts
+ shll r0
+
+LOCAL(ashlsi3_30):
+ shll2 r0
+LOCAL(ashlsi3_28):
+ shll2 r0
+LOCAL(ashlsi3_26):
+ shll2 r0
+LOCAL(ashlsi3_24):
+ shll16 r0
+ rts
+ shll8 r0
+
+LOCAL(ashlsi3_31):
+ shll2 r0
+LOCAL(ashlsi3_29):
+ shll2 r0
+LOCAL(ashlsi3_27):
+ shll2 r0
+LOCAL(ashlsi3_25):
+ shll16 r0
+ shll8 r0
+ rts
+ shll r0
+
+LOCAL(ashlsi3_0):
+ rts
+ nop
+
+ ENDFUNC(GLOBAL(ashlsi3))
+#endif
+
+#ifdef L_lshiftrt
+
+!
+! GLOBAL(lshrsi3)
+!
+! Entry:
+!
+! r4: Value to shift
+! r5: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
+!
+ .global GLOBAL(lshrsi3)
+ HIDDEN_FUNC(GLOBAL(lshrsi3))
+ .align 2
+GLOBAL(lshrsi3):
+ mov #31,r0
+ and r0,r5
+ mova LOCAL(lshrsi3_table),r0
+ mov.b @(r0,r5),r5
+#ifdef __sh1__
+ add r5,r0
+ jmp @r0
+#else
+ braf r5
+#endif
+ mov r4,r0
+
+ .align 2
+LOCAL(lshrsi3_table):
+ .byte LOCAL(lshrsi3_0)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_1)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_2)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_3)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_4)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_5)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_6)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_7)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_8)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_9)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_10)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_11)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_12)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_13)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_14)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_15)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_16)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_17)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_18)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_19)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_20)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_21)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_22)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_23)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_24)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_25)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_26)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_27)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_28)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_29)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_30)-LOCAL(lshrsi3_table)
+ .byte LOCAL(lshrsi3_31)-LOCAL(lshrsi3_table)
+
+LOCAL(lshrsi3_6):
+ shlr2 r0
+LOCAL(lshrsi3_4):
+ shlr2 r0
+LOCAL(lshrsi3_2):
+ rts
+ shlr2 r0
+
+LOCAL(lshrsi3_7):
+ shlr2 r0
+LOCAL(lshrsi3_5):
+ shlr2 r0
+LOCAL(lshrsi3_3):
+ shlr2 r0
+LOCAL(lshrsi3_1):
+ rts
+ shlr r0
+
+LOCAL(lshrsi3_14):
+ shlr2 r0
+LOCAL(lshrsi3_12):
+ shlr2 r0
+LOCAL(lshrsi3_10):
+ shlr2 r0
+LOCAL(lshrsi3_8):
+ rts
+ shlr8 r0
+
+LOCAL(lshrsi3_15):
+ shlr2 r0
+LOCAL(lshrsi3_13):
+ shlr2 r0
+LOCAL(lshrsi3_11):
+ shlr2 r0
+LOCAL(lshrsi3_9):
+ shlr8 r0
+ rts
+ shlr r0
+
+LOCAL(lshrsi3_22):
+ shlr2 r0
+LOCAL(lshrsi3_20):
+ shlr2 r0
+LOCAL(lshrsi3_18):
+ shlr2 r0
+LOCAL(lshrsi3_16):
+ rts
+ shlr16 r0
+
+LOCAL(lshrsi3_23):
+ shlr2 r0
+LOCAL(lshrsi3_21):
+ shlr2 r0
+LOCAL(lshrsi3_19):
+ shlr2 r0
+LOCAL(lshrsi3_17):
+ shlr16 r0
+ rts
+ shlr r0
+
+LOCAL(lshrsi3_30):
+ shlr2 r0
+LOCAL(lshrsi3_28):
+ shlr2 r0
+LOCAL(lshrsi3_26):
+ shlr2 r0
+LOCAL(lshrsi3_24):
+ shlr16 r0
+ rts
+ shlr8 r0
+
+LOCAL(lshrsi3_31):
+ shlr2 r0
+LOCAL(lshrsi3_29):
+ shlr2 r0
+LOCAL(lshrsi3_27):
+ shlr2 r0
+LOCAL(lshrsi3_25):
+ shlr16 r0
+ shlr8 r0
+ rts
+ shlr r0
+
+LOCAL(lshrsi3_0):
+ rts
+ nop
+
+ ENDFUNC(GLOBAL(lshrsi3))
+#endif
+
+#ifdef L_movmem
+ .text
+ .balign 4
+ .global GLOBAL(movmem)
+ HIDDEN_FUNC(GLOBAL(movmem))
+ HIDDEN_ALIAS(movstr,movmem)
+ /* This would be a lot simpler if r6 contained the byte count
+ minus 64, and we wouldn't be called here for a byte count of 64. */
+GLOBAL(movmem):
+ sts.l pr,@-r15
+ shll2 r6
+ bsr GLOBAL(movmemSI52+2)
+ mov.l @(48,r5),r0
+ .balign 4
+LOCAL(movmem_loop): /* Reached with rts */
+ mov.l @(60,r5),r0
+ add #-64,r6
+ mov.l r0,@(60,r4)
+ tst r6,r6
+ mov.l @(56,r5),r0
+ bt LOCAL(movmem_done)
+ mov.l r0,@(56,r4)
+ cmp/pl r6
+ mov.l @(52,r5),r0
+ add #64,r5
+ mov.l r0,@(52,r4)
+ add #64,r4
+ bt GLOBAL(movmemSI52)
+! done all the large groups, do the remainder
+! by jumping into the movmemSI* chain at the matching entry point
+ mova GLOBAL(movmemSI4)+4,r0
+ add r6,r0
+ jmp @r0
+LOCAL(movmem_done): ! share slot insn, works out aligned.
+ lds.l @r15+,pr
+ mov.l r0,@(56,r4)
+ mov.l @(52,r5),r0
+ rts
+ mov.l r0,@(52,r4)
+ .balign 4
+! ??? We need aliases movstr* for movmem* for the older libraries. These
+! aliases will be removed at some point in the future.
+ .global GLOBAL(movmemSI64)
+ HIDDEN_FUNC(GLOBAL(movmemSI64))
+ HIDDEN_ALIAS(movstrSI64,movmemSI64)
+GLOBAL(movmemSI64):
+ mov.l @(60,r5),r0
+ mov.l r0,@(60,r4)
+ .global GLOBAL(movmemSI60)
+ HIDDEN_FUNC(GLOBAL(movmemSI60))
+ HIDDEN_ALIAS(movstrSI60,movmemSI60)
+GLOBAL(movmemSI60):
+ mov.l @(56,r5),r0
+ mov.l r0,@(56,r4)
+ .global GLOBAL(movmemSI56)
+ HIDDEN_FUNC(GLOBAL(movmemSI56))
+ HIDDEN_ALIAS(movstrSI56,movmemSI56)
+GLOBAL(movmemSI56):
+ mov.l @(52,r5),r0
+ mov.l r0,@(52,r4)
+ .global GLOBAL(movmemSI52)
+ HIDDEN_FUNC(GLOBAL(movmemSI52))
+ HIDDEN_ALIAS(movstrSI52,movmemSI52)
+GLOBAL(movmemSI52):
+ mov.l @(48,r5),r0
+ mov.l r0,@(48,r4)
+ .global GLOBAL(movmemSI48)
+ HIDDEN_FUNC(GLOBAL(movmemSI48))
+ HIDDEN_ALIAS(movstrSI48,movmemSI48)
+GLOBAL(movmemSI48):
+ mov.l @(44,r5),r0
+ mov.l r0,@(44,r4)
+ .global GLOBAL(movmemSI44)
+ HIDDEN_FUNC(GLOBAL(movmemSI44))
+ HIDDEN_ALIAS(movstrSI44,movmemSI44)
+GLOBAL(movmemSI44):
+ mov.l @(40,r5),r0
+ mov.l r0,@(40,r4)
+ .global GLOBAL(movmemSI40)
+ HIDDEN_FUNC(GLOBAL(movmemSI40))
+ HIDDEN_ALIAS(movstrSI40,movmemSI40)
+GLOBAL(movmemSI40):
+ mov.l @(36,r5),r0
+ mov.l r0,@(36,r4)
+ .global GLOBAL(movmemSI36)
+ HIDDEN_FUNC(GLOBAL(movmemSI36))
+ HIDDEN_ALIAS(movstrSI36,movmemSI36)
+GLOBAL(movmemSI36):
+ mov.l @(32,r5),r0
+ mov.l r0,@(32,r4)
+ .global GLOBAL(movmemSI32)
+ HIDDEN_FUNC(GLOBAL(movmemSI32))
+ HIDDEN_ALIAS(movstrSI32,movmemSI32)
+GLOBAL(movmemSI32):
+ mov.l @(28,r5),r0
+ mov.l r0,@(28,r4)
+ .global GLOBAL(movmemSI28)
+ HIDDEN_FUNC(GLOBAL(movmemSI28))
+ HIDDEN_ALIAS(movstrSI28,movmemSI28)
+GLOBAL(movmemSI28):
+ mov.l @(24,r5),r0
+ mov.l r0,@(24,r4)
+ .global GLOBAL(movmemSI24)
+ HIDDEN_FUNC(GLOBAL(movmemSI24))
+ HIDDEN_ALIAS(movstrSI24,movmemSI24)
+GLOBAL(movmemSI24):
+ mov.l @(20,r5),r0
+ mov.l r0,@(20,r4)
+ .global GLOBAL(movmemSI20)
+ HIDDEN_FUNC(GLOBAL(movmemSI20))
+ HIDDEN_ALIAS(movstrSI20,movmemSI20)
+GLOBAL(movmemSI20):
+ mov.l @(16,r5),r0
+ mov.l r0,@(16,r4)
+ .global GLOBAL(movmemSI16)
+ HIDDEN_FUNC(GLOBAL(movmemSI16))
+ HIDDEN_ALIAS(movstrSI16,movmemSI16)
+GLOBAL(movmemSI16):
+ mov.l @(12,r5),r0
+ mov.l r0,@(12,r4)
+ .global GLOBAL(movmemSI12)
+ HIDDEN_FUNC(GLOBAL(movmemSI12))
+ HIDDEN_ALIAS(movstrSI12,movmemSI12)
+GLOBAL(movmemSI12):
+ mov.l @(8,r5),r0
+ mov.l r0,@(8,r4)
+ .global GLOBAL(movmemSI8)
+ HIDDEN_FUNC(GLOBAL(movmemSI8))
+ HIDDEN_ALIAS(movstrSI8,movmemSI8)
+GLOBAL(movmemSI8):
+ mov.l @(4,r5),r0
+ mov.l r0,@(4,r4)
+ .global GLOBAL(movmemSI4)
+ HIDDEN_FUNC(GLOBAL(movmemSI4))
+ HIDDEN_ALIAS(movstrSI4,movmemSI4)
+GLOBAL(movmemSI4):
+ mov.l @(0,r5),r0
+ rts
+ mov.l r0,@(0,r4)
+
+ ENDFUNC(GLOBAL(movmemSI64))
+ ENDFUNC(GLOBAL(movmemSI60))
+ ENDFUNC(GLOBAL(movmemSI56))
+ ENDFUNC(GLOBAL(movmemSI52))
+ ENDFUNC(GLOBAL(movmemSI48))
+ ENDFUNC(GLOBAL(movmemSI44))
+ ENDFUNC(GLOBAL(movmemSI40))
+ ENDFUNC(GLOBAL(movmemSI36))
+ ENDFUNC(GLOBAL(movmemSI32))
+ ENDFUNC(GLOBAL(movmemSI28))
+ ENDFUNC(GLOBAL(movmemSI24))
+ ENDFUNC(GLOBAL(movmemSI20))
+ ENDFUNC(GLOBAL(movmemSI16))
+ ENDFUNC(GLOBAL(movmemSI12))
+ ENDFUNC(GLOBAL(movmemSI8))
+ ENDFUNC(GLOBAL(movmemSI4))
+ ENDFUNC(GLOBAL(movmem))
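+
+/* The movmemSI* chain above amounts to a backwards word copy: each entry
+   point movmemSI(4n) stores word n-1 and falls through to the next one.
+   C sketch (illustrative only, not part of the build):
+
+void
+movmemSIn_ref (int *dst, const int *src, int nwords)	// nwords in 1..16
+{
+  while (nwords-- > 0)
+    dst[nwords] = src[nwords];	// highest word first, as above
+}
+*/
+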
+#endif
+
+#ifdef L_movmem_i4
+ .text
+ .global GLOBAL(movmem_i4_even)
+ .global GLOBAL(movmem_i4_odd)
+ .global GLOBAL(movmemSI12_i4)
+
+ HIDDEN_FUNC(GLOBAL(movmem_i4_even))
+ HIDDEN_FUNC(GLOBAL(movmem_i4_odd))
+ HIDDEN_FUNC(GLOBAL(movmemSI12_i4))
+
+ HIDDEN_ALIAS(movstr_i4_even,movmem_i4_even)
+ HIDDEN_ALIAS(movstr_i4_odd,movmem_i4_odd)
+ HIDDEN_ALIAS(movstrSI12_i4,movmemSI12_i4)
+
+ .p2align 5
+L_movmem_2mod4_end:
+ mov.l r0,@(16,r4)
+ rts
+ mov.l r1,@(20,r4)
+
+ .p2align 2
+
+GLOBAL(movmem_i4_even):
+ mov.l @r5+,r0
+ bra L_movmem_start_even
+ mov.l @r5+,r1
+
+GLOBAL(movmem_i4_odd):
+ mov.l @r5+,r1
+ add #-4,r4
+ mov.l @r5+,r2
+ mov.l @r5+,r3
+ mov.l r1,@(4,r4)
+ mov.l r2,@(8,r4)
+
+L_movmem_loop:
+ mov.l r3,@(12,r4)
+ dt r6
+ mov.l @r5+,r0
+ bt/s L_movmem_2mod4_end
+ mov.l @r5+,r1
+ add #16,r4
+L_movmem_start_even:
+ mov.l @r5+,r2
+ mov.l @r5+,r3
+ mov.l r0,@r4
+ dt r6
+ mov.l r1,@(4,r4)
+ bf/s L_movmem_loop
+ mov.l r2,@(8,r4)
+ rts
+ mov.l r3,@(12,r4)
+
+ ENDFUNC(GLOBAL(movmem_i4_even))
+ ENDFUNC(GLOBAL(movmem_i4_odd))
+
+ .p2align 4
+GLOBAL(movmemSI12_i4):
+ mov.l @r5,r0
+ mov.l @(4,r5),r1
+ mov.l @(8,r5),r2
+ mov.l r0,@r4
+ mov.l r1,@(4,r4)
+ rts
+ mov.l r2,@(8,r4)
+
+ ENDFUNC(GLOBAL(movmemSI12_i4))
+#endif
+
+#ifdef L_mulsi3
+
+
+ .global GLOBAL(mulsi3)
+ HIDDEN_FUNC(GLOBAL(mulsi3))
+
+! r4 = aabb
+! r5 = ccdd
+! r0 = aabb*ccdd via partial products
+!
+! if aa == 0 and cc == 0
+! r0 = bb*dd
+!
+! else
+! r0 = bb*dd + (aa*dd*65536) + (cc*bb*65536)
+!
+
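+! For reference, the same computation in C (illustrative only):
+!
+!   unsigned int mulsi3_ref (unsigned int r4, unsigned int r5)
+!   {
+!     unsigned int low = (r4 & 0xffff) * (r5 & 0xffff);   // bb*dd
+!     unsigned int mid = (r4 >> 16) * (r5 & 0xffff)       // aa*dd
+!                      + (r4 & 0xffff) * (r5 >> 16);      // cc*bb
+!     return low + (mid << 16);  // aa*cc falls outside 32 bits
+!   }
+!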
+GLOBAL(mulsi3):
+ mulu.w r4,r5 ! multiply the lsws macl=bb*dd
+ mov r5,r3 ! r3 = ccdd
+ swap.w r4,r2 ! r2 = bbaa
+ xtrct r2,r3 ! r3 = aacc
+ tst r3,r3 ! msws zero ?
+ bf hiset
+ rts ! yes - then we have the answer
+ sts macl,r0
+
+hiset: sts macl,r0 ! r0 = bb*dd
+ mulu.w r2,r5 ! brewing macl = aa*dd
+ sts macl,r1
+ mulu.w r3,r4 ! brewing macl = cc*bb
+ sts macl,r2
+ add r1,r2
+ shll16 r2
+ rts
+ add r2,r0
+
+ ENDFUNC(GLOBAL(mulsi3))
+#endif
+#endif /* ! __SH5__ */
+#ifdef L_sdivsi3_i4
+ .title "SH DIVIDE"
+!! 4 byte integer Divide code for the Renesas SH
+#ifdef __SH4__
+!! args in r4 and r5, result in fpul, clobber dr0, dr2
+
+ .global GLOBAL(sdivsi3_i4)
+ HIDDEN_FUNC(GLOBAL(sdivsi3_i4))
+GLOBAL(sdivsi3_i4):
+ lds r4,fpul
+ float fpul,dr0
+ lds r5,fpul
+ float fpul,dr2
+ fdiv dr2,dr0
+ rts
+ ftrc dr0,fpul
+
+ ENDFUNC(GLOBAL(sdivsi3_i4))
+#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) || (defined (__SH5__) && ! defined __SH4_NOFPU__)
+!! args in r4 and r5, result in fpul, clobber r2, dr0, dr2
+
+#if ! __SH5__ || __SH5__ == 32
+#if __SH5__
+ .mode SHcompact
+#endif
+ .global GLOBAL(sdivsi3_i4)
+ HIDDEN_FUNC(GLOBAL(sdivsi3_i4))
+GLOBAL(sdivsi3_i4):
+ sts.l fpscr,@-r15
+	mov #8,r2
+	swap.w r2,r2	! r2 = 0x80000: FPSCR.PR set (double-precision mode)
+	lds r2,fpscr
+ lds r4,fpul
+ float fpul,dr0
+ lds r5,fpul
+ float fpul,dr2
+ fdiv dr2,dr0
+ ftrc dr0,fpul
+ rts
+ lds.l @r15+,fpscr
+
+ ENDFUNC(GLOBAL(sdivsi3_i4))
+#endif /* ! __SH5__ || __SH5__ == 32 */
+#endif /* ! __SH4__ */
+#endif
+
+#ifdef L_sdivsi3
+/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
+ sh2e/sh3e code. */
+#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
+!!
+!! Steve Chamberlain
+!! sac@cygnus.com
+!!
+!!
+
+!! args in r4 and r5, result in r0 clobber r1, r2, r3, and t bit
+
+ .global GLOBAL(sdivsi3)
+#if __SHMEDIA__
+#if __SH5__ == 32
+ .section .text..SHmedia32,"ax"
+#else
+ .text
+#endif
+ .align 2
+#if 0
+/* The assembly code that follows is a hand-optimized version of the C
+ code that follows. Note that the registers that are modified are
+ exactly those listed as clobbered in the patterns divsi3_i1 and
+ divsi3_i1_media.
+
+int __sdivsi3 (i, j)
+ int i, j;
+{
+ register unsigned long long r18 asm ("r18");
+ register unsigned long long r19 asm ("r19");
+ register unsigned long long r0 asm ("r0") = 0;
+ register unsigned long long r1 asm ("r1") = 1;
+ register int r2 asm ("r2") = i >> 31;
+ register int r3 asm ("r3") = j >> 31;
+
+ r2 = r2 ? r2 : r1;
+ r3 = r3 ? r3 : r1;
+ r18 = i * r2;
+ r19 = j * r3;
+ r2 *= r3;
+
+ r19 <<= 31;
+ r1 <<= 31;
+ do
+ if (r18 >= r19)
+ r0 |= r1, r18 -= r19;
+ while (r19 >>= 1, r1 >>= 1);
+
+ return r2 * (int)r0;
+}
+*/
+GLOBAL(sdivsi3):
+ pt/l LOCAL(sdivsi3_dontadd), tr2
+ pt/l LOCAL(sdivsi3_loop), tr1
+ ptabs/l r18, tr0
+ movi 0, r0
+ movi 1, r1
+ shari.l r4, 31, r2
+ shari.l r5, 31, r3
+ cmveq r2, r1, r2
+ cmveq r3, r1, r3
+ muls.l r4, r2, r18
+ muls.l r5, r3, r19
+ muls.l r2, r3, r2
+ shlli r19, 31, r19
+ shlli r1, 31, r1
+LOCAL(sdivsi3_loop):
+ bgtu r19, r18, tr2
+ or r0, r1, r0
+ sub r18, r19, r18
+LOCAL(sdivsi3_dontadd):
+ shlri r1, 1, r1
+ shlri r19, 1, r19
+ bnei r1, 0, tr1
+ muls.l r0, r2, r0
+ add.l r0, r63, r0
+ blink tr0, r63
+#elif 0 /* ! 0 */
+ // inputs: r4,r5
+ // clobbered: r1,r2,r3,r18,r19,r20,r21,r25,tr0
+ // result in r0
+GLOBAL(sdivsi3):
+ // can create absolute value without extra latency,
+ // but dependent on proper sign extension of inputs:
+ // shari.l r5,31,r2
+ // xor r5,r2,r20
+ // sub r20,r2,r20 // r20 is now absolute value of r5, zero-extended.
+ shari.l r5,31,r2
+ ori r2,1,r2
+ muls.l r5,r2,r20 // r20 is now absolute value of r5, zero-extended.
+	movi 0xffffffffffffbb0c,r19 // shift count equiv 76
+ shari.l r4,31,r3
+ nsb r20,r0
+ shlld r20,r0,r25
+ shlri r25,48,r25
+ sub r19,r25,r1
+ mmulfx.w r1,r1,r2
+ mshflo.w r1,r63,r1
+ // If r4 was to be used in-place instead of r21, could use this sequence
+ // to compute absolute:
+ // sub r63,r4,r19 // compute absolute value of r4
+ // shlri r4,32,r3 // into lower 32 bit of r4, keeping
+ // mcmv r19,r3,r4 // the sign in the upper 32 bits intact.
+ ori r3,1,r3
+ mmulfx.w r25,r2,r2
+ sub r19,r0,r0
+ muls.l r4,r3,r21
+ msub.w r1,r2,r2
+ addi r2,-2,r1
+ mulu.l r21,r1,r19
+ mmulfx.w r2,r2,r2
+ shlli r1,15,r1
+ shlrd r19,r0,r19
+ mulu.l r19,r20,r3
+ mmacnfx.wl r25,r2,r1
+ ptabs r18,tr0
+ sub r21,r3,r25
+
+ mulu.l r25,r1,r2
+ addi r0,14,r0
+ xor r4,r5,r18
+ shlrd r2,r0,r2
+ mulu.l r2,r20,r3
+ add r19,r2,r19
+ shari.l r18,31,r18
+ sub r25,r3,r25
+
+ mulu.l r25,r1,r2
+ sub r25,r20,r25
+ add r19,r18,r19
+ shlrd r2,r0,r2
+ mulu.l r2,r20,r3
+ addi r25,1,r25
+ add r19,r2,r19
+
+ cmpgt r25,r3,r25
+ add.l r19,r25,r0
+ xor r0,r18,r0
+ blink tr0,r63
+#else /* ! 0 && ! 0 */
+
+ // inputs: r4,r5
+ // clobbered: r1,r18,r19,r20,r21,r25,tr0
+ // result in r0
+ HIDDEN_FUNC(GLOBAL(sdivsi3_2))
+#ifndef __pic__
+ FUNC(GLOBAL(sdivsi3))
+GLOBAL(sdivsi3): /* this is the shcompact entry point */
+ // The special SHmedia entry point sdivsi3_1 prevents accidental linking
+ // with the SHcompact implementation, which clobbers tr1 / tr2.
+ .global GLOBAL(sdivsi3_1)
+GLOBAL(sdivsi3_1):
+ .global GLOBAL(div_table_internal)
+ movi (GLOBAL(div_table_internal) >> 16) & 65535, r20
+ shori GLOBAL(div_table_internal) & 65535, r20
+#endif
+ .global GLOBAL(sdivsi3_2)
+ // div_table in r20
+ // clobbered: r1,r18,r19,r21,r25,tr0
+GLOBAL(sdivsi3_2):
+ nsb r5, r1
+	shlld r5, r1, r25 // normalize; [-2..-1, 1..2) in s2.62
+ shari r25, 58, r21 // extract 5(6) bit index (s2.4 with hole -1..1)
+ ldx.ub r20, r21, r19 // u0.8
+ shari r25, 32, r25 // normalize to s2.30
+ shlli r21, 1, r21
+ muls.l r25, r19, r19 // s2.38
+ ldx.w r20, r21, r21 // s2.14
+ ptabs r18, tr0
+ shari r19, 24, r19 // truncate to s2.14
+ sub r21, r19, r19 // some 11 bit inverse in s1.14
+ muls.l r19, r19, r21 // u0.28
+ sub r63, r1, r1
+ addi r1, 92, r1
+ muls.l r25, r21, r18 // s2.58
+ shlli r19, 45, r19 // multiply by two and convert to s2.58
+ /* bubble */
+ sub r19, r18, r18
+ shari r18, 28, r18 // some 22 bit inverse in s1.30
+ muls.l r18, r25, r0 // s2.60
+ muls.l r18, r4, r25 // s32.30
+ /* bubble */
+ shari r0, 16, r19 // s-16.44
+ muls.l r19, r18, r19 // s-16.74
+ shari r25, 63, r0
+ shari r4, 14, r18 // s19.-14
+ shari r19, 30, r19 // s-16.44
+ muls.l r19, r18, r19 // s15.30
+ xor r21, r0, r21 // You could also use the constant 1 << 27.
+ add r21, r25, r21
+ sub r21, r19, r21
+ shard r21, r1, r21
+ sub r21, r0, r0
+ blink tr0, r63
+#ifndef __pic__
+ ENDFUNC(GLOBAL(sdivsi3))
+#endif
+ ENDFUNC(GLOBAL(sdivsi3_2))
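+/* Reading aid (not from the original source): the refinement above is
+   the classic Newton-Raphson reciprocal iteration -- given x ~= 1/d,
+   x' = x * (2 - d * x) roughly doubles the number of correct bits.
+   That matches the inline annotations: a table estimate is refined to
+   "some 11 bit inverse in s1.14", then "some 22 bit inverse in s1.30",
+   which is enough for the final quotient after one correction step.  */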
+#endif
+#elif defined __SHMEDIA__
+/* m5compact-nofpu */
+ // clobbered: r18,r19,r20,r21,r25,tr0,tr1,tr2
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ FUNC(GLOBAL(sdivsi3))
+GLOBAL(sdivsi3):
+ pt/l LOCAL(sdivsi3_dontsub), tr0
+ pt/l LOCAL(sdivsi3_loop), tr1
+ ptabs/l r18,tr2
+ shari.l r4,31,r18
+ shari.l r5,31,r19
+ xor r4,r18,r20
+ xor r5,r19,r21
+ sub.l r20,r18,r20
+ sub.l r21,r19,r21
+ xor r18,r19,r19
+ shlli r21,32,r25
+ addi r25,-1,r21
+ addz.l r20,r63,r20
+LOCAL(sdivsi3_loop):
+ shlli r20,1,r20
+ bgeu/u r21,r20,tr0
+ sub r20,r21,r20
+LOCAL(sdivsi3_dontsub):
+ addi.l r25,-1,r25
+ bnei r25,-32,tr1
+ xor r20,r19,r20
+ sub.l r20,r19,r0
+ blink tr2,r63
+ ENDFUNC(GLOBAL(sdivsi3))
+#else /* ! __SHMEDIA__ */
+ FUNC(GLOBAL(sdivsi3))
+GLOBAL(sdivsi3):
+ mov r4,r1
+ mov r5,r0
+
+ tst r0,r0
+ bt div0
+ mov #0,r2
+ div0s r2,r1
+ subc r3,r3
+ subc r2,r1
+ div0s r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ div1 r0,r3
+ rotcl r1
+ addc r2,r1
+ rts
+ mov r1,r0
+
+
+div0: rts
+ mov #0,r0
+
+ ENDFUNC(GLOBAL(sdivsi3))
+#endif /* ! __SHMEDIA__ */
+#endif /* ! __SH4__ */
+#endif
+#ifdef L_udivsi3_i4
+
+ .title "SH DIVIDE"
+!! 4 byte integer Divide code for the Renesas SH
+#ifdef __SH4__
+!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4,
+!! and t bit
+
+ .global GLOBAL(udivsi3_i4)
+ HIDDEN_FUNC(GLOBAL(udivsi3_i4))
+GLOBAL(udivsi3_i4):
+ mov #1,r1
+ cmp/hi r1,r5
+ bf trivial
+ rotr r1
+ xor r1,r4
+ lds r4,fpul
+ mova L1,r0
+#ifdef FMOVD_WORKS
+ fmov.d @r0+,dr4
+#else
+ fmov.s @r0+,DR40
+ fmov.s @r0,DR41
+#endif
+ float fpul,dr0
+ xor r1,r5
+ lds r5,fpul
+ float fpul,dr2
+ fadd dr4,dr0
+ fadd dr4,dr2
+ fdiv dr2,dr0
+ rts
+ ftrc dr0,fpul
+
+trivial:
+ rts
+ lds r4,fpul
+
+ .align 2
+#ifdef FMOVD_WORKS
+ .align 3 ! make double below 8 byte aligned.
+#endif
+L1:
+ .double 2147483648
+
+ ENDFUNC(GLOBAL(udivsi3_i4))
+#elif defined (__SH5__) && ! defined (__SH4_NOFPU__)
+#if ! __SH5__ || __SH5__ == 32
+!! args in r4 and r5, result in fpul, clobber r20, r21, dr0, fr33
+ .mode SHmedia
+ .global GLOBAL(udivsi3_i4)
+ HIDDEN_FUNC(GLOBAL(udivsi3_i4))
+GLOBAL(udivsi3_i4):
+ addz.l r4,r63,r20
+ addz.l r5,r63,r21
+ fmov.qd r20,dr0
+ fmov.qd r21,dr32
+ ptabs r18,tr0
+ float.qd dr0,dr0
+ float.qd dr32,dr32
+ fdiv.d dr0,dr32,dr0
+ ftrc.dq dr0,dr32
+ fmov.s fr33,fr32
+ blink tr0,r63
+
+ ENDFUNC(GLOBAL(udivsi3_i4))
+#endif /* ! __SH5__ || __SH5__ == 32 */
+#elif defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
+!! args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4
+
+ .global GLOBAL(udivsi3_i4)
+ HIDDEN_FUNC(GLOBAL(udivsi3_i4))
+GLOBAL(udivsi3_i4):
+ mov #1,r1
+ cmp/hi r1,r5
+ bf trivial
+ sts.l fpscr,@-r15
+ mova L1,r0
+ lds.l @r0+,fpscr
+ rotr r1
+ xor r1,r4
+ lds r4,fpul
+#ifdef FMOVD_WORKS
+ fmov.d @r0+,dr4
+#else
+ fmov.s @r0+,DR40
+ fmov.s @r0,DR41
+#endif
+ float fpul,dr0
+ xor r1,r5
+ lds r5,fpul
+ float fpul,dr2
+ fadd dr4,dr0
+ fadd dr4,dr2
+ fdiv dr2,dr0
+ ftrc dr0,fpul
+ rts
+ lds.l @r15+,fpscr
+
+#ifdef FMOVD_WORKS
+ .align 3 ! make double below 8 byte aligned.
+#endif
+trivial:
+ rts
+ lds r4,fpul
+
+ .align 2
+L1:
+#ifndef FMOVD_WORKS
+ .long 0x80000
+#else
+ .long 0x180000
+#endif
+ .double 2147483648
+
+ ENDFUNC(GLOBAL(udivsi3_i4))
+#endif /* ! __SH4__ */
+#endif
+
+#ifdef L_udivsi3
+/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
+ sh2e/sh3e code. */
+#if (! defined(__SH4__) && ! defined (__SH4_SINGLE__)) || defined (__linux__)
+
+!! args in r4 and r5, result in r0, clobbers r4, pr, and t bit
+ .global GLOBAL(udivsi3)
+ HIDDEN_FUNC(GLOBAL(udivsi3))
+
+#if __SHMEDIA__
+#if __SH5__ == 32
+ .section .text..SHmedia32,"ax"
+#else
+ .text
+#endif
+ .align 2
+#if 0
+/* The assembly code that follows is a hand-optimized version of the C
+ code that follows. Note that the registers that are modified are
+ exactly those listed as clobbered in the patterns udivsi3_i1 and
+ udivsi3_i1_media.
+
+unsigned
+__udivsi3 (i, j)
+ unsigned i, j;
+{
+ register unsigned long long r0 asm ("r0") = 0;
+ register unsigned long long r18 asm ("r18") = 1;
+ register unsigned long long r4 asm ("r4") = i;
+ register unsigned long long r19 asm ("r19") = j;
+
+ r19 <<= 31;
+ r18 <<= 31;
+ do
+ if (r4 >= r19)
+ r0 |= r18, r4 -= r19;
+ while (r19 >>= 1, r18 >>= 1);
+
+ return r0;
+}
+*/
+GLOBAL(udivsi3):
+ pt/l LOCAL(udivsi3_dontadd), tr2
+ pt/l LOCAL(udivsi3_loop), tr1
+ ptabs/l r18, tr0
+ movi 0, r0
+ movi 1, r18
+ addz.l r5, r63, r19
+ addz.l r4, r63, r4
+ shlli r19, 31, r19
+ shlli r18, 31, r18
+LOCAL(udivsi3_loop):
+ bgtu r19, r4, tr2
+ or r0, r18, r0
+ sub r4, r19, r4
+LOCAL(udivsi3_dontadd):
+ shlri r18, 1, r18
+ shlri r19, 1, r19
+ bnei r18, 0, tr1
+ blink tr0, r63
+#else
+GLOBAL(udivsi3):
+ // inputs: r4,r5
+ // clobbered: r18,r19,r20,r21,r22,r25,tr0
+ // result in r0.
+ addz.l r5,r63,r22
+ nsb r22,r0
+ shlld r22,r0,r25
+ shlri r25,48,r25
+	movi 0xffffffffffffbb0c,r20 // shift count equiv 76
+ sub r20,r25,r21
+ mmulfx.w r21,r21,r19
+ mshflo.w r21,r63,r21
+ ptabs r18,tr0
+ mmulfx.w r25,r19,r19
+ sub r20,r0,r0
+ /* bubble */
+ msub.w r21,r19,r19
+ addi r19,-2,r21 /* It would be nice for scheduling to do this add to r21
+ before the msub.w, but we need a different value for
+ r19 to keep errors under control. */
+ mulu.l r4,r21,r18
+ mmulfx.w r19,r19,r19
+ shlli r21,15,r21
+ shlrd r18,r0,r18
+ mulu.l r18,r22,r20
+ mmacnfx.wl r25,r19,r21
+ /* bubble */
+ sub r4,r20,r25
+
+ mulu.l r25,r21,r19
+ addi r0,14,r0
+ /* bubble */
+ shlrd r19,r0,r19
+ mulu.l r19,r22,r20
+ add r18,r19,r18
+ /* bubble */
+ sub.l r25,r20,r25
+
+ mulu.l r25,r21,r19
+ addz.l r25,r63,r25
+ sub r25,r22,r25
+ shlrd r19,r0,r19
+ mulu.l r19,r22,r20
+ addi r25,1,r25
+ add r18,r19,r18
+
+ cmpgt r25,r20,r25
+ add.l r18,r25,r0
+ blink tr0,r63
+#endif
+#elif defined (__SHMEDIA__)
+/* m5compact-nofpu - more emphasis on code size than on speed, but don't
+ ignore speed altogether - div1 needs 9 cycles, subc 7 and rotcl 4.
+ So use a short shmedia loop. */
+ // clobbered: r20,r21,r25,tr0,tr1,tr2
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+GLOBAL(udivsi3):
+ pt/l LOCAL(udivsi3_dontsub), tr0
+ pt/l LOCAL(udivsi3_loop), tr1
+ ptabs/l r18,tr2
+ shlli r5,32,r25
+ addi r25,-1,r21
+ addz.l r4,r63,r20
+LOCAL(udivsi3_loop):
+ shlli r20,1,r20
+ bgeu/u r21,r20,tr0
+ sub r20,r21,r20
+LOCAL(udivsi3_dontsub):
+ addi.l r25,-1,r25
+ bnei r25,-32,tr1
+ add.l r20,r63,r0
+ blink tr2,r63
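+
+/* The loop above is a plain 32-step restoring division.  C sketch
+   (illustrative only); subtracting (d << 32) - 1 removes the divisor
+   from the high half and sets the quotient bit in the low half at once:
+
+unsigned int
+udivsi3_ref (unsigned int n, unsigned int d)
+{
+  unsigned long long acc = n;					// r20
+  unsigned long long dm1 = ((unsigned long long) d << 32) - 1;	// r21
+  int i;
+  for (i = 0; i < 32; i++)
+    {
+      acc <<= 1;
+      if (acc > dm1)		// high half >= d
+	acc -= dm1;
+    }
+  return (unsigned int) acc;	// low half holds the quotient
+}
+*/
+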
+#else /* ! defined (__SHMEDIA__) */
+LOCAL(div8):
+ div1 r5,r4
+LOCAL(div7):
+ div1 r5,r4; div1 r5,r4; div1 r5,r4
+ div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
+
+LOCAL(divx4):
+ div1 r5,r4; rotcl r0
+ div1 r5,r4; rotcl r0
+ div1 r5,r4; rotcl r0
+ rts; div1 r5,r4
+
+GLOBAL(udivsi3):
+ sts.l pr,@-r15
+ extu.w r5,r0
+ cmp/eq r5,r0
+#ifdef __sh1__
+ bf LOCAL(large_divisor)
+#else
+ bf/s LOCAL(large_divisor)
+#endif
+ div0u
+ swap.w r4,r0
+ shlr16 r4
+ bsr LOCAL(div8)
+ shll16 r5
+ bsr LOCAL(div7)
+ div1 r5,r4
+ xtrct r4,r0
+ xtrct r0,r4
+ bsr LOCAL(div8)
+ swap.w r4,r4
+ bsr LOCAL(div7)
+ div1 r5,r4
+ lds.l @r15+,pr
+ xtrct r4,r0
+ swap.w r0,r0
+ rotcl r0
+ rts
+ shlr16 r5
+
+LOCAL(large_divisor):
+#ifdef __sh1__
+ div0u
+#endif
+ mov #0,r0
+ xtrct r4,r0
+ xtrct r0,r4
+ bsr LOCAL(divx4)
+ rotcl r0
+ bsr LOCAL(divx4)
+ rotcl r0
+ bsr LOCAL(divx4)
+ rotcl r0
+ bsr LOCAL(divx4)
+ rotcl r0
+ lds.l @r15+,pr
+ rts
+ rotcl r0
+
+ ENDFUNC(GLOBAL(udivsi3))
+#endif /* ! __SHMEDIA__ */
+#endif /* __SH4__ */
+#endif /* L_udivsi3 */
+
+#ifdef L_udivdi3
+#ifdef __SHMEDIA__
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ .global GLOBAL(udivdi3)
+ FUNC(GLOBAL(udivdi3))
+GLOBAL(udivdi3):
+ HIDDEN_ALIAS(udivdi3_internal,udivdi3)
+ shlri r3,1,r4
+ nsb r4,r22
+ shlld r3,r22,r6
+ shlri r6,49,r5
+ movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
+ sub r21,r5,r1
+ mmulfx.w r1,r1,r4
+ mshflo.w r1,r63,r1
+	sub r63,r22,r20 // r20 = 64 - r22 mod 64 (r63 reads as 0 == 64 % 64)
+ mmulfx.w r5,r4,r4
+ pta LOCAL(large_divisor),tr0
+ addi r20,32,r9
+ msub.w r1,r4,r1
+ madd.w r1,r1,r1
+ mmulfx.w r1,r1,r4
+ shlri r6,32,r7
+ bgt/u r9,r63,tr0 // large_divisor
+ mmulfx.w r5,r4,r4
+ shlri r2,32+14,r19
+ addi r22,-31,r0
+ msub.w r1,r4,r1
+
+ mulu.l r1,r7,r4
+ addi r1,-3,r5
+ mulu.l r5,r19,r5
+ sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
+ shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
+ the case may be, %0000000000000000 000.11111111111, still */
+ muls.l r1,r4,r4 /* leaving at least one sign bit. */
+ mulu.l r5,r3,r8
+ mshalds.l r1,r21,r1
+ shari r4,26,r4
+ shlld r8,r0,r8
+ add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
+ sub r2,r8,r2
+ /* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
+
+ shlri r2,22,r21
+ mulu.l r21,r1,r21
+ shlld r5,r0,r8
+ addi r20,30-22,r0
+ shlrd r21,r0,r21
+ mulu.l r21,r3,r5
+ add r8,r21,r8
+ mcmpgt.l r21,r63,r21 // See Note 1
+ addi r20,30,r0
+ mshfhi.l r63,r21,r21
+ sub r2,r5,r2
+ andc r2,r21,r2
+
+ /* small divisor: need a third divide step */
+ mulu.l r2,r1,r7
+ ptabs r18,tr0
+ addi r2,1,r2
+ shlrd r7,r0,r7
+ mulu.l r7,r3,r5
+ add r8,r7,r8
+ sub r2,r3,r2
+ cmpgt r2,r5,r5
+ add r8,r5,r2
+ /* could test r3 here to check for divide by zero. */
+ blink tr0,r63
+
+LOCAL(large_divisor):
+ mmulfx.w r5,r4,r4
+ shlrd r2,r9,r25
+ shlri r25,32,r8
+ msub.w r1,r4,r1
+
+ mulu.l r1,r7,r4
+ addi r1,-3,r5
+ mulu.l r5,r8,r5
+ sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
+ shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
+ the case may be, %0000000000000000 000.11111111111, still */
+ muls.l r1,r4,r4 /* leaving at least one sign bit. */
+ shlri r5,14-1,r8
+ mulu.l r8,r7,r5
+ mshalds.l r1,r21,r1
+ shari r4,26,r4
+ add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
+ sub r25,r5,r25
+ /* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
+
+ shlri r25,22,r21
+ mulu.l r21,r1,r21
+ pta LOCAL(no_lo_adj),tr0
+ addi r22,32,r0
+ shlri r21,40,r21
+ mulu.l r21,r7,r5
+ add r8,r21,r8
+ shlld r2,r0,r2
+ sub r25,r5,r25
+ bgtu/u r7,r25,tr0 // no_lo_adj
+ addi r8,1,r8
+ sub r25,r7,r25
+LOCAL(no_lo_adj):
+ mextr4 r2,r25,r2
+
+ /* large_divisor: only needs a few adjustments. */
+ mulu.l r8,r6,r5
+ ptabs r18,tr0
+ /* bubble */
+ cmpgtu r5,r2,r5
+ sub r8,r5,r2
+ blink tr0,r63
+ ENDFUNC(GLOBAL(udivdi3))
+/* Note 1: To shift the result of the second divide stage so that the result
+ always fits into 32 bits, yet we still reduce the rest sufficiently
+ would require a lot of instructions to do the shifts just right. Using
+ the full 64 bit shift result to multiply with the divisor would require
+ four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
+ Fortunately, if the upper 32 bits of the shift result are nonzero, we
+ know that the rest after taking this partial result into account will
+ fit into 32 bits. So we just clear the upper 32 bits of the rest if the
+ upper 32 bits of the partial result are nonzero. */
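+/* In C terms (sketch): if (partial >> 32) rest &= 0xffffffff; -- done
+   branch-free above with the mcmpgt.l / mshfhi.l / andc sequence.  */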
+#endif /* __SHMEDIA__ */
+#endif /* L_udivdi3 */
+
+#ifdef L_divdi3
+#ifdef __SHMEDIA__
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ .global GLOBAL(divdi3)
+ FUNC(GLOBAL(divdi3))
+GLOBAL(divdi3):
+ pta GLOBAL(udivdi3_internal),tr0
+ shari r2,63,r22
+ shari r3,63,r23
+ xor r2,r22,r2
+ xor r3,r23,r3
+ sub r2,r22,r2
+ sub r3,r23,r3
+ beq/u r22,r23,tr0
+ ptabs r18,tr1
+ blink tr0,r18
+ sub r63,r2,r2
+ blink tr1,r63
+ ENDFUNC(GLOBAL(divdi3))
+#endif /* __SHMEDIA__ */
+#endif /* L_divdi3 */
+
+#ifdef L_umoddi3
+#ifdef __SHMEDIA__
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ .global GLOBAL(umoddi3)
+ FUNC(GLOBAL(umoddi3))
+GLOBAL(umoddi3):
+ HIDDEN_ALIAS(umoddi3_internal,umoddi3)
+ shlri r3,1,r4
+ nsb r4,r22
+ shlld r3,r22,r6
+ shlri r6,49,r5
+ movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
+ sub r21,r5,r1
+ mmulfx.w r1,r1,r4
+ mshflo.w r1,r63,r1
+	sub r63,r22,r20 // r20 = 64 - r22 mod 64 (r63 reads as 0 == 64 % 64)
+ mmulfx.w r5,r4,r4
+ pta LOCAL(large_divisor),tr0
+ addi r20,32,r9
+ msub.w r1,r4,r1
+ madd.w r1,r1,r1
+ mmulfx.w r1,r1,r4
+ shlri r6,32,r7
+ bgt/u r9,r63,tr0 // large_divisor
+ mmulfx.w r5,r4,r4
+ shlri r2,32+14,r19
+ addi r22,-31,r0
+ msub.w r1,r4,r1
+
+ mulu.l r1,r7,r4
+ addi r1,-3,r5
+ mulu.l r5,r19,r5
+ sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
+ shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
+ the case may be, %0000000000000000 000.11111111111, still */
+ muls.l r1,r4,r4 /* leaving at least one sign bit. */
+ mulu.l r5,r3,r5
+ mshalds.l r1,r21,r1
+ shari r4,26,r4
+ shlld r5,r0,r5
+ add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
+ sub r2,r5,r2
+ /* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
+
+ shlri r2,22,r21
+ mulu.l r21,r1,r21
+ addi r20,30-22,r0
+ /* bubble */ /* could test r3 here to check for divide by zero. */
+ shlrd r21,r0,r21
+ mulu.l r21,r3,r5
+ mcmpgt.l r21,r63,r21 // See Note 1
+ addi r20,30,r0
+ mshfhi.l r63,r21,r21
+ sub r2,r5,r2
+ andc r2,r21,r2
+
+ /* small divisor: need a third divide step */
+ mulu.l r2,r1,r7
+ ptabs r18,tr0
+ sub r2,r3,r8 /* re-use r8 here for rest - r3 */
+ shlrd r7,r0,r7
+ mulu.l r7,r3,r5
+ /* bubble */
+ addi r8,1,r7
+ cmpgt r7,r5,r7
+ cmvne r7,r8,r2
+ sub r2,r5,r2
+ blink tr0,r63
+
+LOCAL(large_divisor):
+ mmulfx.w r5,r4,r4
+ shlrd r2,r9,r25
+ shlri r25,32,r8
+ msub.w r1,r4,r1
+
+ mulu.l r1,r7,r4
+ addi r1,-3,r5
+ mulu.l r5,r8,r5
+ sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
+ shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
+ the case may be, %0000000000000000 000.11111111111, still */
+ muls.l r1,r4,r4 /* leaving at least one sign bit. */
+ shlri r5,14-1,r8
+ mulu.l r8,r7,r5
+ mshalds.l r1,r21,r1
+ shari r4,26,r4
+ add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
+ sub r25,r5,r25
+ /* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
+
+ shlri r25,22,r21
+ mulu.l r21,r1,r21
+ pta LOCAL(no_lo_adj),tr0
+ addi r22,32,r0
+ shlri r21,40,r21
+ mulu.l r21,r7,r5
+ add r8,r21,r8
+ shlld r2,r0,r2
+ sub r25,r5,r25
+ bgtu/u r7,r25,tr0 // no_lo_adj
+ addi r8,1,r8
+ sub r25,r7,r25
+LOCAL(no_lo_adj):
+ mextr4 r2,r25,r2
+
+ /* large_divisor: only needs a few adjustments. */
+ mulu.l r8,r6,r5
+ ptabs r18,tr0
+ add r2,r6,r7
+ cmpgtu r5,r2,r8
+ cmvne r8,r7,r2
+ sub r2,r5,r2
+ shlrd r2,r22,r2
+ blink tr0,r63
+ ENDFUNC(GLOBAL(umoddi3))
+/* Note 1: To shift the result of the second divide stage so that the result
+ always fits into 32 bits, yet we still reduce the rest sufficiently
+ would require a lot of instructions to do the shifts just right. Using
+ the full 64 bit shift result to multiply with the divisor would require
+ four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
+ Fortunately, if the upper 32 bits of the shift result are nonzero, we
+ know that the rest after taking this partial result into account will
+ fit into 32 bits. So we just clear the upper 32 bits of the rest if the
+ upper 32 bits of the partial result are nonzero. */
+#endif /* __SHMEDIA__ */
+#endif /* L_umoddi3 */
+
+#ifdef L_moddi3
+#ifdef __SHMEDIA__
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ .global GLOBAL(moddi3)
+ FUNC(GLOBAL(moddi3))
+GLOBAL(moddi3):
+ pta GLOBAL(umoddi3_internal),tr0
+ shari r2,63,r22
+ shari r3,63,r23
+ xor r2,r22,r2
+ xor r3,r23,r3
+ sub r2,r22,r2
+ sub r3,r23,r3
+ beq/u r22,r63,tr0
+ ptabs r18,tr1
+ blink tr0,r18
+ sub r63,r2,r2
+ blink tr1,r63
+ ENDFUNC(GLOBAL(moddi3))
+#endif /* __SHMEDIA__ */
+#endif /* L_moddi3 */
+
+#ifdef L_set_fpscr
+#if !defined (__SH2A_NOFPU__)
+#if defined (__SH2E__) || defined (__SH2A__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || __SH5__ == 32
+#ifdef __SH5__
+ .mode SHcompact
+#endif
+ .global GLOBAL(set_fpscr)
+ HIDDEN_FUNC(GLOBAL(set_fpscr))
+GLOBAL(set_fpscr):
+ lds r4,fpscr
+#ifdef __PIC__
+ mov.l r12,@-r15
+#ifdef __vxworks
+ mov.l LOCAL(set_fpscr_L0_base),r12
+ mov.l LOCAL(set_fpscr_L0_index),r0
+ mov.l @r12,r12
+ mov.l @(r0,r12),r12
+#else
+ mova LOCAL(set_fpscr_L0),r0
+ mov.l LOCAL(set_fpscr_L0),r12
+ add r0,r12
+#endif
+ mov.l LOCAL(set_fpscr_L1),r0
+ mov.l @(r0,r12),r1
+ mov.l @r15+,r12
+#else
+ mov.l LOCAL(set_fpscr_L1),r1
+#endif
+ swap.w r4,r0
+ or #24,r0
+#ifndef FMOVD_WORKS
+ xor #16,r0
+#endif
+#if defined(__SH4__) || defined (__SH2A_DOUBLE__)
+ swap.w r0,r3
+ mov.l r3,@(4,r1)
+#else /* defined (__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */
+ swap.w r0,r2
+ mov.l r2,@r1
+#endif
+#ifndef FMOVD_WORKS
+ xor #8,r0
+#else
+ xor #24,r0
+#endif
+#if defined(__SH4__) || defined (__SH2A_DOUBLE__)
+ swap.w r0,r2
+ rts
+ mov.l r2,@r1
+#else /* defined(__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */
+ swap.w r0,r3
+ rts
+ mov.l r3,@(4,r1)
+#endif
+ .align 2
+#ifdef __PIC__
+#ifdef __vxworks
+LOCAL(set_fpscr_L0_base):
+ .long ___GOTT_BASE__
+LOCAL(set_fpscr_L0_index):
+ .long ___GOTT_INDEX__
+#else
+LOCAL(set_fpscr_L0):
+ .long _GLOBAL_OFFSET_TABLE_
+#endif
+LOCAL(set_fpscr_L1):
+ .long GLOBAL(fpscr_values@GOT)
+#else
+LOCAL(set_fpscr_L1):
+ .long GLOBAL(fpscr_values)
+#endif
+
+ ENDFUNC(GLOBAL(set_fpscr))
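+
+/* In effect (C sketch, illustrative only; PR is FPSCR bit 19, SZ bit 20;
+   this shows the __SH4__ / __SH2A_DOUBLE__ store order -- the single-only
+   variants store the two entries the other way around):
+
+extern unsigned int __fpscr_values[2];
+
+void
+set_fpscr_ref (unsigned int v)
+{
+  // the hardware FPSCR itself is loaded with v (lds r4,fpscr above)
+  __fpscr_values[0] = v & ~(3u << 19);			// PR=0, SZ=0
+  __fpscr_values[1] = (v & ~(1u << 20)) | (1u << 19);	// PR=1; with
+		// FMOVD_WORKS the SZ bit is set in this entry as well
+}
+*/
+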
+#ifndef NO_FPSCR_VALUES
+#ifdef __ELF__
+ .comm GLOBAL(fpscr_values),8,4
+#else
+ .comm GLOBAL(fpscr_values),8
+#endif /* ELF */
+#endif /* NO_FPSCR_VALUES */
+#endif /* SH2E / SH3E / SH4 */
+#endif /* __SH2A_NOFPU__ */
+#endif /* L_set_fpscr */
+#ifdef L_ic_invalidate
+#if __SH5__ == 32
+ .mode SHmedia
+ .section .text..SHmedia32,"ax"
+ .align 2
+ .global GLOBAL(init_trampoline)
+ HIDDEN_FUNC(GLOBAL(init_trampoline))
+GLOBAL(init_trampoline):
+ st.l r0,8,r2
+#ifdef __LITTLE_ENDIAN__
+ movi 9,r20
+ shori 0x402b,r20
+ shori 0xd101,r20
+ shori 0xd002,r20
+#else
+ movi 0xffffffffffffd002,r20
+ shori 0xd101,r20
+ shori 0x402b,r20
+ shori 9,r20
+#endif
+ st.q r0,0,r20
+ st.l r0,12,r3
+ ENDFUNC(GLOBAL(init_trampoline))
+ .global GLOBAL(ic_invalidate)
+ HIDDEN_FUNC(GLOBAL(ic_invalidate))
+GLOBAL(ic_invalidate):
+ ocbwb r0,0
+ synco
+ icbi r0, 0
+ ptabs r18, tr0
+ synci
+ blink tr0, r63
+ ENDFUNC(GLOBAL(ic_invalidate))
+#elif defined(__SH4A__)
+ .global GLOBAL(ic_invalidate)
+ HIDDEN_FUNC(GLOBAL(ic_invalidate))
+GLOBAL(ic_invalidate):
+ ocbwb @r4
+ synco
+ icbi @r4
+ rts
+ nop
+ ENDFUNC(GLOBAL(ic_invalidate))
+#elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__))
+ /* For system code, we use ic_invalidate_line_i, but user code
+ needs a different mechanism. A kernel call is generally not
+ available, and it would also be slow. Different SH4 variants use
+ different sizes and associativities of the Icache. We use a small
+ bit of dispatch code that can be put hidden in every shared object,
+ which calls the actual processor-specific invalidation code in a
+ separate module.
+ Or if you have operating system support, the OS could mmap the
+   processor-specific code from a single page, since it is highly
+ repetitive. */
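+   /* Dispatch sketch (illustrative C, not from the original source):
+      with base = &__ic_invalidate_array and mask = the constant stored
+      at base + 8,
+
+	goto *(base + ((addr - base) & mask));
+
+      i.e. jump to the slot of the array whose icache index matches addr;
+      the code there branches once per cache way, displacing every line
+      that addr can map to.  */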
+ .global GLOBAL(ic_invalidate)
+ HIDDEN_FUNC(GLOBAL(ic_invalidate))
+GLOBAL(ic_invalidate):
+#ifdef __pic__
+#ifdef __vxworks
+ mov.l 1f,r1
+ mov.l 2f,r0
+ mov.l @r1,r1
+ mov.l 0f,r2
+ mov.l @(r0,r1),r0
+#else
+ mov.l 1f,r1
+ mova 1f,r0
+ mov.l 0f,r2
+ add r1,r0
+#endif
+ mov.l @(r0,r2),r1
+#else
+ mov.l 0f,r1
+#endif
+ ocbwb @r4
+ mov.l @(8,r1),r0
+ sub r1,r4
+ and r4,r0
+ add r1,r0
+ jmp @r0
+ mov.l @(4,r1),r0
+ .align 2
+#ifndef __pic__
+0: .long GLOBAL(ic_invalidate_array)
+#else /* __pic__ */
+ .global GLOBAL(ic_invalidate_array)
+0: .long GLOBAL(ic_invalidate_array)@GOT
+#ifdef __vxworks
+1: .long ___GOTT_BASE__
+2: .long ___GOTT_INDEX__
+#else
+1: .long _GLOBAL_OFFSET_TABLE_
+#endif
+ ENDFUNC(GLOBAL(ic_invalidate))
+#endif /* __pic__ */
+#endif /* SH4 */
+#endif /* L_ic_invalidate */
+
+#ifdef L_ic_invalidate_array
+#if defined(__SH4A__) || (defined (__FORCE_SH4A__) && (defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__))))
+	/* This is needed when an SH4 dso with trampolines is used on SH4A.  */
+	.global GLOBAL(ic_invalidate_array)
+ FUNC(GLOBAL(ic_invalidate_array))
+GLOBAL(ic_invalidate_array):
+ add r1,r4
+ synco
+ icbi @r4
+ rts
+ nop
+ .align 2
+ .long 0
+ ENDFUNC(GLOBAL(ic_invalidate_array))
+#elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || (defined(__SH4_NOFPU__) && !defined(__SH5__))
+ .global GLOBAL(ic_invalidate_array)
+ .p2align 5
+ FUNC(GLOBAL(ic_invalidate_array))
+/* This must be aligned to the beginning of a cache line. */
+GLOBAL(ic_invalidate_array):
+#ifndef WAYS
+#define WAYS 4
+#define WAY_SIZE 0x4000
+#endif
+#if WAYS == 1
+ .rept WAY_SIZE * WAYS / 32
+ rts
+ nop
+ .rept 7
+ .long WAY_SIZE - 32
+ .endr
+ .endr
+#elif WAYS <= 6
+ .rept WAY_SIZE * WAYS / 32
+ braf r0
+ add #-8,r0
+ .long WAY_SIZE + 8
+ .long WAY_SIZE - 32
+ .rept WAYS-2
+ braf r0
+ nop
+ .endr
+ .rept 7 - WAYS
+ rts
+ nop
+ .endr
+ .endr
+#else /* WAYS > 6 */
+ /* This variant needs two different pages for mmap-ing. */
+ .rept WAYS-1
+ .rept WAY_SIZE / 32
+ braf r0
+ nop
+ .long WAY_SIZE
+ .rept 6
+ .long WAY_SIZE - 32
+ .endr
+ .endr
+ .endr
+ .rept WAY_SIZE / 32
+ rts
+ .rept 15
+ nop
+ .endr
+ .endr
+#endif /* WAYS */
+ ENDFUNC(GLOBAL(ic_invalidate_array))
+#endif /* SH4 */
+#endif /* L_ic_invalidate_array */
+
+#if defined (__SH5__) && __SH5__ == 32
+#ifdef L_shcompact_call_trampoline
+ .section .rodata
+ .align 1
+LOCAL(ct_main_table):
+.word LOCAL(ct_r2_fp) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r2_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r2_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r3_fp) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r3_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r3_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r4_fp) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r4_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r4_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r5_fp) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r5_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r5_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r6_fph) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r6_fpl) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r6_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r6_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r7_fph) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r7_fpl) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r7_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r7_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r8_fph) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r8_fpl) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r8_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r8_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r9_fph) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r9_fpl) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r9_ld) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r9_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_pop_seq) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_pop_seq) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_r9_pop) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_ret_wide) - datalabel LOCAL(ct_main_label)
+.word LOCAL(ct_call_func) - datalabel LOCAL(ct_main_label)
+ .mode SHmedia
+ .section .text..SHmedia32, "ax"
+ .align 2
+
+ /* This function loads 64-bit general-purpose registers from the
+ stack, from a memory address contained in them, or from an FP
+ register, according to a cookie passed in r1. Its execution
+ time is linear in the number of registers that actually have
+ to be copied. See sh.h for details on the actual bit pattern.
+
+ The function to be called is passed in r0. If a 32-bit return
+ value is expected, the actual function will be tail-called;
+ otherwise, the return address will be stored in r10 (which the
+ caller should expect to be clobbered) and the return value
+ will be expanded into r2/r3 upon return. */
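+
+ /* A loose C model of the dispatch loop below (a sketch only; the
+    cookie layout is defined by sh.h, not here):
+
+	while (cookie != 0)
+	  {
+	    i = nsb (cookie);	// locate the leading group of set bits
+	    handler_for (i);	// 16-bit entry in ct_main_table
+	  }			// each handler clears its bits in r1
+
+    Most handlers branch back to ct_loop; the terminal entries fall
+    through to the actual call.  */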
+
+ .global GLOBAL(GCC_shcompact_call_trampoline)
+ FUNC(GLOBAL(GCC_shcompact_call_trampoline))
+GLOBAL(GCC_shcompact_call_trampoline):
+ ptabs/l r0, tr0 /* Prepare to call the actual function. */
+ movi ((datalabel LOCAL(ct_main_table) - 31 * 2) >> 16) & 65535, r0
+ pt/l LOCAL(ct_loop), tr1
+ addz.l r1, r63, r1
+ shori ((datalabel LOCAL(ct_main_table) - 31 * 2)) & 65535, r0
+LOCAL(ct_loop):
+ nsb r1, r28
+ shlli r28, 1, r29
+ ldx.w r0, r29, r30
+LOCAL(ct_main_label):
+ ptrel/l r30, tr2
+ blink tr2, r63
+LOCAL(ct_r2_fp): /* Copy r2 from an FP register. */
+ /* It must be dr0, so just do it. */
+ fmov.dq dr0, r2
+ movi 7, r30
+ shlli r30, 29, r31
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r3_fp): /* Copy r3 from an FP register. */
+ /* It is either dr0 or dr2. */
+ movi 7, r30
+ shlri r1, 26, r32
+ shlli r30, 26, r31
+ andc r1, r31, r1
+ fmov.dq dr0, r3
+ beqi/l r32, 4, tr1
+ fmov.dq dr2, r3
+ blink tr1, r63
+LOCAL(ct_r4_fp): /* Copy r4 from an FP register. */
+ shlri r1, 23 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r4_fp_copy) - datalabel LOCAL(ct_r4_fp_base), r32
+LOCAL(ct_r4_fp_base):
+ ptrel/l r32, tr2
+ movi 7, r30
+ shlli r30, 23, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r4_fp_copy):
+ fmov.dq dr0, r4
+ blink tr1, r63
+ fmov.dq dr2, r4
+ blink tr1, r63
+ fmov.dq dr4, r4
+ blink tr1, r63
+LOCAL(ct_r5_fp): /* Copy r5 from an FP register. */
+ shlri r1, 20 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r5_fp_copy) - datalabel LOCAL(ct_r5_fp_base), r32
+LOCAL(ct_r5_fp_base):
+ ptrel/l r32, tr2
+ movi 7, r30
+ shlli r30, 20, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r5_fp_copy):
+ fmov.dq dr0, r5
+ blink tr1, r63
+ fmov.dq dr2, r5
+ blink tr1, r63
+ fmov.dq dr4, r5
+ blink tr1, r63
+ fmov.dq dr6, r5
+ blink tr1, r63
+LOCAL(ct_r6_fph): /* Copy r6 from a high FP register. */
+ /* It must be dr8. */
+ fmov.dq dr8, r6
+ movi 15, r30
+ shlli r30, 16, r31
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r6_fpl): /* Copy r6 from a low FP register. */
+ shlri r1, 16 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r6_fp_copy) - datalabel LOCAL(ct_r6_fp_base), r32
+LOCAL(ct_r6_fp_base):
+ ptrel/l r32, tr2
+ movi 7, r30
+ shlli r30, 16, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r6_fp_copy):
+ fmov.dq dr0, r6
+ blink tr1, r63
+ fmov.dq dr2, r6
+ blink tr1, r63
+ fmov.dq dr4, r6
+ blink tr1, r63
+ fmov.dq dr6, r6
+ blink tr1, r63
+LOCAL(ct_r7_fph): /* Copy r7 from a high FP register. */
+ /* It is either dr8 or dr10. */
+ movi 15 << 12, r31
+ shlri r1, 12, r32
+ andc r1, r31, r1
+ fmov.dq dr8, r7
+ beqi/l r32, 8, tr1
+ fmov.dq dr10, r7
+ blink tr1, r63
+LOCAL(ct_r7_fpl): /* Copy r7 from a low FP register. */
+ shlri r1, 12 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r7_fp_copy) - datalabel LOCAL(ct_r7_fp_base), r32
+LOCAL(ct_r7_fp_base):
+ ptrel/l r32, tr2
+ movi 7 << 12, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r7_fp_copy):
+ fmov.dq dr0, r7
+ blink tr1, r63
+ fmov.dq dr2, r7
+ blink tr1, r63
+ fmov.dq dr4, r7
+ blink tr1, r63
+ fmov.dq dr6, r7
+ blink tr1, r63
+LOCAL(ct_r8_fph): /* Copy r8 from a high FP register. */
+ /* It is either dr8 or dr10. */
+ movi 15 << 8, r31
+ andi r1, 1 << 8, r32
+ andc r1, r31, r1
+ fmov.dq dr8, r8
+ beq/l r32, r63, tr1
+ fmov.dq dr10, r8
+ blink tr1, r63
+LOCAL(ct_r8_fpl): /* Copy r8 from a low FP register. */
+ shlri r1, 8 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r8_fp_copy) - datalabel LOCAL(ct_r8_fp_base), r32
+LOCAL(ct_r8_fp_base):
+ ptrel/l r32, tr2
+ movi 7 << 8, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r8_fp_copy):
+ fmov.dq dr0, r8
+ blink tr1, r63
+ fmov.dq dr2, r8
+ blink tr1, r63
+ fmov.dq dr4, r8
+ blink tr1, r63
+ fmov.dq dr6, r8
+ blink tr1, r63
+LOCAL(ct_r9_fph): /* Copy r9 from a high FP register. */
+ /* It is either dr8 or dr10. */
+ movi 15 << 4, r31
+ andi r1, 1 << 4, r32
+ andc r1, r31, r1
+ fmov.dq dr8, r9
+ beq/l r32, r63, tr1
+ fmov.dq dr10, r9
+ blink tr1, r63
+LOCAL(ct_r9_fpl): /* Copy r9 from a low FP register. */
+ shlri r1, 4 - 3, r34
+ andi r34, 3 << 3, r33
+ addi r33, LOCAL(ct_r9_fp_copy) - datalabel LOCAL(ct_r9_fp_base), r32
+LOCAL(ct_r9_fp_base):
+ ptrel/l r32, tr2
+ movi 7 << 4, r31
+ andc r1, r31, r1
+ blink tr2, r63
+LOCAL(ct_r9_fp_copy):
+ fmov.dq dr0, r9
+ blink tr1, r63
+ fmov.dq dr2, r9
+ blink tr1, r63
+ fmov.dq dr4, r9
+ blink tr1, r63
+ fmov.dq dr6, r9
+ blink tr1, r63
+LOCAL(ct_r2_ld): /* Copy r2 from a memory address. */
+ pt/l LOCAL(ct_r2_load), tr2
+ movi 3, r30
+ shlli r30, 29, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r2, 8, r3
+ ldx.q r2, r63, r2
+ /* Fall through. */
+LOCAL(ct_r3_ld): /* Copy r3 from a memory address. */
+ pt/l LOCAL(ct_r3_load), tr2
+ movi 3, r30
+ shlli r30, 26, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r3, 8, r4
+ ldx.q r3, r63, r3
+LOCAL(ct_r4_ld): /* Copy r4 from a memory address. */
+ pt/l LOCAL(ct_r4_load), tr2
+ movi 3, r30
+ shlli r30, 23, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r4, 8, r5
+ ldx.q r4, r63, r4
+LOCAL(ct_r5_ld): /* Copy r5 from a memory address. */
+ pt/l LOCAL(ct_r5_load), tr2
+ movi 3, r30
+ shlli r30, 20, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r5, 8, r6
+ ldx.q r5, r63, r5
+LOCAL(ct_r6_ld): /* Copy r6 from a memory address. */
+ pt/l LOCAL(ct_r6_load), tr2
+ movi 3 << 16, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r6, 8, r7
+ ldx.q r6, r63, r6
+LOCAL(ct_r7_ld): /* Copy r7 from a memory address. */
+ pt/l LOCAL(ct_r7_load), tr2
+ movi 3 << 12, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r7, 8, r8
+ ldx.q r7, r63, r7
+LOCAL(ct_r8_ld): /* Copy r8 from a memory address. */
+ pt/l LOCAL(ct_r8_load), tr2
+ movi 3 << 8, r31
+ and r1, r31, r32
+ andc r1, r31, r1
+ beq/l r31, r32, tr2
+ addi.l r8, 8, r9
+ ldx.q r8, r63, r8
+LOCAL(ct_r9_ld): /* Copy r9 from a memory address. */
+ pt/l LOCAL(ct_check_tramp), tr2
+ ldx.q r9, r63, r9
+ blink tr2, r63
+LOCAL(ct_r2_load):
+ ldx.q r2, r63, r2
+ blink tr1, r63
+LOCAL(ct_r3_load):
+ ldx.q r3, r63, r3
+ blink tr1, r63
+LOCAL(ct_r4_load):
+ ldx.q r4, r63, r4
+ blink tr1, r63
+LOCAL(ct_r5_load):
+ ldx.q r5, r63, r5
+ blink tr1, r63
+LOCAL(ct_r6_load):
+ ldx.q r6, r63, r6
+ blink tr1, r63
+LOCAL(ct_r7_load):
+ ldx.q r7, r63, r7
+ blink tr1, r63
+LOCAL(ct_r8_load):
+ ldx.q r8, r63, r8
+ blink tr1, r63
+LOCAL(ct_r2_pop): /* Pop r2 from the stack. */
+ movi 1, r30
+ ldx.q r15, r63, r2
+ shlli r30, 29, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r3_pop): /* Pop r3 from the stack. */
+ movi 1, r30
+ ldx.q r15, r63, r3
+ shlli r30, 26, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r4_pop): /* Pop r4 from the stack. */
+ movi 1, r30
+ ldx.q r15, r63, r4
+ shlli r30, 23, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r5_pop): /* Pop r5 from the stack. */
+ movi 1, r30
+ ldx.q r15, r63, r5
+ shlli r30, 20, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r6_pop): /* Pop r6 from the stack. */
+ movi 1, r30
+ ldx.q r15, r63, r6
+ shlli r30, 16, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r7_pop): /* Pop r7 from the stack. */
+ ldx.q r15, r63, r7
+ movi 1 << 12, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_r8_pop): /* Pop r8 from the stack. */
+ ldx.q r15, r63, r8
+ movi 1 << 8, r31
+ addi.l r15, 8, r15
+ andc r1, r31, r1
+ blink tr1, r63
+LOCAL(ct_pop_seq): /* Pop a sequence of registers off the stack. */
+ andi r1, 7 << 1, r30
+ movi (LOCAL(ct_end_of_pop_seq) >> 16) & 65535, r32
+ shlli r30, 2, r31
+ shori LOCAL(ct_end_of_pop_seq) & 65535, r32
+ sub.l r32, r31, r33
+ ptabs/l r33, tr2
+ blink tr2, r63
+LOCAL(ct_start_of_pop_seq): /* Beginning of pop sequence. */
+ ldx.q r15, r63, r3
+ addi.l r15, 8, r15
+ ldx.q r15, r63, r4
+ addi.l r15, 8, r15
+ ldx.q r15, r63, r5
+ addi.l r15, 8, r15
+ ldx.q r15, r63, r6
+ addi.l r15, 8, r15
+ ldx.q r15, r63, r7
+ addi.l r15, 8, r15
+ ldx.q r15, r63, r8
+ addi.l r15, 8, r15
+LOCAL(ct_r9_pop): /* Pop r9 from the stack. */
+ ldx.q r15, r63, r9
+ addi.l r15, 8, r15
+LOCAL(ct_end_of_pop_seq): /* Label used to compute first pop instruction. */
+LOCAL(ct_check_tramp): /* Check whether we need a trampoline. */
+ pt/u LOCAL(ct_ret_wide), tr2
+ andi r1, 1, r1
+ bne/u r1, r63, tr2
+LOCAL(ct_call_func): /* Just branch to the function. */
+ blink tr0, r63
+LOCAL(ct_ret_wide): /* Call the function, so that we can unpack its
+ 64-bit return value. */
+ add.l r18, r63, r10
+ blink tr0, r18
+ ptabs r10, tr0
+#if __LITTLE_ENDIAN__
+ shari r2, 32, r3
+ add.l r2, r63, r2
+#else
+ add.l r2, r63, r3
+ shari r2, 32, r2
+#endif
+ blink tr0, r63
+
+ ENDFUNC(GLOBAL(GCC_shcompact_call_trampoline))
+#endif /* L_shcompact_call_trampoline */
+
+#ifdef L_shcompact_return_trampoline
+ /* This function does the converse of the code in `ret_wide'
+ above. It is tail-called by SHcompact functions returning
+ 64-bit non-floating-point values, to pack the 32-bit values in
+ r2 and r3 into r2. */
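+
+ /* A minimal C model of this packing (a sketch, not the ABI
+    definition):
+
+	unsigned long long
+	pack (unsigned r2, unsigned r3)
+	{
+	#if __LITTLE_ENDIAN__
+	  return ((unsigned long long) r3 << 32) | r2;	// r2 holds the low half
+	#else
+	  return ((unsigned long long) r2 << 32) | r3;	// r3 holds the low half
+	#endif
+	}
+ */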
+
+ .mode SHmedia
+ .section .text..SHmedia32, "ax"
+ .align 2
+ .global GLOBAL(GCC_shcompact_return_trampoline)
+ HIDDEN_FUNC(GLOBAL(GCC_shcompact_return_trampoline))
+GLOBAL(GCC_shcompact_return_trampoline):
+ ptabs/l r18, tr0
+#if __LITTLE_ENDIAN__
+ addz.l r2, r63, r2
+ shlli r3, 32, r3
+#else
+ addz.l r3, r63, r3
+ shlli r2, 32, r2
+#endif
+ or r3, r2, r2
+ blink tr0, r63
+
+ ENDFUNC(GLOBAL(GCC_shcompact_return_trampoline))
+#endif /* L_shcompact_return_trampoline */
+
+#ifdef L_shcompact_incoming_args
+ .section .rodata
+ .align 1
+LOCAL(ia_main_table):
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r2_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r2_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r3_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r3_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r4_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r4_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r5_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r5_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r6_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r6_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r7_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r7_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r8_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r8_push) - datalabel LOCAL(ia_main_label)
+.word 1 /* Invalid, just loop */
+.word 1 /* Invalid, just loop */
+.word LOCAL(ia_r9_ld) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r9_push) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_push_seq) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_push_seq) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_r9_push) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_return) - datalabel LOCAL(ia_main_label)
+.word LOCAL(ia_return) - datalabel LOCAL(ia_main_label)
+ .mode SHmedia
+ .section .text..SHmedia32, "ax"
+ .align 2
+
+ /* This function stores 64-bit general-purpose registers back on
+ the stack, and replaces each register's contents with the address
+ at which that register was stored. The lower 32 bits of r17 hold
+ the address to begin storing at, and the upper 32 bits of r17
+ hold the cookie. Its execution time is linear in the number of
+ registers that actually have to be copied, and it is optimized
+ for structures larger than 64 bits, as opposed to individual
+ `long long' arguments. See sh.h for details on the actual bit
+ pattern. */
+
+ .global GLOBAL(GCC_shcompact_incoming_args)
+ FUNC(GLOBAL(GCC_shcompact_incoming_args))
+GLOBAL(GCC_shcompact_incoming_args):
+ ptabs/l r18, tr0 /* Prepare to return. */
+ shlri r17, 32, r0 /* Load the cookie. */
+ movi ((datalabel LOCAL(ia_main_table) - 31 * 2) >> 16) & 65535, r43
+ pt/l LOCAL(ia_loop), tr1
+ add.l r17, r63, r17
+ shori ((datalabel LOCAL(ia_main_table) - 31 * 2)) & 65535, r43
+LOCAL(ia_loop):
+ nsb r0, r36
+ shlli r36, 1, r37
+ ldx.w r43, r37, r38
+LOCAL(ia_main_label):
+ ptrel/l r38, tr2
+ blink tr2, r63
+LOCAL(ia_r2_ld): /* Store r2 and load its address. */
+ movi 3, r38
+ shlli r38, 29, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r2
+ add.l r17, r63, r2
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r3_ld): /* Store r3 and load its address. */
+ movi 3, r38
+ shlli r38, 26, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r3
+ add.l r17, r63, r3
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r4_ld): /* Store r4 and load its address. */
+ movi 3, r38
+ shlli r38, 23, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r4
+ add.l r17, r63, r4
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r5_ld): /* Store r5 and load its address. */
+ movi 3, r38
+ shlli r38, 20, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r5
+ add.l r17, r63, r5
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r6_ld): /* Store r6 and load its address. */
+ movi 3, r38
+ shlli r38, 16, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r6
+ add.l r17, r63, r6
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r7_ld): /* Store r7 and load its address. */
+ movi 3 << 12, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r7
+ add.l r17, r63, r7
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r8_ld): /* Store r8 and load its address. */
+ movi 3 << 8, r39
+ and r0, r39, r40
+ andc r0, r39, r0
+ stx.q r17, r63, r8
+ add.l r17, r63, r8
+ addi.l r17, 8, r17
+ beq/u r39, r40, tr1
+LOCAL(ia_r9_ld): /* Store r9 and load its address. */
+ stx.q r17, r63, r9
+ add.l r17, r63, r9
+ blink tr0, r63
+LOCAL(ia_r2_push): /* Push r2 onto the stack. */
+ movi 1, r38
+ shlli r38, 29, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r2
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r3_push): /* Push r3 onto the stack. */
+ movi 1, r38
+ shlli r38, 26, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r3
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r4_push): /* Push r4 onto the stack. */
+ movi 1, r38
+ shlli r38, 23, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r4
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r5_push): /* Push r5 onto the stack. */
+ movi 1, r38
+ shlli r38, 20, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r5
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r6_push): /* Push r6 onto the stack. */
+ movi 1, r38
+ shlli r38, 16, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r6
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r7_push): /* Push r7 onto the stack. */
+ movi 1 << 12, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r7
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_r8_push): /* Push r8 onto the stack. */
+ movi 1 << 8, r39
+ andc r0, r39, r0
+ stx.q r17, r63, r8
+ addi.l r17, 8, r17
+ blink tr1, r63
+LOCAL(ia_push_seq): /* Push a sequence of registers onto the stack. */
+ andi r0, 7 << 1, r38
+ movi (LOCAL(ia_end_of_push_seq) >> 16) & 65535, r40
+ shlli r38, 2, r39
+ shori LOCAL(ia_end_of_push_seq) & 65535, r40
+ sub.l r40, r39, r41
+ ptabs/l r41, tr2
+ blink tr2, r63
+LOCAL(ia_stack_of_push_seq): /* Beginning of push sequence. */
+ stx.q r17, r63, r3
+ addi.l r17, 8, r17
+ stx.q r17, r63, r4
+ addi.l r17, 8, r17
+ stx.q r17, r63, r5
+ addi.l r17, 8, r17
+ stx.q r17, r63, r6
+ addi.l r17, 8, r17
+ stx.q r17, r63, r7
+ addi.l r17, 8, r17
+ stx.q r17, r63, r8
+ addi.l r17, 8, r17
+LOCAL(ia_r9_push): /* Push r9 onto the stack. */
+ stx.q r17, r63, r9
+LOCAL(ia_return): /* Return. */
+ blink tr0, r63
+LOCAL(ia_end_of_push_seq): /* Label used to compute the first push instruction. */
+ ENDFUNC(GLOBAL(GCC_shcompact_incoming_args))
+#endif /* L_shcompact_incoming_args */
+#endif /* __SH5__ == 32 */
+#if __SH5__
+#ifdef L_nested_trampoline
+#if __SH5__ == 32
+ .section .text..SHmedia32,"ax"
+#else
+ .text
+#endif
+ .align 3 /* It is copied in units of 8 bytes in SHmedia mode. */
+ .global GLOBAL(GCC_nested_trampoline)
+ HIDDEN_FUNC(GLOBAL(GCC_nested_trampoline))
+GLOBAL(GCC_nested_trampoline):
+ .mode SHmedia
+ ptrel/u r63, tr0
+ gettr tr0, r0
+#if __SH5__ == 64
+ ld.q r0, 24, r1
+#else
+ ld.l r0, 24, r1
+#endif
+ ptabs/l r1, tr1
+#if __SH5__ == 64
+ ld.q r0, 32, r1
+#else
+ ld.l r0, 28, r1
+#endif
+ blink tr1, r63
+
+ ENDFUNC(GLOBAL(GCC_nested_trampoline))
+#endif /* L_nested_trampoline */
+#endif /* __SH5__ */
+#if __SH5__ == 32
+#ifdef L_push_pop_shmedia_regs
+ .section .text..SHmedia32,"ax"
+ .mode SHmedia
+ .align 2
+#ifndef __SH4_NOFPU__
+ .global GLOBAL(GCC_push_shmedia_regs)
+ FUNC(GLOBAL(GCC_push_shmedia_regs))
+GLOBAL(GCC_push_shmedia_regs):
+ addi.l r15, -14*8, r15
+ fst.d r15, 13*8, dr62
+ fst.d r15, 12*8, dr60
+ fst.d r15, 11*8, dr58
+ fst.d r15, 10*8, dr56
+ fst.d r15, 9*8, dr54
+ fst.d r15, 8*8, dr52
+ fst.d r15, 7*8, dr50
+ fst.d r15, 6*8, dr48
+ fst.d r15, 5*8, dr46
+ fst.d r15, 4*8, dr44
+ fst.d r15, 3*8, dr42
+ fst.d r15, 2*8, dr40
+ fst.d r15, 1*8, dr38
+ fst.d r15, 0*8, dr36
+#else /* ! __SH4_NOFPU__ */
+ .global GLOBAL(GCC_push_shmedia_regs_nofpu)
+ FUNC(GLOBAL(GCC_push_shmedia_regs_nofpu))
+GLOBAL(GCC_push_shmedia_regs_nofpu):
+#endif /* ! __SH4_NOFPU__ */
+ ptabs/l r18, tr0
+ addi.l r15, -27*8, r15
+ gettr tr7, r62
+ gettr tr6, r61
+ gettr tr5, r60
+ st.q r15, 26*8, r62
+ st.q r15, 25*8, r61
+ st.q r15, 24*8, r60
+ st.q r15, 23*8, r59
+ st.q r15, 22*8, r58
+ st.q r15, 21*8, r57
+ st.q r15, 20*8, r56
+ st.q r15, 19*8, r55
+ st.q r15, 18*8, r54
+ st.q r15, 17*8, r53
+ st.q r15, 16*8, r52
+ st.q r15, 15*8, r51
+ st.q r15, 14*8, r50
+ st.q r15, 13*8, r49
+ st.q r15, 12*8, r48
+ st.q r15, 11*8, r47
+ st.q r15, 10*8, r46
+ st.q r15, 9*8, r45
+ st.q r15, 8*8, r44
+ st.q r15, 7*8, r35
+ st.q r15, 6*8, r34
+ st.q r15, 5*8, r33
+ st.q r15, 4*8, r32
+ st.q r15, 3*8, r31
+ st.q r15, 2*8, r30
+ st.q r15, 1*8, r29
+ st.q r15, 0*8, r28
+ blink tr0, r63
+#ifndef __SH4_NOFPU__
+ ENDFUNC(GLOBAL(GCC_push_shmedia_regs))
+#else
+ ENDFUNC(GLOBAL(GCC_push_shmedia_regs_nofpu))
+#endif
+#ifndef __SH4_NOFPU__
+ .global GLOBAL(GCC_pop_shmedia_regs)
+ FUNC(GLOBAL(GCC_pop_shmedia_regs))
+GLOBAL(GCC_pop_shmedia_regs):
+ pt .L0, tr1
+ movi 41*8, r0
+ fld.d r15, 40*8, dr62
+ fld.d r15, 39*8, dr60
+ fld.d r15, 38*8, dr58
+ fld.d r15, 37*8, dr56
+ fld.d r15, 36*8, dr54
+ fld.d r15, 35*8, dr52
+ fld.d r15, 34*8, dr50
+ fld.d r15, 33*8, dr48
+ fld.d r15, 32*8, dr46
+ fld.d r15, 31*8, dr44
+ fld.d r15, 30*8, dr42
+ fld.d r15, 29*8, dr40
+ fld.d r15, 28*8, dr38
+ fld.d r15, 27*8, dr36
+ blink tr1, r63
+#else /* ! __SH4_NOFPU__ */
+ .global GLOBAL(GCC_pop_shmedia_regs_nofpu)
+ FUNC(GLOBAL(GCC_pop_shmedia_regs_nofpu))
+GLOBAL(GCC_pop_shmedia_regs_nofpu):
+#endif /* ! __SH4_NOFPU__ */
+ movi 27*8, r0
+.L0:
+ ptabs r18, tr0
+ ld.q r15, 26*8, r62
+ ld.q r15, 25*8, r61
+ ld.q r15, 24*8, r60
+ ptabs r62, tr7
+ ptabs r61, tr6
+ ptabs r60, tr5
+ ld.q r15, 23*8, r59
+ ld.q r15, 22*8, r58
+ ld.q r15, 21*8, r57
+ ld.q r15, 20*8, r56
+ ld.q r15, 19*8, r55
+ ld.q r15, 18*8, r54
+ ld.q r15, 17*8, r53
+ ld.q r15, 16*8, r52
+ ld.q r15, 15*8, r51
+ ld.q r15, 14*8, r50
+ ld.q r15, 13*8, r49
+ ld.q r15, 12*8, r48
+ ld.q r15, 11*8, r47
+ ld.q r15, 10*8, r46
+ ld.q r15, 9*8, r45
+ ld.q r15, 8*8, r44
+ ld.q r15, 7*8, r35
+ ld.q r15, 6*8, r34
+ ld.q r15, 5*8, r33
+ ld.q r15, 4*8, r32
+ ld.q r15, 3*8, r31
+ ld.q r15, 2*8, r30
+ ld.q r15, 1*8, r29
+ ld.q r15, 0*8, r28
+ add.l r15, r0, r15
+ blink tr0, r63
+
+#ifndef __SH4_NOFPU__
+ ENDFUNC(GLOBAL(GCC_pop_shmedia_regs))
+#else
+ ENDFUNC(GLOBAL(GCC_pop_shmedia_regs_nofpu))
+#endif
+#endif /* L_push_pop_shmedia_regs */
+#endif /* __SH5__ == 32 */
+
+#ifdef L_div_table
+#if __SH5__
+#if defined(__pic__) && defined(__SHMEDIA__)
+ .global GLOBAL(sdivsi3)
+ FUNC(GLOBAL(sdivsi3))
+#if __SH5__ == 32
+ .section .text..SHmedia32,"ax"
+#else
+ .text
+#endif
+#if 0
+/* ??? FIXME: Presumably due to a linker bug, exporting data symbols
+   in a text section does not work (at least for shared libraries):
+   the linker sets the LSB of the address as if this were SHmedia code. */
+#define TEXT_DATA_BUG
+#endif
+ .align 2
+ // inputs: r4,r5
+ // clobbered: r1,r18,r19,r20,r21,r25,tr0
+ // result in r0
+ .global GLOBAL(sdivsi3)
+GLOBAL(sdivsi3):
+#ifdef TEXT_DATA_BUG
+ ptb datalabel Local_div_table,tr0
+#else
+ ptb GLOBAL(div_table_internal),tr0
+#endif
+ nsb r5, r1
+ shlld r5, r1, r25 // normalize; [-2 ..1, 1..2) in s2.62
+ shari r25, 58, r21 // extract 5(6) bit index (s2.4 with hole -1..1)
+ /* bubble */
+ gettr tr0,r20
+ ldx.ub r20, r21, r19 // u0.8
+ shari r25, 32, r25 // normalize to s2.30
+ shlli r21, 1, r21
+ muls.l r25, r19, r19 // s2.38
+ ldx.w r20, r21, r21 // s2.14
+ ptabs r18, tr0
+ shari r19, 24, r19 // truncate to s2.14
+ sub r21, r19, r19 // some 11 bit inverse in s1.14
+ muls.l r19, r19, r21 // u0.28
+ sub r63, r1, r1
+ addi r1, 92, r1
+ muls.l r25, r21, r18 // s2.58
+ shlli r19, 45, r19 // multiply by two and convert to s2.58
+ /* bubble */
+ sub r19, r18, r18
+ shari r18, 28, r18 // some 22 bit inverse in s1.30
+ muls.l r18, r25, r0 // s2.60
+ muls.l r18, r4, r25 // s32.30
+ /* bubble */
+ shari r0, 16, r19 // s-16.44
+ muls.l r19, r18, r19 // s-16.74
+ shari r25, 63, r0
+ shari r4, 14, r18 // s19.-14
+ shari r19, 30, r19 // s-16.44
+ muls.l r19, r18, r19 // s15.30
+ xor r21, r0, r21 // You could also use the constant 1 << 27.
+ add r21, r25, r21
+ sub r21, r19, r21
+ shard r21, r1, r21
+ sub r21, r0, r0
+ blink tr0, r63
+ ENDFUNC(GLOBAL(sdivsi3))
+/* This table has been generated by divtab.c.
+Defects for bias -330:
+ Max defect: 6.081536e-07 at -1.000000e+00
+ Min defect: 2.849516e-08 at 1.030651e+00
+ Max 2nd step defect: 9.606539e-12 at -1.000000e+00
+ Min 2nd step defect: 0.000000e+00 at 0.000000e+00
+ Defect at 1: 1.238659e-07
+ Defect at -2: 1.061708e-07 */
+#else /* ! __pic__ || ! __SHMEDIA__ */
+ .section .rodata
+#endif /* __pic__ */
+#if defined(TEXT_DATA_BUG) && defined(__pic__) && defined(__SHMEDIA__)
+ .balign 2
+ .type Local_div_table,@object
+ .size Local_div_table,128
+/* negative division constants */
+ .word -16638
+ .word -17135
+ .word -17737
+ .word -18433
+ .word -19103
+ .word -19751
+ .word -20583
+ .word -21383
+ .word -22343
+ .word -23353
+ .word -24407
+ .word -25582
+ .word -26863
+ .word -28382
+ .word -29965
+ .word -31800
+/* negative division factors */
+ .byte 66
+ .byte 70
+ .byte 75
+ .byte 81
+ .byte 87
+ .byte 93
+ .byte 101
+ .byte 109
+ .byte 119
+ .byte 130
+ .byte 142
+ .byte 156
+ .byte 172
+ .byte 192
+ .byte 214
+ .byte 241
+ .skip 16
+Local_div_table:
+ .skip 16
+/* positive division factors */
+ .byte 241
+ .byte 214
+ .byte 192
+ .byte 172
+ .byte 156
+ .byte 142
+ .byte 130
+ .byte 119
+ .byte 109
+ .byte 101
+ .byte 93
+ .byte 87
+ .byte 81
+ .byte 75
+ .byte 70
+ .byte 66
+/* positive division constants */
+ .word 31801
+ .word 29966
+ .word 28383
+ .word 26864
+ .word 25583
+ .word 24408
+ .word 23354
+ .word 22344
+ .word 21384
+ .word 20584
+ .word 19752
+ .word 19104
+ .word 18434
+ .word 17738
+ .word 17136
+ .word 16639
+ .section .rodata
+#endif /* TEXT_DATA_BUG */
+ .balign 2
+ .type GLOBAL(div_table),@object
+ .size GLOBAL(div_table),128
+/* negative division constants */
+ .word -16638
+ .word -17135
+ .word -17737
+ .word -18433
+ .word -19103
+ .word -19751
+ .word -20583
+ .word -21383
+ .word -22343
+ .word -23353
+ .word -24407
+ .word -25582
+ .word -26863
+ .word -28382
+ .word -29965
+ .word -31800
+/* negative division factors */
+ .byte 66
+ .byte 70
+ .byte 75
+ .byte 81
+ .byte 87
+ .byte 93
+ .byte 101
+ .byte 109
+ .byte 119
+ .byte 130
+ .byte 142
+ .byte 156
+ .byte 172
+ .byte 192
+ .byte 214
+ .byte 241
+ .skip 16
+ .global GLOBAL(div_table)
+GLOBAL(div_table):
+ HIDDEN_ALIAS(div_table_internal,div_table)
+ .skip 16
+/* positive division factors */
+ .byte 241
+ .byte 214
+ .byte 192
+ .byte 172
+ .byte 156
+ .byte 142
+ .byte 130
+ .byte 119
+ .byte 109
+ .byte 101
+ .byte 93
+ .byte 87
+ .byte 81
+ .byte 75
+ .byte 70
+ .byte 66
+/* positive division constants */
+ .word 31801
+ .word 29966
+ .word 28383
+ .word 26864
+ .word 25583
+ .word 24408
+ .word 23354
+ .word 22344
+ .word 21384
+ .word 20584
+ .word 19752
+ .word 19104
+ .word 18434
+ .word 17738
+ .word 17136
+ .word 16639
+
+#elif defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__)
+/* This code uses shld, and is thus not suitable for SH1 / SH2. */
+
+/* Signed / unsigned division without use of FPU, optimized for SH4.
+ Uses a lookup table for divisors in the range -128 .. +128, and
+ div1 with case distinction for larger divisors in three more ranges.
+ The code is lumped together with the table to allow the use of mova. */
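+
+/* For divisors in 2..128 the fast path amounts to this C sketch (a
+   simplification; the tables are the locals defined further down, the
+   ix entries are byte offsets, and the extra "+ n" accounts for the
+   implicit leading 1 of the stored inverse):
+
+	unsigned udiv_le128 (unsigned n, unsigned d)
+	{
+	  unsigned inv = *(unsigned *) ((char *) div_table_inv
+					+ div_table_ix[d]);
+	  unsigned hi = (unsigned) (((unsigned long long) n * inv) >> 32);
+	  unsigned long long sum = (unsigned long long) n + hi;
+	  return sum >> (1 - div_table_clz[d]);	// clz entry is <= 0 here
+	}
+*/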
+#ifdef __LITTLE_ENDIAN__
+#define L_LSB 0
+#define L_LSWMSB 1
+#define L_MSWLSB 2
+#else
+#define L_LSB 3
+#define L_LSWMSB 2
+#define L_MSWLSB 1
+#endif
+
+ .balign 4
+ .global GLOBAL(udivsi3_i4i)
+ FUNC(GLOBAL(udivsi3_i4i))
+GLOBAL(udivsi3_i4i):
+ mov.w LOCAL(c128_w), r1
+ div0u
+ mov r4,r0
+ shlr8 r0
+ cmp/hi r1,r5
+ extu.w r5,r1
+ bf LOCAL(udiv_le128)
+ cmp/eq r5,r1
+ bf LOCAL(udiv_ge64k)
+ shlr r0
+ mov r5,r1
+ shll16 r5
+ mov.l r4,@-r15
+ div1 r5,r0
+ mov.l r1,@-r15
+ div1 r5,r0
+ div1 r5,r0
+ bra LOCAL(udiv_25)
+ div1 r5,r0
+
+LOCAL(div_le128):
+ mova LOCAL(div_table_ix),r0
+ bra LOCAL(div_le128_2)
+ mov.b @(r0,r5),r1
+LOCAL(udiv_le128):
+ mov.l r4,@-r15
+ mova LOCAL(div_table_ix),r0
+ mov.b @(r0,r5),r1
+ mov.l r5,@-r15
+LOCAL(div_le128_2):
+ mova LOCAL(div_table_inv),r0
+ mov.l @(r0,r1),r1
+ mov r5,r0
+ tst #0xfe,r0
+ mova LOCAL(div_table_clz),r0
+ dmulu.l r1,r4
+ mov.b @(r0,r5),r1
+ bt/s LOCAL(div_by_1)
+ mov r4,r0
+ mov.l @r15+,r5
+ sts mach,r0
+ /* clrt */
+ addc r4,r0
+ mov.l @r15+,r4
+ rotcr r0
+ rts
+ shld r1,r0
+
+LOCAL(div_by_1_neg):
+ neg r4,r0
+LOCAL(div_by_1):
+ mov.l @r15+,r5
+ rts
+ mov.l @r15+,r4
+
+LOCAL(div_ge64k):
+ bt/s LOCAL(div_r8)
+ div0u
+ shll8 r5
+ bra LOCAL(div_ge64k_2)
+ div1 r5,r0
+LOCAL(udiv_ge64k):
+ cmp/hi r0,r5
+ mov r5,r1
+ bt LOCAL(udiv_r8)
+ shll8 r5
+ mov.l r4,@-r15
+ div1 r5,r0
+ mov.l r1,@-r15
+LOCAL(div_ge64k_2):
+ div1 r5,r0
+ mov.l LOCAL(zero_l),r1
+ .rept 4
+ div1 r5,r0
+ .endr
+ mov.l r1,@-r15
+ div1 r5,r0
+ mov.w LOCAL(m256_w),r1
+ div1 r5,r0
+ mov.b r0,@(L_LSWMSB,r15)
+ xor r4,r0
+ and r1,r0
+ bra LOCAL(div_ge64k_end)
+ xor r4,r0
+
+LOCAL(div_r8):
+ shll16 r4
+ bra LOCAL(div_r8_2)
+ shll8 r4
+LOCAL(udiv_r8):
+ mov.l r4,@-r15
+ shll16 r4
+ clrt
+ shll8 r4
+ mov.l r5,@-r15
+LOCAL(div_r8_2):
+ rotcl r4
+ mov r0,r1
+ div1 r5,r1
+ mov r4,r0
+ rotcl r0
+ mov r5,r4
+ div1 r5,r1
+ .rept 5
+ rotcl r0; div1 r5,r1
+ .endr
+ rotcl r0
+ mov.l @r15+,r5
+ div1 r4,r1
+ mov.l @r15+,r4
+ rts
+ rotcl r0
+
+ ENDFUNC(GLOBAL(udivsi3_i4i))
+
+ .global GLOBAL(sdivsi3_i4i)
+ FUNC(GLOBAL(sdivsi3_i4i))
+ /* This is link-compatible with a GLOBAL(sdivsi3) call,
+ but we effectively clobber only r1. */
+GLOBAL(sdivsi3_i4i):
+ mov.l r4,@-r15
+ cmp/pz r5
+ mov.w LOCAL(c128_w), r1
+ bt/s LOCAL(pos_divisor)
+ cmp/pz r4
+ mov.l r5,@-r15
+ neg r5,r5
+ bt/s LOCAL(neg_result)
+ cmp/hi r1,r5
+ neg r4,r4
+LOCAL(pos_result):
+ extu.w r5,r0
+ bf LOCAL(div_le128)
+ cmp/eq r5,r0
+ mov r4,r0
+ shlr8 r0
+ bf/s LOCAL(div_ge64k)
+ cmp/hi r0,r5
+ div0u
+ shll16 r5
+ div1 r5,r0
+ div1 r5,r0
+ div1 r5,r0
+LOCAL(udiv_25):
+ mov.l LOCAL(zero_l),r1
+ div1 r5,r0
+ div1 r5,r0
+ mov.l r1,@-r15
+ .rept 3
+ div1 r5,r0
+ .endr
+ mov.b r0,@(L_MSWLSB,r15)
+ xtrct r4,r0
+ swap.w r0,r0
+ .rept 8
+ div1 r5,r0
+ .endr
+ mov.b r0,@(L_LSWMSB,r15)
+LOCAL(div_ge64k_end):
+ .rept 8
+ div1 r5,r0
+ .endr
+ mov.l @r15+,r4 ! zero-extension and swap using LS unit.
+ extu.b r0,r0
+ mov.l @r15+,r5
+ or r4,r0
+ mov.l @r15+,r4
+ rts
+ rotcl r0
+
+LOCAL(div_le128_neg):
+ tst #0xfe,r0
+ mova LOCAL(div_table_ix),r0
+ mov.b @(r0,r5),r1
+ mova LOCAL(div_table_inv),r0
+ bt/s LOCAL(div_by_1_neg)
+ mov.l @(r0,r1),r1
+ mova LOCAL(div_table_clz),r0
+ dmulu.l r1,r4
+ mov.b @(r0,r5),r1
+ mov.l @r15+,r5
+ sts mach,r0
+ /* clrt */
+ addc r4,r0
+ mov.l @r15+,r4
+ rotcr r0
+ shld r1,r0
+ rts
+ neg r0,r0
+
+LOCAL(pos_divisor):
+ mov.l r5,@-r15
+ bt/s LOCAL(pos_result)
+ cmp/hi r1,r5
+ neg r4,r4
+LOCAL(neg_result):
+ extu.w r5,r0
+ bf LOCAL(div_le128_neg)
+ cmp/eq r5,r0
+ mov r4,r0
+ shlr8 r0
+ bf/s LOCAL(div_ge64k_neg)
+ cmp/hi r0,r5
+ div0u
+ mov.l LOCAL(zero_l),r1
+ shll16 r5
+ div1 r5,r0
+ mov.l r1,@-r15
+ .rept 7
+ div1 r5,r0
+ .endr
+ mov.b r0,@(L_MSWLSB,r15)
+ xtrct r4,r0
+ swap.w r0,r0
+ .rept 8
+ div1 r5,r0
+ .endr
+ mov.b r0,@(L_LSWMSB,r15)
+LOCAL(div_ge64k_neg_end):
+ .rept 8
+ div1 r5,r0
+ .endr
+ mov.l @r15+,r4 ! zero-extension and swap using LS unit.
+ extu.b r0,r1
+ mov.l @r15+,r5
+ or r4,r1
+LOCAL(div_r8_neg_end):
+ mov.l @r15+,r4
+ rotcl r1
+ rts
+ neg r1,r0
+
+LOCAL(div_ge64k_neg):
+ bt/s LOCAL(div_r8_neg)
+ div0u
+ shll8 r5
+ mov.l LOCAL(zero_l),r1
+ .rept 6
+ div1 r5,r0
+ .endr
+ mov.l r1,@-r15
+ div1 r5,r0
+ mov.w LOCAL(m256_w),r1
+ div1 r5,r0
+ mov.b r0,@(L_LSWMSB,r15)
+ xor r4,r0
+ and r1,r0
+ bra LOCAL(div_ge64k_neg_end)
+ xor r4,r0
+
+LOCAL(c128_w):
+ .word 128
+
+LOCAL(div_r8_neg):
+ clrt
+ shll16 r4
+ mov r4,r1
+ shll8 r1
+ mov r5,r4
+ .rept 7
+ rotcl r1; div1 r5,r0
+ .endr
+ mov.l @r15+,r5
+ rotcl r1
+ bra LOCAL(div_r8_neg_end)
+ div1 r4,r0
+
+LOCAL(m256_w):
+ .word 0xff00
+/* This table has been generated by divtab-sh4.c. */
+ .balign 4
+LOCAL(div_table_clz):
+ .byte 0
+ .byte 1
+ .byte 0
+ .byte -1
+ .byte -1
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -2
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -3
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -4
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -5
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+ .byte -6
+/* Lookup table translating positive divisor to index into table of
+ normalized inverse. N.B. the '0' entry is also the last entry of the
+ previous table, and causes an unaligned access for division by zero. */
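+/* For example, divisor 2 maps to byte offset -128, i.e. the 0x0 entry
+   at LOCAL(zero_l) (exactly 1/2 once the implicit leading 1 is added),
+   and divisor 3 maps to offset 0, i.e. 0x55555556 ~= 2^32/3.  */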
+LOCAL(div_table_ix):
+ .byte -6
+ .byte -128
+ .byte -128
+ .byte 0
+ .byte -128
+ .byte -64
+ .byte 0
+ .byte 64
+ .byte -128
+ .byte -96
+ .byte -64
+ .byte -32
+ .byte 0
+ .byte 32
+ .byte 64
+ .byte 96
+ .byte -128
+ .byte -112
+ .byte -96
+ .byte -80
+ .byte -64
+ .byte -48
+ .byte -32
+ .byte -16
+ .byte 0
+ .byte 16
+ .byte 32
+ .byte 48
+ .byte 64
+ .byte 80
+ .byte 96
+ .byte 112
+ .byte -128
+ .byte -120
+ .byte -112
+ .byte -104
+ .byte -96
+ .byte -88
+ .byte -80
+ .byte -72
+ .byte -64
+ .byte -56
+ .byte -48
+ .byte -40
+ .byte -32
+ .byte -24
+ .byte -16
+ .byte -8
+ .byte 0
+ .byte 8
+ .byte 16
+ .byte 24
+ .byte 32
+ .byte 40
+ .byte 48
+ .byte 56
+ .byte 64
+ .byte 72
+ .byte 80
+ .byte 88
+ .byte 96
+ .byte 104
+ .byte 112
+ .byte 120
+ .byte -128
+ .byte -124
+ .byte -120
+ .byte -116
+ .byte -112
+ .byte -108
+ .byte -104
+ .byte -100
+ .byte -96
+ .byte -92
+ .byte -88
+ .byte -84
+ .byte -80
+ .byte -76
+ .byte -72
+ .byte -68
+ .byte -64
+ .byte -60
+ .byte -56
+ .byte -52
+ .byte -48
+ .byte -44
+ .byte -40
+ .byte -36
+ .byte -32
+ .byte -28
+ .byte -24
+ .byte -20
+ .byte -16
+ .byte -12
+ .byte -8
+ .byte -4
+ .byte 0
+ .byte 4
+ .byte 8
+ .byte 12
+ .byte 16
+ .byte 20
+ .byte 24
+ .byte 28
+ .byte 32
+ .byte 36
+ .byte 40
+ .byte 44
+ .byte 48
+ .byte 52
+ .byte 56
+ .byte 60
+ .byte 64
+ .byte 68
+ .byte 72
+ .byte 76
+ .byte 80
+ .byte 84
+ .byte 88
+ .byte 92
+ .byte 96
+ .byte 100
+ .byte 104
+ .byte 108
+ .byte 112
+ .byte 116
+ .byte 120
+ .byte 124
+ .byte -128
+/* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. */
+ .balign 4
+LOCAL(zero_l):
+ .long 0x0
+ .long 0xF81F81F9
+ .long 0xF07C1F08
+ .long 0xE9131AC0
+ .long 0xE1E1E1E2
+ .long 0xDAE6076C
+ .long 0xD41D41D5
+ .long 0xCD856891
+ .long 0xC71C71C8
+ .long 0xC0E07039
+ .long 0xBACF914D
+ .long 0xB4E81B4F
+ .long 0xAF286BCB
+ .long 0xA98EF607
+ .long 0xA41A41A5
+ .long 0x9EC8E952
+ .long 0x9999999A
+ .long 0x948B0FCE
+ .long 0x8F9C18FA
+ .long 0x8ACB90F7
+ .long 0x86186187
+ .long 0x81818182
+ .long 0x7D05F418
+ .long 0x78A4C818
+ .long 0x745D1746
+ .long 0x702E05C1
+ .long 0x6C16C16D
+ .long 0x68168169
+ .long 0x642C8591
+ .long 0x60581606
+ .long 0x5C9882BA
+ .long 0x58ED2309
+LOCAL(div_table_inv):
+ .long 0x55555556
+ .long 0x51D07EAF
+ .long 0x4E5E0A73
+ .long 0x4AFD6A06
+ .long 0x47AE147B
+ .long 0x446F8657
+ .long 0x41414142
+ .long 0x3E22CBCF
+ .long 0x3B13B13C
+ .long 0x38138139
+ .long 0x3521CFB3
+ .long 0x323E34A3
+ .long 0x2F684BDB
+ .long 0x2C9FB4D9
+ .long 0x29E4129F
+ .long 0x27350B89
+ .long 0x24924925
+ .long 0x21FB7813
+ .long 0x1F7047DD
+ .long 0x1CF06ADB
+ .long 0x1A7B9612
+ .long 0x18118119
+ .long 0x15B1E5F8
+ .long 0x135C8114
+ .long 0x11111112
+ .long 0xECF56BF
+ .long 0xC9714FC
+ .long 0xA6810A7
+ .long 0x8421085
+ .long 0x624DD30
+ .long 0x4104105
+ .long 0x2040811
+ /* maximum error: 0.987342, scaled: 0.921875 */
+
+ ENDFUNC(GLOBAL(sdivsi3_i4i))
+#endif /* SH3 / SH4 */
+
+#endif /* L_div_table */
+
+#ifdef L_udiv_qrnnd_16
+#if !__SHMEDIA__
+ HIDDEN_FUNC(GLOBAL(udiv_qrnnd_16))
+ /* Outputs: r0: rn, r1: qn.  Inputs: r0: n1, r4: n0, r5: d, r6: d1.
+    Temporary: r2: __m. */
+ /* n1 < d, but n1 might be larger than d1. */
+ .global GLOBAL(udiv_qrnnd_16)
+ .balign 8
+GLOBAL(udiv_qrnnd_16):
+ div0u
+ cmp/hi r6,r0
+ bt .Lots
+ .rept 16
+ div1 r6,r0
+ .endr
+ extu.w r0,r1
+ bt 0f
+ add r6,r0
+0: rotcl r1
+ mulu.w r1,r5
+ xtrct r4,r0
+ swap.w r0,r0
+ sts macl,r2
+ cmp/hs r2,r0
+ sub r2,r0
+ bt 0f
+ addc r5,r0
+ add #-1,r1
+ bt 0f
+1: add #-1,r1
+ rts
+ add r5,r0
+ .balign 8
+.Lots:
+ sub r5,r0
+ swap.w r4,r1
+ xtrct r0,r1
+ clrt
+ mov r1,r0
+ addc r5,r0
+ mov #-1,r1
+ SL1(bf, 1b,
+ shlr16 r1)
+0: rts
+ nop
+ ENDFUNC(GLOBAL(udiv_qrnnd_16))
+#endif /* !__SHMEDIA__ */
+#endif /* L_udiv_qrnnd_16 */
diff --git a/libgcc/config/sh/lib1funcs.h b/libgcc/config/sh/lib1funcs.h
new file mode 100644
index 00000000000..af4b41cc314
--- /dev/null
+++ b/libgcc/config/sh/lib1funcs.h
@@ -0,0 +1,76 @@
+/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2009
+ Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef __ELF__
+#define LOCAL(X) .L_##X
+#define FUNC(X) .type X,@function
+#define HIDDEN_FUNC(X) FUNC(X); .hidden X
+#define HIDDEN_ALIAS(X,Y) ALIAS (X,Y); .hidden GLOBAL(X)
+#define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X
+#define ENDFUNC(X) ENDFUNC0(X)
+#else
+#define LOCAL(X) L_##X
+#define FUNC(X)
+#define HIDDEN_FUNC(X)
+#define HIDDEN_ALIAS(X,Y) ALIAS (X,Y)
+#define ENDFUNC(X)
+#endif
+
+#define CONCAT(A,B) A##B
+#define GLOBAL0(U,X) CONCAT(U,__##X)
+#define GLOBAL(X) GLOBAL0(__USER_LABEL_PREFIX__,X)
+
+#define ALIAS(X,Y) .global GLOBAL(X); .set GLOBAL(X),GLOBAL(Y)
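+
+/* For instance, with an empty __USER_LABEL_PREFIX__, GLOBAL(udivsi3)
+   expands to __udivsi3; with a "_" prefix it becomes ___udivsi3.  */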
+
+#if defined __SH2A__ && defined __FMOVD_ENABLED__
+#undef FMOVD_WORKS
+#define FMOVD_WORKS
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define DR00 fr1
+#define DR01 fr0
+#define DR20 fr3
+#define DR21 fr2
+#define DR40 fr5
+#define DR41 fr4
+#else /* !__LITTLE_ENDIAN__ */
+#define DR00 fr0
+#define DR01 fr1
+#define DR20 fr2
+#define DR21 fr3
+#define DR40 fr4
+#define DR41 fr5
+#endif /* !__LITTLE_ENDIAN__ */
+
+#ifdef __sh1__
+#define SL(branch, dest, in_slot, in_slot_arg2) \
+ in_slot, in_slot_arg2; branch dest
+#define SL1(branch, dest, in_slot) \
+ in_slot; branch dest
+#else /* ! __sh1__ */
+#define SL(branch, dest, in_slot, in_slot_arg2) \
+ branch##.s dest; in_slot, in_slot_arg2
+#define SL1(branch, dest, in_slot) \
+ branch##/s dest; in_slot
+#endif /* !__sh1__ */
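+
+/* Example: SL1 (bf, 1b, shlr16 r1) expands to "shlr16 r1; bf 1b" on SH1,
+   which lacks the delayed bf/s form, and to "bf/s 1b; shlr16 r1"
+   elsewhere, keeping the shift in the branch delay slot.  */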
diff --git a/libgcc/config/sh/libgcc-excl.ver b/libgcc/config/sh/libgcc-excl.ver
new file mode 100644
index 00000000000..325c74054ec
--- /dev/null
+++ b/libgcc/config/sh/libgcc-excl.ver
@@ -0,0 +1,8 @@
+# Exclude various symbols which should not be visible in libgcc.so for SH.
+%exclude {
+ __ashlsi3
+ __ashrsi3
+ __lshrsi3
+ __mulsi3 # this is an SH1-only symbol.
+ __udivsi3
+}
diff --git a/libgcc/config/sh/libgcc-glibc.ver b/libgcc/config/sh/libgcc-glibc.ver
new file mode 100644
index 00000000000..b8ec3265310
--- /dev/null
+++ b/libgcc/config/sh/libgcc-glibc.ver
@@ -0,0 +1,48 @@
+# Copyright (C) 2002, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# To work around the very problems that now force us to generally create
+# a libgcc.so, glibc reexported a number of routines from libgcc.a.  By
+# choosing the same version tags for these specific routines here, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer the implementation of these routines to libgcc.so via
+# DT_AUXILIARY.
+
+# Note that we cannot use the default libgcc-glibc.ver file on sh,
+# because GLIBC_2.0 does not exist on this architecture, as the first
+# ever glibc release on the platform was GLIBC_2.2.
+
+%exclude {
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%inherit GCC_3.0 GLIBC_2.2
+GLIBC_2.2 {
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
diff --git a/libgcc/config/sh/linux-atomic.S b/libgcc/config/sh/linux-atomic.S
new file mode 100644
index 00000000000..743c61bb76c
--- /dev/null
+++ b/libgcc/config/sh/linux-atomic.S
@@ -0,0 +1,223 @@
+/* Copyright (C) 2006, 2008, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+!! Linux-specific atomic routines for the Renesas / SuperH SH CPUs.
+!! The Linux kernel for SH3/4 implements support for software
+!! atomic sequences.
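+!!
+!! The sequences below follow the kernel's "gUSA" (user-space atomicity)
+!! convention: r0 is set to the end address of the critical sequence and
+!! r15 to minus its length, so that a kernel which preempts the task
+!! while r15 holds such a small negative value can restart the sequence
+!! at r0 + r15 instead of resuming mid-way.  This makes each
+!! load/modify/store triple effectively atomic without locked bus cycles
+!! (a brief description; see the kernel's gUSA support for details).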
+
+#define FUNC(X) .type X,@function
+#define HIDDEN_FUNC(X) FUNC(X); .hidden X
+#define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X
+#define ENDFUNC(X) ENDFUNC0(X)
+
+#if ! __SH5__
+
+#define ATOMIC_TEST_AND_SET(N,T,EXT) \
+ .global __sync_lock_test_and_set_##N; \
+ HIDDEN_FUNC(__sync_lock_test_and_set_##N); \
+ .align 2; \
+__sync_lock_test_and_set_##N:; \
+ mova 1f, r0; \
+ nop; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ mov.##T r5, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r2, r0; \
+ ENDFUNC(__sync_lock_test_and_set_##N)
+
+ATOMIC_TEST_AND_SET (1,b,extu.b)
+ATOMIC_TEST_AND_SET (2,w,extu.w)
+ATOMIC_TEST_AND_SET (4,l,mov)
+
+#define ATOMIC_COMPARE_AND_SWAP(N,T,EXTS,EXT) \
+ .global __sync_val_compare_and_swap_##N; \
+ HIDDEN_FUNC(__sync_val_compare_and_swap_##N); \
+ .align 2; \
+__sync_val_compare_and_swap_##N:; \
+ mova 1f, r0; \
+ EXTS r5, r5; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ cmp/eq r2, r5; \
+ bf 1f; \
+ mov.##T r6, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r2, r0; \
+ ENDFUNC(__sync_val_compare_and_swap_##N)
+
+ATOMIC_COMPARE_AND_SWAP (1,b,exts.b,extu.b)
+ATOMIC_COMPARE_AND_SWAP (2,w,exts.w,extu.w)
+ATOMIC_COMPARE_AND_SWAP (4,l,mov,mov)
+
+#define ATOMIC_BOOL_COMPARE_AND_SWAP(N,T,EXTS) \
+ .global __sync_bool_compare_and_swap_##N; \
+ HIDDEN_FUNC(__sync_bool_compare_and_swap_##N); \
+ .align 2; \
+__sync_bool_compare_and_swap_##N:; \
+ mova 1f, r0; \
+ EXTS r5, r5; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ cmp/eq r2, r5; \
+ bf 1f; \
+ mov.##T r6, @r4; \
+1: mov r1, r15; \
+ rts; \
+ movt r0; \
+ ENDFUNC(__sync_bool_compare_and_swap_##N)
+
+ATOMIC_BOOL_COMPARE_AND_SWAP (1,b,exts.b)
+ATOMIC_BOOL_COMPARE_AND_SWAP (2,w,exts.w)
+ATOMIC_BOOL_COMPARE_AND_SWAP (4,l,mov)
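+
+!! Illustrative use from C: GCC emits calls to these out-of-line helpers
+!! for the __sync builtins when it does not inline them, e.g.
+!!
+!!	static int lock;
+!!	if (__sync_bool_compare_and_swap (&lock, 0, 1))
+!!	  { /* lock acquired */ }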
+
+#define ATOMIC_FETCH_AND_OP(OP,N,T,EXT) \
+ .global __sync_fetch_and_##OP##_##N; \
+ HIDDEN_FUNC(__sync_fetch_and_##OP##_##N); \
+ .align 2; \
+__sync_fetch_and_##OP##_##N:; \
+ mova 1f, r0; \
+ nop; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ mov r5, r3; \
+ OP r2, r3; \
+ mov.##T r3, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r2, r0; \
+ ENDFUNC(__sync_fetch_and_##OP##_##N)
+
+ATOMIC_FETCH_AND_OP(add,1,b,extu.b)
+ATOMIC_FETCH_AND_OP(add,2,w,extu.w)
+ATOMIC_FETCH_AND_OP(add,4,l,mov)
+
+ATOMIC_FETCH_AND_OP(or,1,b,extu.b)
+ATOMIC_FETCH_AND_OP(or,2,w,extu.w)
+ATOMIC_FETCH_AND_OP(or,4,l,mov)
+
+ATOMIC_FETCH_AND_OP(and,1,b,extu.b)
+ATOMIC_FETCH_AND_OP(and,2,w,extu.w)
+ATOMIC_FETCH_AND_OP(and,4,l,mov)
+
+ATOMIC_FETCH_AND_OP(xor,1,b,extu.b)
+ATOMIC_FETCH_AND_OP(xor,2,w,extu.w)
+ATOMIC_FETCH_AND_OP(xor,4,l,mov)
+
+#define ATOMIC_FETCH_AND_COMBOP(OP,OP0,OP1,N,T,EXT) \
+ .global __sync_fetch_and_##OP##_##N; \
+ HIDDEN_FUNC(__sync_fetch_and_##OP##_##N); \
+ .align 2; \
+__sync_fetch_and_##OP##_##N:; \
+ mova 1f, r0; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ mov r5, r3; \
+ OP0 r2, r3; \
+ OP1 r3, r3; \
+ mov.##T r3, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r2, r0; \
+ ENDFUNC(__sync_fetch_and_##OP##_##N)
+
+ATOMIC_FETCH_AND_COMBOP(sub,sub,neg,1,b,extu.b)
+ATOMIC_FETCH_AND_COMBOP(sub,sub,neg,2,w,extu.w)
+ATOMIC_FETCH_AND_COMBOP(sub,sub,neg,4,l,mov)
+
+ATOMIC_FETCH_AND_COMBOP(nand,and,not,1,b,extu.b)
+ATOMIC_FETCH_AND_COMBOP(nand,and,not,2,w,extu.w)
+ATOMIC_FETCH_AND_COMBOP(nand,and,not,4,l,mov)
+
+#define ATOMIC_OP_AND_FETCH(OP,N,T,EXT) \
+ .global __sync_##OP##_and_fetch_##N; \
+ HIDDEN_FUNC(__sync_##OP##_and_fetch_##N); \
+ .align 2; \
+__sync_##OP##_and_fetch_##N:; \
+ mova 1f, r0; \
+ nop; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ mov r5, r3; \
+ OP r2, r3; \
+ mov.##T r3, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r3, r0; \
+ ENDFUNC(__sync_##OP##_and_fetch_##N)
+
+ATOMIC_OP_AND_FETCH(add,1,b,extu.b)
+ATOMIC_OP_AND_FETCH(add,2,w,extu.w)
+ATOMIC_OP_AND_FETCH(add,4,l,mov)
+
+ATOMIC_OP_AND_FETCH(or,1,b,extu.b)
+ATOMIC_OP_AND_FETCH(or,2,w,extu.w)
+ATOMIC_OP_AND_FETCH(or,4,l,mov)
+
+ATOMIC_OP_AND_FETCH(and,1,b,extu.b)
+ATOMIC_OP_AND_FETCH(and,2,w,extu.w)
+ATOMIC_OP_AND_FETCH(and,4,l,mov)
+
+ATOMIC_OP_AND_FETCH(xor,1,b,extu.b)
+ATOMIC_OP_AND_FETCH(xor,2,w,extu.w)
+ATOMIC_OP_AND_FETCH(xor,4,l,mov)
+
+#define ATOMIC_COMBOP_AND_FETCH(OP,OP0,OP1,N,T,EXT) \
+ .global __sync_##OP##_and_fetch_##N; \
+ HIDDEN_FUNC(__sync_##OP##_and_fetch_##N); \
+ .align 2; \
+__sync_##OP##_and_fetch_##N:; \
+ mova 1f, r0; \
+ mov r15, r1; \
+ mov #(0f-1f), r15; \
+0: mov.##T @r4, r2; \
+ mov r5, r3; \
+ OP0 r2, r3; \
+ OP1 r3, r3; \
+ mov.##T r3, @r4; \
+1: mov r1, r15; \
+ rts; \
+ EXT r3, r0; \
+ ENDFUNC(__sync_##OP##_and_fetch_##N)
+
+ATOMIC_COMBOP_AND_FETCH(sub,sub,neg,1,b,extu.b)
+ATOMIC_COMBOP_AND_FETCH(sub,sub,neg,2,w,extu.w)
+ATOMIC_COMBOP_AND_FETCH(sub,sub,neg,4,l,mov)
+
+ATOMIC_COMBOP_AND_FETCH(nand,and,not,1,b,extu.b)
+ATOMIC_COMBOP_AND_FETCH(nand,and,not,2,w,extu.w)
+ATOMIC_COMBOP_AND_FETCH(nand,and,not,4,l,mov)
+
+.section .note.GNU-stack,"",%progbits
+.previous
+
+#endif /* ! __SH5__ */
diff --git a/libgcc/config/sh/t-linux b/libgcc/config/sh/t-linux
index a98fb18e8d1..d0f92405fd8 100644
--- a/libgcc/config/sh/t-linux
+++ b/libgcc/config/sh/t-linux
@@ -1,4 +1,8 @@
-HOST_LIBGCC2_CFLAGS = -fpic -mieee -DNO_FPSCR_VALUES
+LIB1ASMFUNCS_CACHE = _ic_invalidate _ic_invalidate_array
+
+LIB2ADD = $(srcdir)/config/sh/linux-atomic.S
+
+HOST_LIBGCC2_CFLAGS += -mieee -DNO_FPSCR_VALUES
# Override t-slibgcc-elf-ver to export some libgcc symbols with
# the symbol versions that glibc used, and hide some lib1func
@@ -6,32 +10,5 @@ HOST_LIBGCC2_CFLAGS = -fpic -mieee -DNO_FPSCR_VALUES
# the list from scratch.
SHLIB_MAPFILES = \
libgcc-std.ver \
- $(gcc_srcdir)/config/sh/libgcc-excl.ver \
- $(gcc_srcdir)/config/sh/libgcc-glibc.ver
-
-# Override SHLIB_LINK and SHLIB_INSTALL to use linker script
-# libgcc_s.so.
-SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
- -Wl,--soname=@shlib_base_name@.so.1 \
- -Wl,--version-script=@shlib_map_file@ \
- -o @multilib_dir@/@shlib_base_name@.so.1.tmp @multilib_flags@ \
- @shlib_objs@ -lc && \
- rm -f @multilib_dir@/@shlib_base_name@.so && \
- if [ -f @multilib_dir@/@shlib_base_name@.so.1 ]; then \
- mv -f @multilib_dir@/@shlib_base_name@.so.1 \
- @multilib_dir@/@shlib_base_name@.so.1.backup; \
- else true; fi && \
- mv @multilib_dir@/@shlib_base_name@.so.1.tmp \
- @multilib_dir@/@shlib_base_name@.so.1 && \
- (echo "/* GNU ld script"; \
- echo " Use the shared library, but some functions are only in"; \
- echo " the static library. */"; \
- echo "GROUP ( @shlib_base_name@.so.1 libgcc.a )" \
- ) > @multilib_dir@/@shlib_base_name@.so
-SHLIB_INSTALL = \
- $(mkinstalldirs) $(DESTDIR)$(slibdir)@shlib_slibdir_qual@; \
- $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.so.1 \
- $(DESTDIR)$(slibdir)@shlib_slibdir_qual@/@shlib_base_name@.so.1; \
- rm -f $(DESTDIR)$(slibdir)@shlib_slibdir_qual@/@shlib_base_name@.so; \
- $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.so \
- $(DESTDIR)$(slibdir)@shlib_slibdir_qual@/@shlib_base_name@.so
+ $(srcdir)/config/sh/libgcc-excl.ver \
+ $(srcdir)/config/sh/libgcc-glibc.ver
diff --git a/libgcc/config/sh/t-netbsd b/libgcc/config/sh/t-netbsd
new file mode 100644
index 00000000000..d4df407fa16
--- /dev/null
+++ b/libgcc/config/sh/t-netbsd
@@ -0,0 +1,5 @@
+LIB1ASMFUNCS_CACHE = _ic_invalidate
+
+LIB2ADD =
+
+HOST_LIBGCC2_CFLAGS += -mieee
diff --git a/libgcc/config/sh/t-sh b/libgcc/config/sh/t-sh
new file mode 100644
index 00000000000..efbaff8479e
--- /dev/null
+++ b/libgcc/config/sh/t-sh
@@ -0,0 +1,63 @@
+# Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2006, 2008, 2009, 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = sh/lib1funcs.S
+LIB1ASMFUNCS = _ashiftrt _ashiftrt_n _ashiftlt _lshiftrt _movmem \
+ _movmem_i4 _mulsi3 _sdivsi3 _sdivsi3_i4 _udivsi3 _udivsi3_i4 _set_fpscr \
+ _div_table _udiv_qrnnd_16 \
+ $(LIB1ASMFUNCS_CACHE)
+LIB1ASMFUNCS_CACHE = _ic_invalidate _ic_invalidate_array
+
+crt1.o: $(srcdir)/config/sh/crt1.S
+ $(gcc_compile) -c $<
+
+ic_invalidate_array_4-100.o: $(srcdir)/config/sh/lib1funcs.S
+ $(gcc_compile) -c -DL_ic_invalidate_array -DWAYS=1 -DWAY_SIZE=0x2000 $<
+libic_invalidate_array_4-100.a: ic_invalidate_array_4-100.o
+ $(AR_CREATE_FOR_TARGET) $@ $<
+
+ic_invalidate_array_4-200.o: $(srcdir)/config/sh/lib1funcs.S
+ $(gcc_compile) -c -DL_ic_invalidate_array -DWAYS=2 -DWAY_SIZE=0x2000 $<
+libic_invalidate_array_4-200.a: ic_invalidate_array_4-200.o
+ $(AR_CREATE_FOR_TARGET) $@ $<
+
+ic_invalidate_array_4a.o: $(srcdir)/config/sh/lib1funcs.S
+ $(gcc_compile) -c -DL_ic_invalidate_array -D__FORCE_SH4A__ $<
+libic_invalidate_array_4a.a: ic_invalidate_array_4a.o
+ $(AR_CREATE_FOR_TARGET) $@ $<
+
+sdivsi3_i4i-Os-4-200.o: $(srcdir)/config/sh/lib1funcs-Os-4-200.S
+ $(gcc_compile) -c -DL_sdivsi3_i4i $<
+udivsi3_i4i-Os-4-200.o: $(srcdir)/config/sh/lib1funcs-Os-4-200.S
+ $(gcc_compile) -c -DL_udivsi3_i4i $<
+unwind-dw2-Os-4-200.o: $(srcdir)/unwind-dw2.c
+ $(gcc_compile) $(LIBGCC2_CFLAGS) $(vis_hide) -fexceptions -Os -c $<
+
+OBJS_Os_4_200=sdivsi3_i4i-Os-4-200.o udivsi3_i4i-Os-4-200.o unwind-dw2-Os-4-200.o
+libgcc-Os-4-200.a: $(OBJS_Os_4_200)
+ $(AR_CREATE_FOR_TARGET) $@ $(OBJS_Os_4_200)
+
+div_table-4-300.o: $(srcdir)/config/sh/lib1funcs-4-300.S
+ $(gcc_compile) -c -DL_div_table $<
+
+libgcc-4-300.a: div_table-4-300.o
+ $(AR_CREATE_FOR_TARGET) $@ div_table-4-300.o
+
+HOST_LIBGCC2_CFLAGS += -mieee
+
diff --git a/libgcc/config/sh/t-sh64 b/libgcc/config/sh/t-sh64
new file mode 100644
index 00000000000..fa9950e03b2
--- /dev/null
+++ b/libgcc/config/sh/t-sh64
@@ -0,0 +1,6 @@
+LIB1ASMFUNCS = \
+ _sdivsi3 _sdivsi3_i4 _udivsi3 _udivsi3_i4 _set_fpscr \
+ _shcompact_call_trampoline _shcompact_return_trampoline \
+ _shcompact_incoming_args _ic_invalidate _nested_trampoline \
+ _push_pop_shmedia_regs \
+ _udivdi3 _divdi3 _umoddi3 _moddi3 _div_table
diff --git a/libgcc/config/sh/t-superh b/libgcc/config/sh/t-superh
new file mode 100644
index 00000000000..b87aa5a3167
--- /dev/null
+++ b/libgcc/config/sh/t-superh
@@ -0,0 +1,11 @@
+# Compile crt1-mmu.o as crt1.o with -DMMU_SUPPORT
+crt1-mmu.o: $(srcdir)/config/sh/crt1.S
+ $(gcc_compile) -c -DMMU_SUPPORT $<
+
+# Compile gcrt1-mmu.o as crt1-mmu.o with -DPROFILE
+gcrt1-mmu.o: $(srcdir)/config/sh/crt1.S
+ $(gcc_compile) -c -DPROFILE -DMMU_SUPPORT $<
+
+# For sh4-400: Compile gcrt1.o as crt1.o with -DPROFILE
+gcrt1.o: $(srcdir)/config/sh/crt1.S
+ $(gcc_compile) -c -DPROFILE $<
diff --git a/libgcc/config/sparc/sol2-ci.S b/libgcc/config/sparc/crti.S
index a89cc20c86f..a5c1da5e86b 100644
--- a/libgcc/config/sparc/sol2-ci.S
+++ b/libgcc/config/sparc/crti.S
@@ -1,4 +1,4 @@
-! crti.s for solaris 2.0.
+! crti.S for SPARC
! Copyright (C) 1992, 2008, 2009, 2011 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
diff --git a/libgcc/config/sparc/sol2-cn.S b/libgcc/config/sparc/crtn.S
index 08862a78e02..1084628c6f6 100644
--- a/libgcc/config/sparc/sol2-cn.S
+++ b/libgcc/config/sparc/crtn.S
@@ -1,4 +1,4 @@
-! crtn.s for solaris 2.0.
+! crtn.S for SPARC
! Copyright (C) 1992, 2008, 2009, 2011 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
diff --git a/libgcc/config/sparc/lb1spc.S b/libgcc/config/sparc/lb1spc.S
new file mode 100644
index 00000000000..b60bd5740e7
--- /dev/null
+++ b/libgcc/config/sparc/lb1spc.S
@@ -0,0 +1,784 @@
+/* This is an assembly language implementation of mulsi3, divsi3, and modsi3
+ for the SPARC processor.
+
+ These routines are derived from the SPARC Architecture Manual, version 8,
+ slightly edited to match the desired calling convention, and also to
+ optimize them for our purposes. */
+
+#ifdef L_mulsi3
+.text
+ .align 4
+ .global .umul
+ .proc 4
+.umul:
+ or %o0, %o1, %o4 ! logical or of multiplier and multiplicand
+ mov %o0, %y ! multiplier to Y register
+ andncc %o4, 0xfff, %o5 ! mask out lower 12 bits
+ be mul_shortway ! can do it the short way
+ andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc
+ !
+ ! long multiply
+ !
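+ ! Each mulscc step consumes one multiplier bit from the Y register:
+ ! roughly, if the examined bit is set the multiplicand is added to the
+ ! partial product, and the partial-product/Y pair is then shifted right
+ ! one bit as a 64-bit unit, so 32 steps plus a final shift-only step
+ ! develop the full product (a simplified description).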
+ mulscc %o4, %o1, %o4 ! first iteration of 33
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 32nd iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ ! the upper 32 bits of product are wrong, but we do not care
+ retl
+ rd %y, %o0
+ !
+ ! short multiply
+ !
+mul_shortway:
+ mulscc %o4, %o1, %o4 ! first iteration of 13
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 12th iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ rd %y, %o5
+ sll %o4, 12, %o4 ! left shift partial product by 12 bits
+ srl %o5, 20, %o5 ! right shift partial product by 20 bits
+ retl
+ or %o5, %o4, %o0 ! merge for true product
+#endif
+
+#ifdef L_divsi3
+/*
+ * Division and remainder, from Appendix E of the SPARC Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ * .div name of function to generate
+ * div div=div => %o0 / %o1; div=rem => %o0 % %o1
+ * true true=true => signed; true=false => unsigned
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
+ .global .udiv
+ .align 4
+ .proc 4
+ .text
+.udiv:
+ b ready_to_divide
+ mov 0, %g3 ! result is always positive
+
+ .global .div
+ .align 4
+ .proc 4
+ .text
+.div:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge ready_to_divide ! no, go do the divide
+ xor %o1, %o0, %g3 ! compute sign in any case
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge ready_to_divide ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+
+
+ready_to_divide:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 ! ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: we know we need to shift V into
+	! the top decade, so we do not even bother to compare it to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+	! We get here if %o5 overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-SPARC manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ sub %o2, 1, %o2
+
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o2, %o2
+1:
+ retl
+ mov %o2, %o0
+#endif
+
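+/* For reference, the radix-16 non-restoring division above develops four
+   quotient bits per divloop pass; taken one bit per step it is roughly
+   the following C (an illustrative sketch, not used by the build; it
+   assumes a dividend below 2^31 and a nonzero divisor, since the assembly
+   handles the large-dividend case and the zero trap separately, and the
+   same structure repeats in the .urem/.rem routines below):
+
+   unsigned int
+   udivmod_model (unsigned int r, unsigned int v, unsigned int *remp)
+   {
+     unsigned int q = 0;
+     int iter = 0, rr = (int) r;
+
+     // scale the comparand V up until it exceeds the dividend R
+     while (v <= r)
+       {
+         v <<= 1;
+         iter++;
+       }
+
+     // non-restoring loop: R may go negative; each step appends a
+     // signed quotient digit (+1 or -1)
+     while (iter-- > 0)
+       {
+         v >>= 1;
+         if (rr >= 0)
+           {
+             rr -= (int) v;
+             q = (q << 1) + 1;
+           }
+         else
+           {
+             rr += (int) v;
+             q = (q << 1) - 1;
+           }
+       }
+     if (rr < 0)          // non-restoring fixup (one correction only)
+       {
+         rr += (int) v;   // for .urem: this restores the remainder
+         q -= 1;          // for .udiv: this corrects the quotient
+       }
+     if (remp)
+       *remp = (unsigned int) rr;
+     return q;
+   }  */
+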
+#ifdef L_modsi3
+/* This implementation was taken from glibc:
+ *
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
+.text
+ .align 4
+ .global .urem
+ .proc 4
+.urem:
+ b divide
+ mov 0, %g3 ! result always positive
+
+ .align 4
+ .global .rem
+ .proc 4
+.rem:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge 2f ! no, go do the divide
+ mov %o0, %g3 ! sign of remainder matches %o0
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge 2f ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+2:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+divide:
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 !ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: we know we need to shift V into
+	! the top decade, so we do not even bother to compare it to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+	! We get here if %o5 overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-SPARC manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ add %o3, %o1, %o3
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o3, %o3
+1:
+ retl
+ mov %o3, %o0
+
+#endif
+
diff --git a/libgcc/config/sparc/libgcc-glibc.ver b/libgcc/config/sparc/libgcc-glibc.ver
new file mode 100644
index 00000000000..91138d3795e
--- /dev/null
+++ b/libgcc/config/sparc/libgcc-glibc.ver
@@ -0,0 +1,93 @@
+# Copyright (C) 2002, 2006, 2008 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+%exclude {
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%ifdef __arch64__
+%define GLIBC_VER GLIBC_2.2
+%else
+%define GLIBC_VER GLIBC_2.0
+%endif
+%inherit GCC_3.0 GLIBC_VER
+GLIBC_VER {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+
+%if !defined (__arch64__) && defined (__LONG_DOUBLE_128__)
+
+# long double 128 bit support from 32-bit libgcc_s.so.1 is only available
+# when configured with --with-long-double-128. Make sure all the
+# symbols are available at @@GCC_LDBL_* versions to make it clear
+# there is a configurable symbol set.
+
+%exclude {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%inherit GCC_LDBL_3.0 GCC_3.0
+GCC_LDBL_3.0 {
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+}
+
+%inherit GCC_LDBL_4.0.0 GCC_4.0.0
+GCC_LDBL_4.0.0 {
+ __divtc3
+ __multc3
+ __powitf2
+}
+
+%endif
diff --git a/libgcc/config/sparc/t-linux b/libgcc/config/sparc/t-linux
new file mode 100644
index 00000000000..474f1755ea3
--- /dev/null
+++ b/libgcc/config/sparc/t-linux
@@ -0,0 +1,4 @@
+# Override t-slibgcc-elf-ver to export some libgcc symbols with
+# the symbol versions that glibc used.
+# Avoid the t-linux version file.
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/sparc/libgcc-glibc.ver
diff --git a/libgcc/config/sparc/t-linux64 b/libgcc/config/sparc/t-linux64
new file mode 100644
index 00000000000..6583fe25ac7
--- /dev/null
+++ b/libgcc/config/sparc/t-linux64
@@ -0,0 +1 @@
+CRTSTUFF_T_CFLAGS = -mcmodel=medany
diff --git a/libgcc/config/sparc/t-softmul b/libgcc/config/sparc/t-softmul
index 49faae47c53..7142200600f 100644
--- a/libgcc/config/sparc/t-softmul
+++ b/libgcc/config/sparc/t-softmul
@@ -1,2 +1,2 @@
-LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMSRC = sparc/lb1spc.S
LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
diff --git a/libgcc/config/sparc/t-sol2 b/libgcc/config/sparc/t-sol2
index 372522bd0e4..ea3fa63a272 100644
--- a/libgcc/config/sparc/t-sol2
+++ b/libgcc/config/sparc/t-sol2
@@ -3,4 +3,4 @@
# to produce a shared library, but since we don't know ahead of time when
# we will be doing that, we just always use -fPIC when compiling the
# routines in crtstuff.c.
-CRTSTUFF_T_CFLAGS = -fPIC
+CRTSTUFF_T_CFLAGS = $(PICFLAG)
diff --git a/libgcc/config/spu/cache.S b/libgcc/config/spu/cache.S
new file mode 100644
index 00000000000..9ffb6a0d194
--- /dev/null
+++ b/libgcc/config/spu/cache.S
@@ -0,0 +1,43 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .data
+ .p2align 7
+ .global __cache
+__cache:
+ .rept __CACHE_SIZE__ * 8
+ .fill 128
+ .endr
+
+ .p2align 7
+ .global __cache_tag_array
+__cache_tag_array:
+ .rept __CACHE_SIZE__ * 2
+ .long 1, 1, 1, 1
+ .fill 128-16
+ .endr
+__end_cache_tag_array:
+
+ .globl __cache_tag_array_size
+ .set __cache_tag_array_size, __end_cache_tag_array-__cache_tag_array
+
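+/* Size check (an illustrative note): with the struct __cache_tag_array
+   layout in cachemgr.c, each 128-byte entry above describes one four-way
+   set, so the tag array provides __CACHE_SIZE__ * 2 sets covering the
+   __CACHE_SIZE__ * 8 lines of 128 bytes (i.e. __CACHE_SIZE__ KB) reserved
+   for __cache, which is why the two .rept counts differ by a factor of 4.  */
+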
diff --git a/libgcc/config/spu/cachemgr.c b/libgcc/config/spu/cachemgr.c
new file mode 100644
index 00000000000..e7abd5e62db
--- /dev/null
+++ b/libgcc/config/spu/cachemgr.c
@@ -0,0 +1,438 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <spu_mfcio.h>
+#include <spu_internals.h>
+#include <spu_intrinsics.h>
+#include <spu_cache.h>
+
+extern unsigned long long __ea_local_store;
+extern char __cache_tag_array_size;
+
+#define LINE_SIZE 128
+#define TAG_MASK (LINE_SIZE - 1)
+
+#define WAYS 4
+#define SET_MASK ((int) &__cache_tag_array_size - LINE_SIZE)
+
+#define CACHE_LINES ((int) &__cache_tag_array_size / \
+ sizeof (struct __cache_tag_array) * WAYS)
+
+struct __cache_tag_array
+{
+ unsigned int tag_lo[WAYS];
+ unsigned int tag_hi[WAYS];
+ void *base[WAYS];
+ int reserved[WAYS];
+ vector unsigned short dirty_bits[WAYS];
+};
+
+extern struct __cache_tag_array __cache_tag_array[];
+extern char __cache[];
+
+/* In order to make the code seem a little cleaner, and to avoid having
+ 64/32 bit ifdefs all over the place, we use macros. */
+
+#ifdef __EA64__
+typedef unsigned long long addr;
+
+#define CHECK_TAG(_entry, _way, _tag) \
+ ((_entry)->tag_lo[(_way)] == ((_tag) & 0xFFFFFFFF) \
+ && (_entry)->tag_hi[(_way)] == ((_tag) >> 32))
+
+#define GET_TAG(_entry, _way) \
+ ((unsigned long long)(_entry)->tag_hi[(_way)] << 32 \
+ | (unsigned long long)(_entry)->tag_lo[(_way)])
+
+#define SET_TAG(_entry, _way, _tag) \
+ (_entry)->tag_lo[(_way)] = (_tag) & 0xFFFFFFFF; \
+ (_entry)->tag_hi[(_way)] = (_tag) >> 32
+
+#else /*__EA32__*/
+typedef unsigned long addr;
+
+#define CHECK_TAG(_entry, _way, _tag) \
+ ((_entry)->tag_lo[(_way)] == (_tag))
+
+#define GET_TAG(_entry, _way) \
+ ((_entry)->tag_lo[(_way)])
+
+#define SET_TAG(_entry, _way, _tag) \
+ (_entry)->tag_lo[(_way)] = (_tag)
+
+#endif
+
+/* In GET_ENTRY, we cast away the high 32 bits,
+ as the tag is only in the low 32. */
+
+#define GET_ENTRY(_addr) \
+ ((struct __cache_tag_array *) \
+ si_to_uint (si_a (si_and (si_from_uint ((unsigned int) (addr) (_addr)), \
+ si_from_uint (SET_MASK)), \
+ si_from_uint ((unsigned int) __cache_tag_array))))
+
+#define GET_CACHE_LINE(_addr, _way) \
+  ((void *) (__cache + ((_addr) & SET_MASK) * WAYS) + ((_way) * LINE_SIZE))
+
+#define CHECK_DIRTY(_vec) (si_to_uint (si_orx ((qword) (_vec))))
+#define SET_EMPTY(_entry, _way) ((_entry)->tag_lo[(_way)] = 1)
+#define CHECK_EMPTY(_entry, _way) ((_entry)->tag_lo[(_way)] == 1)
+
+#define LS_FLAG 0x80000000
+#define SET_IS_LS(_entry, _way) ((_entry)->reserved[(_way)] |= LS_FLAG)
+#define CHECK_IS_LS(_entry, _way) ((_entry)->reserved[(_way)] & LS_FLAG)
+#define GET_LRU(_entry, _way) ((_entry)->reserved[(_way)] & ~LS_FLAG)
+
+static int dma_tag = 32;
+
+static void
+__cache_evict_entry (struct __cache_tag_array *entry, int way)
+{
+ addr tag = GET_TAG (entry, way);
+
+ if (CHECK_DIRTY (entry->dirty_bits[way]) && !CHECK_IS_LS (entry, way))
+ {
+#ifdef NONATOMIC
+ /* Non-atomic writes. */
+ unsigned int oldmask, mach_stat;
+ char *line = ((void *) 0);
+
+ /* Enter critical section. */
+ mach_stat = spu_readch (SPU_RdMachStat);
+ spu_idisable ();
+
+ /* Issue DMA request. */
+ line = GET_CACHE_LINE (entry->tag_lo[way], way);
+ mfc_put (line, tag, LINE_SIZE, dma_tag, 0, 0);
+
+ /* Wait for DMA completion. */
+ oldmask = mfc_read_tag_mask ();
+ mfc_write_tag_mask (1 << dma_tag);
+ mfc_read_tag_status_all ();
+ mfc_write_tag_mask (oldmask);
+
+ /* Leave critical section. */
+ if (__builtin_expect (mach_stat & 1, 0))
+ spu_ienable ();
+#else
+ /* Allocate a buffer large enough that we know it has 128 bytes
+ that are 128 byte aligned (for DMA). */
+
+ char buffer[LINE_SIZE + 127];
+ qword *buf_ptr = (qword *) (((unsigned int) (buffer) + 127) & ~127);
+ qword *line = GET_CACHE_LINE (entry->tag_lo[way], way);
+ qword bits;
+ unsigned int mach_stat;
+
+ /* Enter critical section. */
+ mach_stat = spu_readch (SPU_RdMachStat);
+ spu_idisable ();
+
+ do
+ {
+	  /* We atomically read the current memory into a buffer,
+	     modify the dirty bytes in the buffer, and write it
+	     back.  If the writeback fails, loop and try again.  */
+
+ mfc_getllar (buf_ptr, tag, 0, 0);
+ mfc_read_atomic_status ();
+
+ /* The method we're using to write 16 dirty bytes into
+ the buffer at a time uses fsmb which in turn uses
+ the least significant 16 bits of word 0, so we
+ load the bits and rotate so that the first bit of
+ the bitmap is in the first bit that fsmb will use. */
+
+ bits = (qword) entry->dirty_bits[way];
+ bits = si_rotqbyi (bits, -2);
+
+	  /* si_fsmb creates the mask of dirty bytes.
+	     Use selb to pick out the appropriate bits.  */
+ buf_ptr[0] = si_selb (buf_ptr[0], line[0], si_fsmb (bits));
+
+ /* Rotate to next 16 byte section of cache. */
+ bits = si_rotqbyi (bits, 2);
+
+ buf_ptr[1] = si_selb (buf_ptr[1], line[1], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[2] = si_selb (buf_ptr[2], line[2], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[3] = si_selb (buf_ptr[3], line[3], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[4] = si_selb (buf_ptr[4], line[4], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[5] = si_selb (buf_ptr[5], line[5], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[6] = si_selb (buf_ptr[6], line[6], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+ buf_ptr[7] = si_selb (buf_ptr[7], line[7], si_fsmb (bits));
+ bits = si_rotqbyi (bits, 2);
+
+ mfc_putllc (buf_ptr, tag, 0, 0);
+ }
+ while (mfc_read_atomic_status ());
+
+ /* Leave critical section. */
+ if (__builtin_expect (mach_stat & 1, 0))
+ spu_ienable ();
+#endif
+ }
+
+  /* In any case, mark the lo tag with 1, which denotes empty.  */
+ SET_EMPTY (entry, way);
+ entry->dirty_bits[way] = (vector unsigned short) si_from_uint (0);
+}
+
+void
+__cache_evict (__ea void *ea)
+{
+ addr tag = (addr) ea & ~TAG_MASK;
+ struct __cache_tag_array *entry = GET_ENTRY (ea);
+ int i = 0;
+
+  /* Cycle through all the ways the address could occupy and evict
+     any way whose tag matches.  */
+
+ for (i = 0; i < WAYS; i++)
+ if (CHECK_TAG (entry, i, tag))
+ __cache_evict_entry (entry, i);
+}
+
+static void *
+__cache_fill (int way, addr tag)
+{
+ unsigned int oldmask, mach_stat;
+ char *line = ((void *) 0);
+
+ /* Reserve our DMA tag. */
+ if (dma_tag == 32)
+ dma_tag = mfc_tag_reserve ();
+
+ /* Enter critical section. */
+ mach_stat = spu_readch (SPU_RdMachStat);
+ spu_idisable ();
+
+ /* Issue DMA request. */
+ line = GET_CACHE_LINE (tag, way);
+ mfc_get (line, tag, LINE_SIZE, dma_tag, 0, 0);
+
+ /* Wait for DMA completion. */
+ oldmask = mfc_read_tag_mask ();
+ mfc_write_tag_mask (1 << dma_tag);
+ mfc_read_tag_status_all ();
+ mfc_write_tag_mask (oldmask);
+
+ /* Leave critical section. */
+ if (__builtin_expect (mach_stat & 1, 0))
+ spu_ienable ();
+
+ return (void *) line;
+}
+
+static void
+__cache_miss (__ea void *ea, struct __cache_tag_array *entry, int way)
+{
+
+ addr tag = (addr) ea & ~TAG_MASK;
+ unsigned int lru = 0;
+ int i = 0;
+ int idx = 0;
+
+  /* If way >= 4, then there are no empty slots, so we must evict
+     the least recently used entry. */
+ if (way >= 4)
+ {
+ for (i = 0; i < WAYS; i++)
+ {
+ if (GET_LRU (entry, i) > lru)
+ {
+ lru = GET_LRU (entry, i);
+ idx = i;
+ }
+ }
+ __cache_evict_entry (entry, idx);
+ way = idx;
+ }
+
+  /* Set the empty entry's tag and fill its cache line. */
+
+ SET_TAG (entry, way, tag);
+ entry->reserved[way] = 0;
+
+ /* Check if the address is just an effective address within the
+ SPU's local store. */
+
+  /* Because the LS is not 256k aligned, we can't simply mask and
+     compare here, so we must check the whole range.  */
+
+ if ((addr) ea >= (addr) __ea_local_store
+ && (addr) ea < (addr) (__ea_local_store + 0x40000))
+ {
+ SET_IS_LS (entry, way);
+ entry->base[way] =
+ (void *) ((unsigned int) ((addr) ea -
+ (addr) __ea_local_store) & ~0x7f);
+ }
+ else
+ {
+ entry->base[way] = __cache_fill (way, tag);
+ }
+}
+
+void *
+__cache_fetch_dirty (__ea void *ea, int n_bytes_dirty)
+{
+#ifdef __EA64__
+ unsigned int tag_hi;
+ qword etag_hi;
+#endif
+ unsigned int tag_lo;
+ struct __cache_tag_array *entry;
+
+ qword etag_lo;
+ qword equal;
+ qword bit_mask;
+ qword way;
+
+  /* In this first chunk, we merely compute the entry pointer and the tag.  */
+
+ entry = GET_ENTRY (ea);
+
+#ifndef __EA64__
+ tag_lo =
+ si_to_uint (si_andc
+ (si_shufb
+ (si_from_uint ((addr) ea), si_from_uint (0),
+ si_from_uint (0x00010203)), si_from_uint (TAG_MASK)));
+#else
+ tag_lo =
+ si_to_uint (si_andc
+ (si_shufb
+ (si_from_ullong ((addr) ea), si_from_uint (0),
+ si_from_uint (0x04050607)), si_from_uint (TAG_MASK)));
+
+ tag_hi =
+ si_to_uint (si_shufb
+ (si_from_ullong ((addr) ea), si_from_uint (0),
+ si_from_uint (0x00010203)));
+#endif
+
+ /* Increment LRU in reserved bytes. */
+ si_stqd (si_ai (si_lqd (si_from_ptr (entry), 48), 1),
+ si_from_ptr (entry), 48);
+
+missreturn:
+ /* Check if the entry's lo_tag is equal to the address' lo_tag. */
+ etag_lo = si_lqd (si_from_ptr (entry), 0);
+ equal = si_ceq (etag_lo, si_from_uint (tag_lo));
+#ifdef __EA64__
+ /* And the high tag too. */
+ etag_hi = si_lqd (si_from_ptr (entry), 16);
+ equal = si_and (equal, (si_ceq (etag_hi, si_from_uint (tag_hi))));
+#endif
+
+ if ((si_to_uint (si_orx (equal)) == 0))
+ goto misshandler;
+
+ if (n_bytes_dirty)
+ {
+ /* way = 0x40,0x50,0x60,0x70 for each way, which is also the
+ offset of the appropriate dirty bits. */
+ way = si_shli (si_clz (si_gbb (equal)), 2);
+
+ /* To create the bit_mask, we set it to all 1s (uint -1), then we
+ shift it over (128 - n_bytes_dirty) times. */
+
+ bit_mask = si_from_uint (-1);
+
+ bit_mask =
+ si_shlqby (bit_mask, si_from_uint ((LINE_SIZE - n_bytes_dirty) / 8));
+
+ bit_mask =
+ si_shlqbi (bit_mask, si_from_uint ((LINE_SIZE - n_bytes_dirty) % 8));
+
+ /* Rotate it around to the correct offset. */
+ bit_mask =
+ si_rotqby (bit_mask,
+ si_from_uint (-1 * ((addr) ea & TAG_MASK) / 8));
+
+ bit_mask =
+ si_rotqbi (bit_mask,
+ si_from_uint (-1 * ((addr) ea & TAG_MASK) % 8));
+
+ /* Update the dirty bits. */
+ si_stqx (si_or (si_lqx (si_from_ptr (entry), way), bit_mask),
+ si_from_ptr (entry), way);
+ };
+
+ /* We've definitely found the right entry, set LRU (reserved) to 0
+ maintaining the LS flag (MSB). */
+
+ si_stqd (si_andc
+ (si_lqd (si_from_ptr (entry), 48),
+ si_and (equal, si_from_uint (~(LS_FLAG)))),
+ si_from_ptr (entry), 48);
+
+ return (void *)
+ si_to_uint (si_a
+ (si_orx
+ (si_and (si_lqd (si_from_ptr (entry), 32), equal)),
+ si_from_uint (((unsigned int) (addr) ea) & TAG_MASK)));
+
+misshandler:
+ equal = si_ceqi (etag_lo, 1);
+ __cache_miss (ea, entry, (si_to_uint (si_clz (si_gbb (equal))) - 16) >> 2);
+ goto missreturn;
+}
+
+void *
+__cache_fetch (__ea void *ea)
+{
+ return __cache_fetch_dirty (ea, 0);
+}
+
+void
+__cache_touch (__ea void *ea __attribute__ ((unused)))
+{
+ /* NO-OP for now. */
+}
+
+void __cache_flush (void) __attribute__ ((destructor));
+void
+__cache_flush (void)
+{
+ struct __cache_tag_array *entry = __cache_tag_array;
+ unsigned int i;
+ int j;
+
+ /* Cycle through each cache entry and evict all used ways. */
+
+ for (i = 0; i < CACHE_LINES / WAYS; i++)
+ {
+ for (j = 0; j < WAYS; j++)
+ if (!CHECK_EMPTY (entry, j))
+ __cache_evict_entry (entry, j);
+
+ entry++;
+ }
+}
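+
+/* For reference, the SIMD tag compare in __cache_fetch_dirty above is
+   equivalent to the following scalar lookup over the four ways (an
+   illustrative sketch, not used by the build; it ignores the 64-bit
+   tag_hi comparison and the dirty-bit bookkeeping):
+
+   void *
+   cache_lookup_model (struct __cache_tag_array *entry, unsigned int tag_lo)
+   {
+     int way;
+     for (way = 0; way < WAYS; way++)
+       if (entry->tag_lo[way] == tag_lo)   // hit: tag matches this way
+         return entry->base[way];          // local-store copy of the line
+     return 0;                             // miss: a way must be filled
+   }  */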
diff --git a/libgcc/config/spu/divmodti4.c b/libgcc/config/spu/divmodti4.c
new file mode 100644
index 00000000000..c63fb6b393c
--- /dev/null
+++ b/libgcc/config/spu/divmodti4.c
@@ -0,0 +1,188 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <spu_intrinsics.h>
+
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+typedef int TItype __attribute__ ((mode (TI)));
+TItype __divti3 (TItype u, TItype v);
+TItype __modti3 (TItype u, TItype v);
+UTItype __udivti3 (UTItype u, UTItype v);
+UTItype __umodti3 (UTItype u, UTItype v);
+UTItype __udivmodti4 (UTItype u, UTItype v, UTItype *w);
+
+union qword_UTItype
+ {
+ qword q;
+ UTItype t;
+ };
+
+inline static qword
+si_from_UTItype (UTItype t)
+{
+ union qword_UTItype u;
+ u.t = t;
+ return u.q;
+}
+
+inline static UTItype
+si_to_UTItype (qword q)
+{
+ union qword_UTItype u;
+ u.q = q;
+ return u.t;
+}
+
+inline static unsigned int
+count_leading_zeros (UTItype x)
+{
+ qword c = si_clz (*(qword *) & x);
+ qword cmp0 = si_cgti (c, 31);
+ qword cmp1 = si_and (cmp0, si_shlqbyi (cmp0, 4));
+ qword cmp2 = si_and (cmp1, si_shlqbyi (cmp0, 8));
+ qword s = si_a (c, si_and (cmp0, si_shlqbyi (c, 4)));
+ s = si_a (s, si_and (cmp1, si_shlqbyi (c, 8)));
+ s = si_a (s, si_and (cmp2, si_shlqbyi (c, 12)));
+ return si_to_uint (s);
+}
+
+/* Based on the implementation of udivmodsi4, which is essentially
+ * an optimized version of libgcc's udivmodsi4.c:
+ clz %7,%2
+ clz %4,%1
+ il %5,1
+ fsmbi %0,0
+ sf %7,%4,%7
+ ori %3,%1,0
+ shl %5,%5,%7
+ shl %4,%2,%7
+1: or %8,%0,%5
+ rotmi %5,%5,-1
+ clgt %6,%4,%3
+ sf %7,%4,%3
+ rotmi %4,%4,-1
+ selb %0,%8,%0,%6
+ selb %3,%7,%3,%6
+3: brnz %5,1b
+ */
+
+UTItype
+__udivmodti4 (UTItype num, UTItype den, UTItype * rp)
+{
+ qword shift =
+ si_from_uint (count_leading_zeros (den) - count_leading_zeros (num));
+ qword n0 = si_from_UTItype (num);
+ qword d0 = si_from_UTItype (den);
+ qword bit = si_andi (si_fsmbi (1), 1);
+ qword r0 = si_il (0);
+ qword m1 = si_fsmbi (0x000f);
+ qword mask, r1, n1;
+
+ d0 = si_shlqbybi (si_shlqbi (d0, shift), shift);
+ bit = si_shlqbybi (si_shlqbi (bit, shift), shift);
+
+ do
+ {
+ r1 = si_or (r0, bit);
+
+ // n1 = n0 - d0 in TImode
+ n1 = si_bg (d0, n0);
+ n1 = si_shlqbyi (n1, 4);
+ n1 = si_sf (m1, n1);
+ n1 = si_bgx (d0, n0, n1);
+ n1 = si_shlqbyi (n1, 4);
+ n1 = si_sf (m1, n1);
+ n1 = si_bgx (d0, n0, n1);
+ n1 = si_shlqbyi (n1, 4);
+ n1 = si_sf (m1, n1);
+ n1 = si_sfx (d0, n0, n1);
+
+ mask = si_fsm (si_cgti (n1, -1));
+ r0 = si_selb (r0, r1, mask);
+ n0 = si_selb (n0, n1, mask);
+ bit = si_rotqmbii (bit, -1);
+ d0 = si_rotqmbii (d0, -1);
+ }
+ while (si_to_uint (si_orx (bit)));
+ if (rp)
+ *rp = si_to_UTItype (n0);
+ return si_to_UTItype (r0);
+}
+
+UTItype
+__udivti3 (UTItype n, UTItype d)
+{
+ return __udivmodti4 (n, d, (UTItype *)0);
+}
+
+UTItype
+__umodti3 (UTItype n, UTItype d)
+{
+ UTItype w;
+ __udivmodti4 (n, d, &w);
+ return w;
+}
+
+TItype
+__divti3 (TItype n, TItype d)
+{
+ int c = 0;
+ TItype w;
+
+ if (n < 0)
+ {
+ c = ~c;
+ n = -n;
+ }
+ if (d < 0)
+ {
+ c = ~c;
+ d = -d;
+ }
+
+ w = __udivmodti4 (n, d, (UTItype *)0);
+ if (c)
+ w = -w;
+ return w;
+}
+
+TItype
+__modti3 (TItype n, TItype d)
+{
+ int c = 0;
+ TItype w;
+
+ if (n < 0)
+ {
+ c = ~c;
+ n = -n;
+ }
+ if (d < 0)
+ {
+ c = ~c;
+ d = -d;
+ }
+
+ __udivmodti4 (n, d, (UTItype *) &w);
+ if (c)
+ w = -w;
+ return w;
+}
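+
+/* For reference, the SIMD loop in __udivmodti4 above is the classic
+   shift-and-subtract algorithm; on 64-bit scalars it is roughly the
+   following C (an illustrative sketch, not used by the build; assumes
+   den != 0):
+
+   unsigned long long
+   udivmod64_model (unsigned long long num, unsigned long long den,
+                    unsigned long long *rp)
+   {
+     int shift = __builtin_clzll (den) - __builtin_clzll (num | 1);
+     unsigned long long bit = 1, quo = 0;
+
+     if (shift > 0)
+       {
+         den <<= shift;   // align the divisor under the dividend's top bit
+         bit <<= shift;   // bit marks the quotient bit being developed
+       }
+     while (bit)
+       {
+         if (num >= den)
+           {
+             num -= den;
+             quo |= bit;
+           }
+         bit >>= 1;
+         den >>= 1;
+       }
+     if (rp)
+       *rp = num;         // remainder
+     return quo;          // quotient
+   }  */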
diff --git a/libgcc/config/spu/divv2df3.c b/libgcc/config/spu/divv2df3.c
new file mode 100644
index 00000000000..9d5e1a594e1
--- /dev/null
+++ b/libgcc/config/spu/divv2df3.c
@@ -0,0 +1,195 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <spu_intrinsics.h>
+
+vector double __divv2df3 (vector double a_in, vector double b_in);
+
+/* __divv2df3 divides the vector dividend a by the vector divisor b and
+ returns the resulting vector quotient. Maximum error about 0.5 ulp
+ over entire double range including denorms, compared to true result
+ in round-to-nearest rounding mode. Handles Inf or NaN operands and
+ results correctly. */
+
+vector double
+__divv2df3 (vector double a_in, vector double b_in)
+{
+ /* Variables */
+ vec_int4 exp, exp_bias;
+ vec_uint4 no_underflow, overflow;
+ vec_float4 mant_bf, inv_bf;
+ vec_ullong2 exp_a, exp_b;
+ vec_ullong2 a_nan, a_zero, a_inf, a_denorm, a_denorm0;
+ vec_ullong2 b_nan, b_zero, b_inf, b_denorm, b_denorm0;
+ vec_ullong2 nan;
+ vec_uint4 a_exp, b_exp;
+ vec_ullong2 a_mant_0, b_mant_0;
+ vec_ullong2 a_exp_1s, b_exp_1s;
+ vec_ullong2 sign_exp_mask;
+
+ vec_double2 a, b;
+ vec_double2 mant_a, mant_b, inv_b, q0, q1, q2, mult;
+
+ /* Constants */
+ vec_uint4 exp_mask_u32 = spu_splats((unsigned int)0x7FF00000);
+ vec_uchar16 splat_hi = (vec_uchar16){0,1,2,3, 0,1,2,3, 8, 9,10,11, 8,9,10,11};
+ vec_uchar16 swap_32 = (vec_uchar16){4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
+ vec_ullong2 exp_mask = spu_splats(0x7FF0000000000000ULL);
+ vec_ullong2 sign_mask = spu_splats(0x8000000000000000ULL);
+ vec_float4 onef = spu_splats(1.0f);
+ vec_double2 one = spu_splats(1.0);
+ vec_double2 exp_53 = (vec_double2)spu_splats(0x0350000000000000ULL);
+
+ sign_exp_mask = spu_or(sign_mask, exp_mask);
+
+ /* Extract the floating point components from each of the operands including
+ * exponent and mantissa.
+ */
+ a_exp = (vec_uint4)spu_and((vec_uint4)a_in, exp_mask_u32);
+ a_exp = spu_shuffle(a_exp, a_exp, splat_hi);
+ b_exp = (vec_uint4)spu_and((vec_uint4)b_in, exp_mask_u32);
+ b_exp = spu_shuffle(b_exp, b_exp, splat_hi);
+
+ a_mant_0 = (vec_ullong2)spu_cmpeq((vec_uint4)spu_andc((vec_ullong2)a_in, sign_exp_mask), 0);
+ a_mant_0 = spu_and(a_mant_0, spu_shuffle(a_mant_0, a_mant_0, swap_32));
+
+ b_mant_0 = (vec_ullong2)spu_cmpeq((vec_uint4)spu_andc((vec_ullong2)b_in, sign_exp_mask), 0);
+ b_mant_0 = spu_and(b_mant_0, spu_shuffle(b_mant_0, b_mant_0, swap_32));
+
+ a_exp_1s = (vec_ullong2)spu_cmpeq(a_exp, exp_mask_u32);
+ b_exp_1s = (vec_ullong2)spu_cmpeq(b_exp, exp_mask_u32);
+
+  /* Identify all possible special values that must be accommodated, including:
+ * +-denorm, +-0, +-infinity, and NaNs.
+ */
+ a_denorm0= (vec_ullong2)spu_cmpeq(a_exp, 0);
+ a_nan = spu_andc(a_exp_1s, a_mant_0);
+ a_zero = spu_and (a_denorm0, a_mant_0);
+ a_inf = spu_and (a_exp_1s, a_mant_0);
+ a_denorm = spu_andc(a_denorm0, a_zero);
+
+ b_denorm0= (vec_ullong2)spu_cmpeq(b_exp, 0);
+ b_nan = spu_andc(b_exp_1s, b_mant_0);
+ b_zero = spu_and (b_denorm0, b_mant_0);
+ b_inf = spu_and (b_exp_1s, b_mant_0);
+ b_denorm = spu_andc(b_denorm0, b_zero);
+
+  /* Scale denorm inputs into normalized numbers by conditionally scaling the
+ * input parameters.
+ */
+ a = spu_sub(spu_or(a_in, exp_53), spu_sel(exp_53, a_in, sign_mask));
+ a = spu_sel(a_in, a, a_denorm);
+
+ b = spu_sub(spu_or(b_in, exp_53), spu_sel(exp_53, b_in, sign_mask));
+ b = spu_sel(b_in, b, b_denorm);
+
+  /* Extract the divisor and dividend exponent and force the parameters
+   * into the signed range +-[1.0,2.0).
+ */
+ exp_a = spu_and((vec_ullong2)a, exp_mask);
+ exp_b = spu_and((vec_ullong2)b, exp_mask);
+
+ mant_a = spu_sel(a, one, (vec_ullong2)exp_mask);
+ mant_b = spu_sel(b, one, (vec_ullong2)exp_mask);
+
+  /* Approximate the reciprocal of b by using the single-precision
+   * reciprocal estimate followed by one single-precision iteration
+   * of Newton-Raphson.
+ */
+ mant_bf = spu_roundtf(mant_b);
+ inv_bf = spu_re(mant_bf);
+ inv_bf = spu_madd(spu_nmsub(mant_bf, inv_bf, onef), inv_bf, inv_bf);
+
+ /* Perform 2 more Newton-Raphson iterations in double precision. The
+ * result (q1) is in the range (0.5, 2.0).
+ */
+ inv_b = spu_extend(inv_bf);
+ inv_b = spu_madd(spu_nmsub(mant_b, inv_b, one), inv_b, inv_b);
+ q0 = spu_mul(mant_a, inv_b);
+ q1 = spu_madd(spu_nmsub(mant_b, q0, mant_a), inv_b, q0);
+
+ /* Determine the exponent correction factor that must be applied
+ * to q1 by taking into account the exponent of the normalized inputs
+ * and the scale factors that were applied to normalize them.
+ */
+ exp = spu_rlmaska(spu_sub((vec_int4)exp_a, (vec_int4)exp_b), -20);
+ exp = spu_add(exp, (vec_int4)spu_add(spu_and((vec_int4)a_denorm, -0x34), spu_and((vec_int4)b_denorm, 0x34)));
+
+ /* Bias the quotient exponent depending on the sign of the exponent correction
+ * factor so that a single multiplier will ensure the entire double precision
+ * domain (including denorms) can be achieved.
+ *
+ * exp bias q1 adjust exp
+ * ===== ======== ==========
+ * positive 2^+65 -65
+ * negative 2^-64 +64
+ */
+ exp_bias = spu_xor(spu_rlmaska(exp, -31), 64);
+ exp = spu_sub(exp, exp_bias);
+
+ q1 = spu_sel(q1, (vec_double2)spu_add((vec_int4)q1, spu_sl(exp_bias, 20)), exp_mask);
+
+  /* Compute a multiplier (mult) to be applied to the quotient (q1) to produce the
+ * expected result. On overflow, clamp the multiplier to the maximum non-infinite
+ * number in case the rounding mode is not round-to-nearest.
+ */
+ exp = spu_add(exp, 0x3FF);
+ no_underflow = spu_cmpgt(exp, 0);
+ overflow = spu_cmpgt(exp, 0x7FE);
+ exp = spu_and(spu_sl(exp, 20), (vec_int4)no_underflow);
+ exp = spu_and(exp, (vec_int4)exp_mask);
+
+ mult = spu_sel((vec_double2)exp, (vec_double2)(spu_add((vec_uint4)exp_mask, -1)), (vec_ullong2)overflow);
+
+ /* Handle special value conditions. These include:
+ *
+ * 1) IF either operand is a NaN OR both operands are 0 or INFINITY THEN a NaN
+ * results.
+   * 2) ELSE IF the dividend is an INFINITY OR the divisor is 0 THEN an INFINITY results.
+ * 3) ELSE IF the dividend is 0 OR the divisor is INFINITY THEN a 0 results.
+ */
+ mult = spu_andc(mult, (vec_double2)spu_or(a_zero, b_inf));
+ mult = spu_sel(mult, (vec_double2)exp_mask, spu_or(a_inf, b_zero));
+
+ nan = spu_or(a_nan, b_nan);
+ nan = spu_or(nan, spu_and(a_zero, b_zero));
+ nan = spu_or(nan, spu_and(a_inf, b_inf));
+
+ mult = spu_or(mult, (vec_double2)nan);
+
+ /* Scale the final quotient */
+
+ q2 = spu_mul(q1, mult);
+
+ return (q2);
+}
+
+
+/* We use the same function for vector and scalar division. Provide the
+ scalar entry point as an alias. */
+double __divdf3 (double a, double b)
+ __attribute__ ((__alias__ ("__divv2df3")));
+
+/* Some toolchain builds used the __fast_divdf3 name for this helper function.
+ Provide this as another alternate entry point for compatibility. */
+double __fast_divdf3 (double a, double b)
+ __attribute__ ((__alias__ ("__divv2df3")));
+
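+/* For reference, the Newton-Raphson refinement above uses the classic
+   iteration x' = x + x*(1 - b*x), which roughly doubles the number of
+   correct bits each time; in scalar form (an illustrative sketch, not
+   used by the build, with no special-value handling):
+
+   double
+   recip_refine_model (double b, float estimate)
+   {
+     double x = (double) estimate;   // ~12 good bits from spu_re
+     x = x + x * (1.0 - b * x);      // first double-precision step
+     x = x + x * (1.0 - b * x);      // second step: nearly full precision
+     return x;
+   }  */
+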
diff --git a/libgcc/config/spu/float_disf.c b/libgcc/config/spu/float_disf.c
new file mode 100644
index 00000000000..0f4fe3d8e29
--- /dev/null
+++ b/libgcc/config/spu/float_disf.c
@@ -0,0 +1,31 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Prototype. */
+float __floatdisf (long long x);
+
+float __floatdisf (long long x)
+{
+ /* The SPU back-end now generates inline code for this conversion.
+     This file is solely used to provide the __floatdisf function
+ for objects generated with prior versions of GCC. */
+ return x;
+}
diff --git a/libgcc/config/spu/float_unsdidf.c b/libgcc/config/spu/float_unsdidf.c
new file mode 100644
index 00000000000..4fdf0b88a2b
--- /dev/null
+++ b/libgcc/config/spu/float_unsdidf.c
@@ -0,0 +1,54 @@
+/* Copyright (C) 2006, 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <spu_intrinsics.h>
+const unsigned char __didf_scale[16] __attribute__ ((__aligned__ (16))) = {
+ 0x00, 0x00, 0x04, 0x3e,
+ 0x00, 0x00, 0x04, 0x1e,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+const unsigned char __didf_pat[16] __attribute__ ((__aligned__ (16))) = {
+ 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x80, 0x80,
+ 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x80, 0x80
+};
+
+/* double __float_unsdidf (unsigned long long int)
+ Construct two exact doubles representing the high and low parts (in
+ parallel), then add them. */
+qword __float_unsdidf (qword DI);
+qword
+__float_unsdidf (qword DI)
+{
+ qword t0, t1, t2, t3, t4, t5, t6, t7, t8;
+ t0 = si_clz (DI);
+ t1 = si_shl (DI, t0);
+ t2 = si_ceqi (t0, 32);
+ t3 = si_sf (t0, *(const qword *) __didf_scale);
+ t4 = si_a (t1, t1);
+ t5 = si_andc (t3, t2);
+ t6 = si_shufb (t5, t4, *(const qword *) __didf_pat);
+ t7 = si_shlqbii (t6, 4);
+ t8 = si_shlqbyi (t7, 8);
+ return si_dfa (t7, t8);
+}
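+
+/* For reference, the conversion above is equivalent to the following
+   scalar C (an illustrative sketch, not used by the build): convert the
+   two 32-bit halves exactly and combine them with a single rounding at
+   the final add.
+
+   double
+   float_unsdidf_model (unsigned long long x)
+   {
+     double hi = (double) (unsigned int) (x >> 32);   // exact
+     double lo = (double) (unsigned int) x;           // exact
+     return hi * 4294967296.0 + lo;                   // hi * 2^32 + lo
+   }  */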
diff --git a/libgcc/config/spu/float_unsdisf.c b/libgcc/config/spu/float_unsdisf.c
new file mode 100644
index 00000000000..7af120ecc8c
--- /dev/null
+++ b/libgcc/config/spu/float_unsdisf.c
@@ -0,0 +1,31 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Prototype. */
+float __floatundisf (unsigned long long x);
+
+float __floatundisf (unsigned long long x)
+{
+ /* The SPU back-end now generates inline code for this conversion.
+ This file is solely used to provide the __floatundisf function
+ for objects generated with prior versions of GCC. */
+ return x;
+}
diff --git a/libgcc/config/spu/float_unssidf.c b/libgcc/config/spu/float_unssidf.c
new file mode 100644
index 00000000000..b255f81af55
--- /dev/null
+++ b/libgcc/config/spu/float_unssidf.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 2006, 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <spu_intrinsics.h>
+const unsigned char __sidf_pat[16] __attribute__ ((__aligned__ (16))) = {
+ 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x80, 0x80,
+ 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x80, 0x80
+};
+
+/* double __float_unssidf (unsigned int SI) */
+qword __float_unssidf (qword SI);
+qword
+__float_unssidf (qword SI)
+{
+ qword t0, t1, t2, t3, t4, t5, t6, t7;
+ t0 = si_clz (SI);
+ t1 = si_il (1054);
+ t2 = si_shl (SI, t0);
+ t3 = si_ceqi (t0, 32);
+ t4 = si_sf (t0, t1);
+ t5 = si_a (t2, t2);
+ t6 = si_andc (t4, t3);
+ t7 = si_shufb (t6, t5, *(const qword *) __sidf_pat);
+ return si_shlqbii (t7, 4);
+}
diff --git a/libgcc/config/spu/mfc_multi_tag_release.c b/libgcc/config/spu/mfc_multi_tag_release.c
new file mode 100644
index 00000000000..62eb2beeb8f
--- /dev/null
+++ b/libgcc/config/spu/mfc_multi_tag_release.c
@@ -0,0 +1,72 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <spu_mfcio.h>
+extern vector unsigned int __mfc_tag_table;
+
+/* Release a sequential group of tags from exclusive use. The sequential
+ group of tags is the range starting from <first_tag> through
+   <first_tag>+<number_of_tags>-1.  Upon successful release, MFC_DMA_TAG_VALID
+ is returned and the tags become available for future reservation.
+
+ If the specified tags were not previously reserved, no action is
+ taken and MFC_DMA_TAG_INVALID is returned. */
+
+unsigned int
+__mfc_multi_tag_release (unsigned int first_tag, unsigned int number_of_tags)
+{
+ vector unsigned int table_copy, tmp, tmp1;
+ vector unsigned int one = (vector unsigned int)
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ vector unsigned int is_invalid;
+ unsigned int last_tag;
+ vector unsigned int has_been_reserved;
+
+ last_tag = first_tag + number_of_tags;
+
+ table_copy = spu_sl (one, number_of_tags);
+ table_copy = spu_rl (table_copy, -last_tag);
+ table_copy = spu_xor (table_copy, -1);
+
+ /* Make sure the tags are in range and valid. */
+ tmp = spu_cmpgt (spu_promote(last_tag, 0), 32);
+ tmp1 = spu_cmpgt (spu_promote(number_of_tags, 0), 32);
+ is_invalid = spu_cmpgt (spu_promote(first_tag, 0), 31);
+
+ /* All bits are set to 1 if invalid, 0 if valid. */
+ is_invalid = spu_or (tmp, is_invalid);
+ is_invalid = spu_or (tmp1, is_invalid);
+
+  /* Check whether these tags have been reserved.  */
+ tmp = spu_rlmask (one, (int)-number_of_tags);
+ tmp1 = spu_sl (__mfc_tag_table, first_tag);
+ has_been_reserved = spu_cmpgt(tmp1, tmp);
+
+ is_invalid = spu_or (has_been_reserved, is_invalid);
+
+ table_copy = spu_sel (__mfc_tag_table, table_copy, table_copy);
+ __mfc_tag_table = spu_sel (table_copy, __mfc_tag_table, is_invalid);
+
+ return spu_extract (is_invalid, 0);
+}
+
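+/* For reference, the mask construction above computes, in scalar terms
+   (an illustrative sketch, not used by the build; assumes
+   1 <= number_of_tags <= 31 and first_tag + number_of_tags <= 32):
+
+   unsigned int
+   release_mask_model (unsigned int first_tag, unsigned int number_of_tags)
+   {
+     unsigned int m = 0xFFFFFFFFu << number_of_tags;  // low n bits clear
+     unsigned int last = first_tag + number_of_tags;
+     // rotate right by last, done in 64 bits so last == 32 is defined
+     m = (unsigned int) ((((unsigned long long) m << 32) | m) >> last);
+     return ~m;   // 1s exactly at tags first_tag .. last-1 (MSB-first)
+   }  */
+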
diff --git a/libgcc/config/spu/mfc_multi_tag_reserve.c b/libgcc/config/spu/mfc_multi_tag_reserve.c
new file mode 100644
index 00000000000..06d70259276
--- /dev/null
+++ b/libgcc/config/spu/mfc_multi_tag_reserve.c
@@ -0,0 +1,84 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <spu_mfcio.h>
+extern vector unsigned int __mfc_tag_table;
+
+/* Reserve a sequential group of tags for exclusive use. The number of
+ tags to be reserved is specified by the <number_of_tags> parameter.
+ This routine returns the first tag ID for a sequential list of
+ available tags and marks them as reserved. The reserved group
+ of tags is in the range starting from the returned tag through
+ the returned tag + <number_of_tags>-1.
+
+ If the number of tags requested exceeds the number of available
+ sequential tags, then MFC_DMA_TAG_INVALID is returned indicating
+ that the request could not be serviced. */
+
+unsigned int
+__mfc_multi_tag_reserve (unsigned int number_of_tags)
+{
+ vector unsigned int table_copy;
+ vector unsigned int one = (vector unsigned int)
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ vector unsigned int count_busy, is_valid;
+ vector unsigned int count_total;
+ vector unsigned int count_avail = (vector unsigned int) { 0, 0, 0, 0 };
+ vector unsigned int index = (vector unsigned int) { 0, 0, 0, 0 };
+
+ table_copy = __mfc_tag_table;
+
+
+ /* count_busy: number of consecutive busy tags
+ count_avail: number of consecutive free tags
+ table_copy: temporary copy of the tag table
+ count_total: sum of count_busy and count_avail
+ index: index of the current working tag */
+ do
+ {
+ table_copy = spu_sl (table_copy, count_avail);
+
+ count_busy = spu_cntlz (table_copy);
+ table_copy = spu_sl (table_copy, count_busy);
+ count_avail = spu_cntlz (spu_xor(table_copy, -1));
+ count_total = spu_add (count_busy, count_avail);
+ index = spu_add (index, count_total);
+ }
+ while (spu_extract (count_avail, 0) < number_of_tags
+ && spu_extract (table_copy, 0) != 0);
+
+ index = spu_sub (index, count_avail);
+
+ /* is_valid is set to 0xFFFFFFFF if table_copy == 0, 0 otherwise. */
+ is_valid = spu_cmpeq (table_copy, 0);
+ index = spu_sel (index, is_valid, is_valid);
+
+  /* Now actually mark the tags as used.  */
+ table_copy = spu_sl (one, number_of_tags);
+ table_copy = spu_rl (table_copy, -number_of_tags - spu_extract (index, 0));
+ table_copy = spu_sel (table_copy, __mfc_tag_table, table_copy);
+ __mfc_tag_table = spu_sel (table_copy, __mfc_tag_table, is_valid);
+
+ return spu_extract (index, 0);
+}
+
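
The vectorized search above is compact but hard to read; in scalar terms it is a first-fit scan for a run of <number_of_tags> consecutive set bits in a 32-bit free mask. A minimal C model of that scan follows, assuming bit (31 - i) of the mask tracks tag i, as __mfc_tag_table does; the function name and the all-ones invalid value are illustrative, not part of libgcc:

    #include <stdint.h>

    /* Scalar sketch of the first-fit search, assuming 1 <= n <= 32.  */
    static unsigned int
    multi_tag_reserve_model (uint32_t *free_mask, unsigned int n)
    {
      uint32_t run = (n == 32) ? 0xFFFFFFFFu : (((1u << n) - 1) << (32 - n));
      unsigned int first;

      for (first = 0; first + n <= 32; first++)
        {
          uint32_t window = run >> first;       /* tags first .. first+n-1 */
          if ((*free_mask & window) == window)  /* all n tags free?  */
            {
              *free_mask &= ~window;            /* mark them reserved */
              return first;
            }
        }
      return 0xFFFFFFFFu;                       /* models MFC_DMA_TAG_INVALID */
    }
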
diff --git a/libgcc/config/spu/mfc_tag_release.c b/libgcc/config/spu/mfc_tag_release.c
new file mode 100644
index 00000000000..d59c5713053
--- /dev/null
+++ b/libgcc/config/spu/mfc_tag_release.c
@@ -0,0 +1,59 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <spu_mfcio.h>
+extern vector unsigned int __mfc_tag_table;
+
+/* Release the specified DMA tag from exclusive use. Once released, the
+   tag is available for future reservation.  Upon successful release,
+ MFC_DMA_TAG_VALID is returned. If the specified tag is not in the
+ range 0 to 31, or had not been reserved, no action is taken and
+ MFC_DMA_TAG_INVALID is returned. */
+
+unsigned int
+__mfc_tag_release (unsigned int tag)
+{
+ vector unsigned int is_invalid;
+ vector unsigned int mask = (vector unsigned int)
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ vector signed int zero = (vector signed int) { 0, 0, 0, 0 };
+
+ vector signed int has_been_reserved;
+
+ /* Check if the tag is out of range. */
+ is_invalid = spu_cmpgt (spu_promote (tag, 0), 31);
+
+  /* Check whether the tag has been reserved; set to all ones if it has
+     not been reserved, zero otherwise.  */
+ has_been_reserved = (vector signed int) spu_rl (__mfc_tag_table, tag);
+ has_been_reserved = (vector signed int) spu_cmpgt (zero, has_been_reserved);
+
+ /* Set invalid. */
+ is_invalid = spu_or ((vector unsigned int) has_been_reserved, is_invalid);
+
+ mask = spu_rlmask (mask, (int)(-tag));
+ __mfc_tag_table = spu_or (__mfc_tag_table, mask);
+
+ return spu_extract(is_invalid, 0);
+}
+
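
In scalar terms the release path is just a bounds check, a reserved-bit check, and a bit set; here is a minimal model under the same bit convention (the name and the all-ones invalid value are illustrative only):

    #include <stdint.h>

    /* Scalar sketch of the release logic.  A set bit means the tag is
       free, so a set bit here means the tag was never reserved.  */
    static unsigned int
    tag_release_model (uint32_t *free_mask, unsigned int tag)
    {
      if (tag > 31 || (*free_mask & (0x80000000u >> tag)) != 0)
        return 0xFFFFFFFFu;              /* models MFC_DMA_TAG_INVALID */
      *free_mask |= 0x80000000u >> tag;  /* the tag is free again */
      return 0;                          /* models MFC_DMA_TAG_VALID */
    }
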
diff --git a/libgcc/config/spu/mfc_tag_reserve.c b/libgcc/config/spu/mfc_tag_reserve.c
new file mode 100644
index 00000000000..23b4817c74f
--- /dev/null
+++ b/libgcc/config/spu/mfc_tag_reserve.c
@@ -0,0 +1,51 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include <spu_mfcio.h>
+extern vector unsigned int __mfc_tag_table;
+
+/* Reserves a DMA tag for exclusive use. This routine returns an available
+ tag id in the range 0 to 31 and marks the tag as reserved. If no tags
+ are available, MFC_DMA_TAG_INVALID is returned indicating that all tags
+ are already reserved. */
+
+unsigned int
+__mfc_tag_reserve (void)
+{
+ vector unsigned int mask = (vector unsigned int)
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ vector unsigned int count_zeros, is_valid;
+ vector signed int count_neg;
+
+ count_zeros = spu_cntlz (__mfc_tag_table);
+ count_neg = spu_sub (0, (vector signed int) count_zeros);
+
+ mask = spu_rlmask (mask, (vector signed int) count_neg);
+ __mfc_tag_table = spu_andc (__mfc_tag_table, mask);
+
+ is_valid = spu_cmpeq (count_zeros, 32);
+ count_zeros = spu_sel (count_zeros, is_valid, is_valid);
+
+ return spu_extract (count_zeros, 0);
+}
+
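
Stripped of the vector intrinsics, the routine above is a leading-zero count plus a bit clear: the number of leading zeros in the free mask is exactly the lowest free tag id. A scalar model, with illustrative names only:

    #include <stdint.h>

    /* Scalar sketch of the single-tag reservation.  */
    static unsigned int
    tag_reserve_model (uint32_t *free_mask)
    {
      unsigned int tag;

      if (*free_mask == 0)
        return 0xFFFFFFFFu;                 /* all 32 tags already reserved */
      tag = __builtin_clz (*free_mask);
      *free_mask &= ~(0x80000000u >> tag);  /* mark the tag reserved */
      return tag;
    }
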
diff --git a/libgcc/config/spu/mfc_tag_table.c b/libgcc/config/spu/mfc_tag_table.c
new file mode 100644
index 00000000000..bd08c580c18
--- /dev/null
+++ b/libgcc/config/spu/mfc_tag_table.c
@@ -0,0 +1,39 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* The free tag table used by the MFC tag manager, with tag0
+ reserved for the overlay manager. */
+__vector unsigned int
+__mfc_tag_table = (__vector unsigned int) { 0x7FFFFFFF, -1, -1, -1 };
+
+/* Arrange to release tag0 if overlays are not present. */
+static void __mfc_tag_init (void) __attribute__ ((constructor));
+
+static void
+__mfc_tag_init (void)
+{
+ extern void _ovly_table __attribute__ ((weak));
+
+ if (&_ovly_table == 0)
+ __mfc_tag_table = (__vector unsigned int) { -1, -1, -1, -1 };
+}
diff --git a/libgcc/config/spu/multi3.c b/libgcc/config/spu/multi3.c
new file mode 100644
index 00000000000..b8b0e90ee25
--- /dev/null
+++ b/libgcc/config/spu/multi3.c
@@ -0,0 +1,119 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <spu_intrinsics.h>
+
+typedef int TItype __attribute__ ((mode (TI)));
+
+union qword_TItype
+ {
+ qword q;
+ TItype t;
+ };
+
+inline static qword
+si_from_TItype (TItype t)
+{
+ union qword_TItype u;
+ u.t = t;
+ return u.q;
+}
+
+inline static TItype
+si_to_TItype (qword q)
+{
+ union qword_TItype u;
+ u.q = q;
+ return u.t;
+}
+
+/* A straightforward vectorization and unrolling of
+ * short l[8], r[8];
+ * TItype total = 0;
+ * for (i = 0; i < 8; i++)
+ * for (j = 0; j < 8; j++)
+ *       total += (TItype)(l[7-i] * r[7-j]) << (16 * (i + j));
+ */
+TItype
+__multi3 (TItype l, TItype r)
+{
+ qword u = si_from_TItype (l);
+ qword v = si_from_TItype (r);
+ qword splat0 = si_shufb (v, v, si_ilh (0x0001));
+ qword splat1 = si_shufb (v, v, si_ilh (0x0203));
+ qword splat2 = si_shufb (v, v, si_ilh (0x0405));
+ qword splat3 = si_shufb (v, v, si_ilh (0x0607));
+ qword splat4 = si_shufb (v, v, si_ilh (0x0809));
+ qword splat5 = si_shufb (v, v, si_ilh (0x0a0b));
+ qword splat6 = si_shufb (v, v, si_ilh (0x0c0d));
+ qword splat7 = si_shufb (v, v, si_ilh (0x0e0f));
+
+ qword part0l = si_shlqbyi (si_mpyu (u, splat0), 14);
+ qword part1h = si_shlqbyi (si_mpyhhu (u, splat1), 14);
+ qword part1l = si_shlqbyi (si_mpyu (u, splat1), 12);
+ qword part2h = si_shlqbyi (si_mpyhhu (u, splat2), 12);
+ qword part2l = si_shlqbyi (si_mpyu (u, splat2), 10);
+ qword part3h = si_shlqbyi (si_mpyhhu (u, splat3), 10);
+ qword part3l = si_shlqbyi (si_mpyu (u, splat3), 8);
+ qword part4h = si_shlqbyi (si_mpyhhu (u, splat4), 8);
+ qword part4l = si_shlqbyi (si_mpyu (u, splat4), 6);
+ qword part5h = si_shlqbyi (si_mpyhhu (u, splat5), 6);
+ qword part5l = si_shlqbyi (si_mpyu (u, splat5), 4);
+ qword part6h = si_shlqbyi (si_mpyhhu (u, splat6), 4);
+ qword part6l = si_shlqbyi (si_mpyu (u, splat6), 2);
+ qword part7h = si_shlqbyi (si_mpyhhu (u, splat7), 2);
+ qword part7l = si_mpyu (u, splat7);
+
+ qword carry, total0, total1, total2, total3, total4;
+ qword total5, total6, total7, total8, total9, total10;
+ qword total;
+
+ total0 = si_a (si_a (si_a (part0l, part1h), si_a (part1l, part2h)), part7l);
+ total1 = si_a (part2l, part3h);
+ total2 = si_a (part3l, part4h);
+ total3 = si_a (part4l, part5h);
+ total4 = si_a (part5l, part6h);
+ total5 = si_a (part6l, part7h);
+ total6 = si_a (total0, total1);
+ total7 = si_a (total2, total3);
+ total8 = si_a (total4, total5);
+ total9 = si_a (total6, total7);
+ total10 = si_a (total8, total9);
+
+ carry = si_cg (part2l, part3h);
+ carry = si_a (carry, si_cg (part3l, part4h));
+ carry = si_a (carry, si_cg (part4l, part5h));
+ carry = si_a (carry, si_cg (part5l, part6h));
+ carry = si_a (carry, si_cg (part6l, part7h));
+ carry = si_a (carry, si_cg (total0, total1));
+ carry = si_a (carry, si_cg (total2, total3));
+ carry = si_a (carry, si_cg (total4, total5));
+ carry = si_a (carry, si_cg (total6, total7));
+ carry = si_a (carry, si_cg (total8, total9));
+ carry = si_shlqbyi (carry, 4);
+
+ total = si_cg (total10, carry);
+ total = si_shlqbyi (total, 4);
+ total = si_cgx (total10, carry, total);
+ total = si_shlqbyi (total, 4);
+ total = si_addx (total10, carry, total);
+ return si_to_TItype (total);
+}
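
The pseudocode in the comment conveys the shape of the computation but is not directly runnable. The sketch below is a self-contained scalar reference for the same 8x8 halfword partial-product scheme, assuming a host compiler with __int128 (the helper name is hypothetical); it is useful for cross-checking __multi3, since both compute the product modulo 2**128:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* Sum of 16x16-bit partial products, each shifted into place.  */
    static u128
    multi3_reference (u128 l, u128 r)
    {
      u128 total = 0;
      int i, j;

      for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
          {
            uint64_t lw = (uint64_t)(l >> (16 * i)) & 0xffff;
            uint64_t rw = (uint64_t)(r >> (16 * j)) & 0xffff;
            total += (u128)(lw * rw) << (16 * (i + j));
          }
      return total;  /* equals l * r modulo 2**128 */
    }
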
diff --git a/libgcc/config/spu/t-elf b/libgcc/config/spu/t-elf
index 7094da526bb..83616c1ca7d 100644
--- a/libgcc/config/spu/t-elf
+++ b/libgcc/config/spu/t-elf
@@ -1,3 +1,59 @@
+# Don't let CTOR_LIST end up in sdata section.
+# FIXME: This is the default.
+CRTSTUFF_T_CFLAGS =
+
+# We exclude those because the libgcc2.c default versions do not support
+# the SPU single-precision format (round towards zero). We provide our
+# own versions below and/or via direct expansion.
+LIB2FUNCS_EXCLUDE = _floatdisf _floatundisf _floattisf _floatunstisf
+
+LIB2ADD_ST = $(srcdir)/config/spu/float_unssidf.c \
+ $(srcdir)/config/spu/float_unsdidf.c \
+ $(srcdir)/config/spu/float_unsdisf.c \
+ $(srcdir)/config/spu/float_disf.c \
+ $(srcdir)/config/spu/mfc_tag_table.c \
+ $(srcdir)/config/spu/mfc_tag_reserve.c \
+ $(srcdir)/config/spu/mfc_tag_release.c \
+ $(srcdir)/config/spu/mfc_multi_tag_reserve.c \
+ $(srcdir)/config/spu/mfc_multi_tag_release.c \
+ $(srcdir)/config/spu/multi3.c \
+ $(srcdir)/config/spu/divmodti4.c \
+ $(srcdir)/config/spu/divv2df3.c
+
+# Build TImode conversion routines to support Fortran 128-bit
+# integer data types.
+LIB2_SIDITI_CONV_FUNCS = yes
+
+HOST_LIBGCC2_CFLAGS += -mwarn-reloc -D__IN_LIBGCC2
+
+# Neither gcc nor newlib seems to have a standard way to generate multiple
+# crt*.o files. So we don't use the standard crt0.o name anymore.
+
+cachemgr.o: $(srcdir)/config/spu/cachemgr.c
+ $(gcc_compile) -c $<
+
+# Specialised rule to add a -D flag.
+cachemgr_nonatomic.o: $(srcdir)/config/spu/cachemgr.c
+ $(gcc_compile) -DNONATOMIC -c $<
+
+libgcc_%.a: %.o
+ $(AR_FOR_TARGET) -rcs $@ $<
+
+cache8k.o: $(srcdir)/config/spu/cache.S
+ $(gcc_compile) -D__CACHE_SIZE__=8 -c $<
+
+cache16k.o: $(srcdir)/config/spu/cache.S
+ $(gcc_compile) -D__CACHE_SIZE__=16 -c $<
+
+cache32k.o: $(srcdir)/config/spu/cache.S
+ $(gcc_compile) -D__CACHE_SIZE__=32 -c $<
+
+cache64k.o: $(srcdir)/config/spu/cache.S
+ $(gcc_compile) -D__CACHE_SIZE__=64 -c $<
+
+cache128k.o: $(srcdir)/config/spu/cache.S
+ $(gcc_compile) -D__CACHE_SIZE__=128 -c $<
+
# We provide our own version of __divdf3 that performs better and has
# better support for non-default rounding modes.
DPBIT_FUNCS := $(filter-out _div_df, $(DPBIT_FUNCS))
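
Note how the rules above compose: to build, say, libgcc_cache64k.a, make first compiles cache.S with -D__CACHE_SIZE__=64 into cache64k.o and then archives it via the libgcc_%.a pattern rule; cachemgr.o and cachemgr_nonatomic.o feed the same pattern to produce libgcc_cachemgr.a and libgcc_cachemgr_nonatomic.a. Which of these archives is actually built is presumably driven by the target's list of extra parts.
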
diff --git a/libgcc/config/stormy16/ashlsi3.c b/libgcc/config/stormy16/ashlsi3.c
new file mode 100644
index 00000000000..0ef42ad66d3
--- /dev/null
+++ b/libgcc/config/stormy16/ashlsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_ASHLSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/ashrsi3.c b/libgcc/config/stormy16/ashrsi3.c
new file mode 100644
index 00000000000..67bcbbbe05b
--- /dev/null
+++ b/libgcc/config/stormy16/ashrsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_ASHRSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/clzhi2.c b/libgcc/config/stormy16/clzhi2.c
new file mode 100644
index 00000000000..350ef41daaf
--- /dev/null
+++ b/libgcc/config/stormy16/clzhi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_CLZHI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/cmpsi2.c b/libgcc/config/stormy16/cmpsi2.c
new file mode 100644
index 00000000000..fe32fda95cb
--- /dev/null
+++ b/libgcc/config/stormy16/cmpsi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_CMPSI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/ctzhi2.c b/libgcc/config/stormy16/ctzhi2.c
new file mode 100644
index 00000000000..98ab76dcd69
--- /dev/null
+++ b/libgcc/config/stormy16/ctzhi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_CTZHI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/divsi3.c b/libgcc/config/stormy16/divsi3.c
new file mode 100644
index 00000000000..0fa75342f4a
--- /dev/null
+++ b/libgcc/config/stormy16/divsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_DIVSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/ffshi2.c b/libgcc/config/stormy16/ffshi2.c
new file mode 100644
index 00000000000..a36dec87eb9
--- /dev/null
+++ b/libgcc/config/stormy16/ffshi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_FFSHI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/lib2funcs.c b/libgcc/config/stormy16/lib2funcs.c
new file mode 100644
index 00000000000..a10a9b28119
--- /dev/null
+++ b/libgcc/config/stormy16/lib2funcs.c
@@ -0,0 +1,358 @@
+/* This file contains 16-bit versions of some of the functions found in
+ libgcc2.c. Really libgcc ought to be moved out of the gcc directory
+ and into its own top level directory, and then split up into multiple
+ files. On this glorious day maybe this code can be integrated into
+ it too. */
+
+/* Copyright (C) 2005, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+
+#ifdef HAVE_GAS_HIDDEN
+#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
+#else
+#define ATTRIBUTE_HIDDEN
+#endif
+
+#ifndef MIN_UNITS_PER_WORD
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+#ifndef LIBGCC2_UNITS_PER_WORD
+# if MIN_UNITS_PER_WORD > 4
+# define LIBGCC2_UNITS_PER_WORD 8
+# elif (MIN_UNITS_PER_WORD > 2 \
+ || (MIN_UNITS_PER_WORD > 1 && LONG_LONG_TYPE_SIZE > 32))
+# define LIBGCC2_UNITS_PER_WORD 4
+# else
+# define LIBGCC2_UNITS_PER_WORD MIN_UNITS_PER_WORD
+# endif
+#endif
+
+#define word_type Wtype
+
+#include "libgcc2.h"
+#undef int
+
+/* These prototypes would normally live in libgcc2.h, but this can
+ only happen once the code below is integrated into libgcc2.c. */
+
+extern USItype udivmodsi4 (USItype, USItype, word_type);
+extern SItype __divsi3 (SItype, SItype);
+extern SItype __modsi3 (SItype, SItype);
+extern SItype __udivsi3 (SItype, SItype);
+extern SItype __umodsi3 (SItype, SItype);
+extern SItype __ashlsi3 (SItype, SItype);
+extern SItype __ashrsi3 (SItype, SItype);
+extern USItype __lshrsi3 (USItype, USItype);
+extern int __popcounthi2 (UHWtype);
+extern int __parityhi2 (UHWtype);
+extern int __clzhi2 (UHWtype);
+extern int __ctzhi2 (UHWtype);
+
+
+#ifdef XSTORMY16_UDIVMODSI4
+USItype
+udivmodsi4 (USItype num, USItype den, word_type modwanted)
+{
+ USItype bit = 1;
+ USItype res = 0;
+
+ while (den < num && bit && !(den & (1L << 31)))
+ {
+ den <<= 1;
+ bit <<= 1;
+ }
+ while (bit)
+ {
+ if (num >= den)
+ {
+ num -= den;
+ res |= bit;
+ }
+ bit >>= 1;
+ den >>= 1;
+ }
+
+ if (modwanted)
+ return num;
+ return res;
+}
+#endif
+
+#ifdef XSTORMY16_DIVSI3
+SItype
+__divsi3 (SItype a, SItype b)
+{
+ word_type neg = 0;
+ SItype res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+#endif
+
+#ifdef XSTORMY16_MODSI3
+SItype
+__modsi3 (SItype a, SItype b)
+{
+ word_type neg = 0;
+ SItype res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+#endif
+
+#ifdef XSTORMY16_UDIVSI3
+SItype
+__udivsi3 (SItype a, SItype b)
+{
+ return udivmodsi4 (a, b, 0);
+}
+#endif
+
+#ifdef XSTORMY16_UMODSI3
+SItype
+__umodsi3 (SItype a, SItype b)
+{
+ return udivmodsi4 (a, b, 1);
+}
+#endif
+
+#ifdef XSTORMY16_ASHLSI3
+SItype
+__ashlsi3 (SItype a, SItype b)
+{
+ word_type i;
+
+ if (b & 16)
+ a <<= 16;
+ if (b & 8)
+ a <<= 8;
+ for (i = (b & 0x7); i > 0; --i)
+ a <<= 1;
+ return a;
+}
+#endif
+
+#ifdef XSTORMY16_ASHRSI3
+SItype
+__ashrsi3 (SItype a, SItype b)
+{
+ word_type i;
+
+ if (b & 16)
+ a >>= 16;
+ if (b & 8)
+ a >>= 8;
+ for (i = (b & 0x7); i > 0; --i)
+ a >>= 1;
+ return a;
+}
+#endif
+
+#ifdef XSTORMY16_LSHRSI3
+USItype
+__lshrsi3 (USItype a, USItype b)
+{
+ word_type i;
+
+ if (b & 16)
+ a >>= 16;
+ if (b & 8)
+ a >>= 8;
+ for (i = (b & 0x7); i > 0; --i)
+ a >>= 1;
+ return a;
+}
+#endif
+
+#ifdef XSTORMY16_POPCOUNTHI2
+/* Returns the number of set bits in X.
+ FIXME: The return type really should be "unsigned int"
+ but this is not how the builtin is prototyped. */
+int
+__popcounthi2 (UHWtype x)
+{
+ int ret;
+
+ ret = __popcount_tab [x & 0xff];
+ ret += __popcount_tab [(x >> 8) & 0xff];
+
+ return ret;
+}
+#endif
+
+#ifdef XSTORMY16_PARITYHI2
+/* Returns the number of set bits in X, modulo 2.
+ FIXME: The return type really should be "unsigned int"
+ but this is not how the builtin is prototyped. */
+
+int
+__parityhi2 (UHWtype x)
+{
+ x ^= x >> 8;
+ x ^= x >> 4;
+ x &= 0xf;
+ return (0x6996 >> x) & 1;
+}
+#endif
+
+#ifdef XSTORMY16_CLZHI2
+/* Returns the number of zero-bits from the most significant bit to the
+ first nonzero bit in X. Returns 16 for X == 0. Implemented as a
+ simple for loop in order to save space by removing the need for
+ the __clz_tab array.
+ FIXME: The return type really should be "unsigned int" but this is
+ not how the builtin is prototyped. */
+#undef unsigned
+int
+__clzhi2 (UHWtype x)
+{
+ unsigned int i;
+ unsigned int c;
+ unsigned int value = x;
+
+ for (c = 0, i = 1 << 15; i; i >>= 1, c++)
+ if (i & value)
+ break;
+ return c;
+}
+#endif
+
+#ifdef XSTORMY16_CTZHI2
+/* Returns the number of trailing zero bits in X.
+ FIXME: The return type really should be "signed int" since
+ ctz(0) returns -1, but this is not how the builtin is prototyped. */
+
+int
+__ctzhi2 (UHWtype x)
+{
+ /* This is cunning. It converts X into a number with only the one bit
+ set, the bit that was the least significant bit in X. From this we
+     can use the leading-zero count to compute the number of trailing
+     zero bits.  */
+ x &= - x;
+
+ return 15 - __builtin_clz (x);
+}
+#endif
+
+#ifdef XSTORMY16_FFSHI2
+/* Returns one plus the index of the least significant 1-bit of X,
+ or if X is zero, returns zero. FIXME: The return type really
+ should be "unsigned int" but this is not how the builtin is
+ prototyped. */
+
+int
+__ffshi2 (UHWtype u)
+{
+ UHWtype count;
+
+ if (u == 0)
+ return 0;
+
+ return 16 - __builtin_clz (u & - u);
+}
+#endif
+
+#ifdef XSTORMY16_UCMPSI2
+/* Performs an unsigned comparison of two 32-bit values: A and B.
+ If A is less than B, then 0 is returned. If A is greater than B,
+ then 2 is returned. Otherwise A and B are equal and 1 is returned. */
+
+word_type
+__ucmpsi2 (USItype a, USItype b)
+{
+ word_type hi_a = (a >> 16);
+ word_type hi_b = (b >> 16);
+
+ if (hi_a == hi_b)
+ {
+ word_type low_a = (a & 0xffff);
+ word_type low_b = (b & 0xffff);
+
+ return low_a < low_b ? 0 : (low_a > low_b ? 2 : 1);
+ }
+
+ return hi_a < hi_b ? 0 : 2;
+}
+#endif
+
+#ifdef XSTORMY16_CMPSI2
+/* Performs a signed comparison of two 32-bit values: A and B.
+ If A is less than B, then 0 is returned. If A is greater than B,
+ then 2 is returned. Otherwise A and B are equal and 1 is returned. */
+
+word_type
+__cmpsi2 (SItype a, SItype b)
+{
+ word_type hi_a = (a >> 16);
+ word_type hi_b = (b >> 16);
+
+ if (hi_a == hi_b)
+ {
+ word_type low_a = (a & 0xffff);
+ word_type low_b = (b & 0xffff);
+
+ return low_a < low_b ? 0 : (low_a > low_b ? 2 : 1);
+ }
+
+ return hi_a < hi_b ? 0 : 2;
+}
+#endif
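
The 0/1/2 encoding shared by __ucmpsi2 and __cmpsi2 is easy to get backwards, so here is a minimal host-side model with a few checks (the model function is illustrative, not part of libgcc):

    #include <assert.h>
    #include <stdint.h>

    /* 0 = A < B, 1 = A == B, 2 = A > B, as documented above.  */
    static int
    ucmpsi2_model (uint32_t a, uint32_t b)
    {
      return a < b ? 0 : (a > b ? 2 : 1);
    }

    int
    main (void)
    {
      assert (ucmpsi2_model (5, 7) == 0);
      assert (ucmpsi2_model (7, 7) == 1);
      assert (ucmpsi2_model (9, 7) == 2);
      return 0;
    }
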
diff --git a/libgcc/config/stormy16/lshrsi3.c b/libgcc/config/stormy16/lshrsi3.c
new file mode 100644
index 00000000000..13903d3d24a
--- /dev/null
+++ b/libgcc/config/stormy16/lshrsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_LSHRSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/modsi3.c b/libgcc/config/stormy16/modsi3.c
new file mode 100644
index 00000000000..c63e8906824
--- /dev/null
+++ b/libgcc/config/stormy16/modsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_MODSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/parityhi2.c b/libgcc/config/stormy16/parityhi2.c
new file mode 100644
index 00000000000..4be7fbf3e14
--- /dev/null
+++ b/libgcc/config/stormy16/parityhi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_PARITYHI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/popcounthi2.c b/libgcc/config/stormy16/popcounthi2.c
new file mode 100644
index 00000000000..30bf0950d3d
--- /dev/null
+++ b/libgcc/config/stormy16/popcounthi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_POPCOUNTHI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/t-stormy16 b/libgcc/config/stormy16/t-stormy16
new file mode 100644
index 00000000000..d62d167d93d
--- /dev/null
+++ b/libgcc/config/stormy16/t-stormy16
@@ -0,0 +1,39 @@
+# -*- makefile -*-
+#
+# Copyright (C) 2001, 2004, 2010, 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# SImode arithmetic and logical routines, HImode bit counting routines.
+LIB2ADD = \
+ $(srcdir)/config/stormy16/udivmodsi4.c \
+ $(srcdir)/config/stormy16/divsi3.c \
+ $(srcdir)/config/stormy16/modsi3.c \
+ $(srcdir)/config/stormy16/udivsi3.c \
+ $(srcdir)/config/stormy16/umodsi3.c \
+ $(srcdir)/config/stormy16/ashlsi3.c \
+ $(srcdir)/config/stormy16/ashrsi3.c \
+ $(srcdir)/config/stormy16/lshrsi3.c \
+ $(srcdir)/config/stormy16/popcounthi2.c \
+ $(srcdir)/config/stormy16/parityhi2.c \
+ $(srcdir)/config/stormy16/clzhi2.c \
+ $(srcdir)/config/stormy16/ctzhi2.c \
+ $(srcdir)/config/stormy16/ffshi2.c \
+ $(srcdir)/config/stormy16/cmpsi2.c \
+ $(srcdir)/config/stormy16/ucmpsi2.c
+
+HOST_LIBGCC2_CFLAGS += -O2
diff --git a/libgcc/config/stormy16/ucmpsi2.c b/libgcc/config/stormy16/ucmpsi2.c
new file mode 100644
index 00000000000..ee327b1da15
--- /dev/null
+++ b/libgcc/config/stormy16/ucmpsi2.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_UCMPSI2
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/udivmodsi4.c b/libgcc/config/stormy16/udivmodsi4.c
new file mode 100644
index 00000000000..5fdd0f9189e
--- /dev/null
+++ b/libgcc/config/stormy16/udivmodsi4.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_UDIVMODSI4
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/udivsi3.c b/libgcc/config/stormy16/udivsi3.c
new file mode 100644
index 00000000000..ad12bd86a58
--- /dev/null
+++ b/libgcc/config/stormy16/udivsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_UDIVSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/stormy16/umodsi3.c b/libgcc/config/stormy16/umodsi3.c
new file mode 100644
index 00000000000..eeec67f56c0
--- /dev/null
+++ b/libgcc/config/stormy16/umodsi3.c
@@ -0,0 +1,2 @@
+#define XSTORMY16_UMODSI3
+#include "lib2funcs.c"
diff --git a/libgcc/config/t-crtin b/libgcc/config/t-crtin
deleted file mode 100644
index b30e0d52e2f..00000000000
--- a/libgcc/config/t-crtin
+++ /dev/null
@@ -1,4 +0,0 @@
-crti.o: $(srcdir)/config/$(cpu_type)/sol2-ci.S
- $(crt_compile) -c $<
-crtn.o: $(srcdir)/config/$(cpu_type)/sol2-cn.S
- $(crt_compile) -c $<
diff --git a/libgcc/config/t-crtstuff-pic b/libgcc/config/t-crtstuff-pic
new file mode 100644
index 00000000000..4cda4c9bffb
--- /dev/null
+++ b/libgcc/config/t-crtstuff-pic
@@ -0,0 +1,2 @@
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = $(CRTSTUFF_T_CFLAGS) $(PICFLAG)
diff --git a/libgcc/config/t-darwin b/libgcc/config/t-darwin
index 311b7e2679f..e32127e9d8a 100644
--- a/libgcc/config/t-darwin
+++ b/libgcc/config/t-darwin
@@ -3,6 +3,12 @@ crt3.o: $(srcdir)/config/darwin-crt3.c
$(crt_compile) \
-fno-tree-dominator-opts $(DARWIN_EXTRA_CRT_BUILD_CFLAGS) -c $<
+# -pipe because there's an assembler bug, 4077127, which causes it to
+# mishandle the first # directive: temporary file names then appear in
+# the stabs output and the bootstrap fails.  Using -pipe works around
+# this by avoiding temporary file names altogether.
+HOST_LIBGCC2_CFLAGS += -pipe
+
# Use unwind-dw2-fde-darwin
LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/config/unwind-dw2-fde-darwin.c \
$(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
diff --git a/libgcc/config/t-freebsd-thread b/libgcc/config/t-freebsd-thread
new file mode 100644
index 00000000000..2948dc1a44c
--- /dev/null
+++ b/libgcc/config/t-freebsd-thread
@@ -0,0 +1,2 @@
+# This is currently needed to compile libgcc2 for thread support
+HOST_LIBGCC2_CFLAGS += -pthread
diff --git a/libgcc/config/t-libgcc-pic b/libgcc/config/t-libgcc-pic
new file mode 100644
index 00000000000..0eea16ebc39
--- /dev/null
+++ b/libgcc/config/t-libgcc-pic
@@ -0,0 +1,2 @@
+# Compile libgcc2.a with pic.
+HOST_LIBGCC2_CFLAGS += $(PICFLAG)
diff --git a/libgcc/config/t-libunwind b/libgcc/config/t-libunwind
index 1c7a898675f..5244928da3c 100644
--- a/libgcc/config/t-libunwind
+++ b/libgcc/config/t-libunwind
@@ -1,3 +1,12 @@
+# Use the system libunwind library.
+
+HOST_LIBGCC2_CFLAGS += -DUSE_GAS_SYMVER
+
LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
$(srcdir)/unwind-compat.c $(srcdir)/unwind-dw2-fde-compat.c
LIB2ADDEHSTATIC = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
+
+# Override the default value from t-slibgcc-elf-ver and mention -lunwind
+# so that the resulting libgcc_s.so has the necessary DT_NEEDED entry for
+# libunwind.
+SHLIB_LC = -lunwind -lc
diff --git a/libgcc/config/t-linux b/libgcc/config/t-linux
new file mode 100644
index 00000000000..d1bdac4f32d
--- /dev/null
+++ b/libgcc/config/t-linux
@@ -0,0 +1,3 @@
+# Override t-slibgcc-elf-ver to export some libgcc symbols with
+# the symbol versions that glibc used.
+SHLIB_MAPFILES += $(srcdir)/config/libgcc-glibc.ver
diff --git a/libgcc/config/t-openbsd-thread b/libgcc/config/t-openbsd-thread
new file mode 100644
index 00000000000..50b61e5865f
--- /dev/null
+++ b/libgcc/config/t-openbsd-thread
@@ -0,0 +1,3 @@
+# This is currently needed to compile libgcc2 for thread support
+HOST_LIBGCC2_CFLAGS += -pthread
+
diff --git a/libgcc/config/t-rtems b/libgcc/config/t-rtems
index 85e931e60f6..85f1da90d3b 100644
--- a/libgcc/config/t-rtems
+++ b/libgcc/config/t-rtems
@@ -1,4 +1,4 @@
# If we are building next to newlib, this will let us find the RTEMS
# limits.h when building libgcc2. Otherwise, newlib must be installed
# first.
-HOST_LIBGCC2_CFLAGS = -I$(srcdir)/../newlib/libc/sys/rtems/include
+LIBGCC2_INCLUDES = -I$(srcdir)/../newlib/libc/sys/rtems/include
diff --git a/libgcc/config/t-slibgcc b/libgcc/config/t-slibgcc
index 0b5cc4bfc50..3727244c424 100644
--- a/libgcc/config/t-slibgcc
+++ b/libgcc/config/t-slibgcc
@@ -43,9 +43,12 @@ SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
else true; fi && \
mv $(SHLIB_DIR)/$(SHLIB_SONAME).tmp $(SHLIB_DIR)/$(SHLIB_SONAME) && \
$(SHLIB_MAKE_SOLINK)
+
+INSTALL_SHLIB = $(INSTALL_DATA)
+
SHLIB_INSTALL = \
$(mkinstalldirs) $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
- $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $(INSTALL_SHLIB) $(SHLIB_DIR)/$(SHLIB_SONAME) \
$(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME); \
rm -f $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
$(SHLIB_INSTALL_SOLINK)
diff --git a/libgcc/config/t-slibgcc-darwin b/libgcc/config/t-slibgcc-darwin
index bb6569a87d9..9970d003436 100644
--- a/libgcc/config/t-slibgcc-darwin
+++ b/libgcc/config/t-slibgcc-darwin
@@ -22,9 +22,10 @@ SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -dynamiclib -nodefaultlibs \
$(SHLIB_VERSTRING) \
@multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC)
-SHLIB_MKMAP = $(gcc_srcdir)/mkmap-flat.awk
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MKMAP_OPTS = -v leading_underscore=1
-SHLIB_MAPFILES += libgcc-std.ver $(gcc_srcdir)/libgcc-libsystem.ver
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/libgcc-libsystem.ver
+SHLIB_VERPFX = $(srcdir)/config/$(cpu_type)/libgcc-darwin
# we're only going to build the stubs if the target slib is /usr/lib
# there is no other case in which they're useful in a live system.
diff --git a/libgcc/config/t-slibgcc-elf-ver b/libgcc/config/t-slibgcc-elf-ver
index 2616d31cae5..4a00edc9663 100644
--- a/libgcc/config/t-slibgcc-elf-ver
+++ b/libgcc/config/t-slibgcc-elf-ver
@@ -1,4 +1,4 @@
# Build a shared libgcc library for ELF with symbol versioning.
-SHLIB_MKMAP = $(gcc_srcdir)/mkmap-symver.awk
+SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
SHLIB_MAPFILES = libgcc-std.ver
diff --git a/libgcc/config/t-slibgcc-gld-nover b/libgcc/config/t-slibgcc-gld-nover
new file mode 100644
index 00000000000..e9b78ea9410
--- /dev/null
+++ b/libgcc/config/t-slibgcc-gld-nover
@@ -0,0 +1,3 @@
+# Build a shared libgcc library for ELF with the GNU linker.
+
+SHLIB_LDFLAGS = -Wl,--soname=$(SHLIB_SONAME)
diff --git a/libgcc/config/t-slibgcc-hpux b/libgcc/config/t-slibgcc-hpux
new file mode 100644
index 00000000000..866457fe9ae
--- /dev/null
+++ b/libgcc/config/t-slibgcc-hpux
@@ -0,0 +1,7 @@
+# Build a shared libgcc library with the HP-UX linker.
+
+SHLIB_LDFLAGS = -Wl,+h,$(SHLIB_SONAME)
+
+INSTALL_SHLIB = $(INSTALL_DATA) -m 555
+
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
diff --git a/libgcc/config/t-slibgcc-libgcc b/libgcc/config/t-slibgcc-libgcc
new file mode 100644
index 00000000000..ec36b2727ae
--- /dev/null
+++ b/libgcc/config/t-slibgcc-libgcc
@@ -0,0 +1,32 @@
+# Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Instead of creating a $(SHLIB_SOLINK) symlink, create a GNU ld
+# linker script that pulls in both $(SHLIB_SONAME) and libgcc.a.
+# This is needed on targets where libgcc.a contains routines that aren't in
+# $(SHLIB_SONAME) and are needed for shared libraries.
+
+SHLIB_MAKE_SOLINK = \
+ (echo "/* GNU ld script"; \
+ echo " Use the shared library, but some functions are only in"; \
+ echo " the static library. */"; \
+ echo "GROUP ( $(SHLIB_SONAME) libgcc.a )" \
+ ) > $(SHLIB_DIR)/$(SHLIB_SOLINK)
+SHLIB_INSTALL_SOLINK = \
+ $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIB_SOLINK) \
+ $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
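
For concreteness, if SHLIB_SONAME expands to, say, libgcc_s.so.1 (the exact name is target-dependent), the recipe above leaves a libgcc_s.so that is not a symlink but a small text file reading:

    /* GNU ld script
       Use the shared library, but some functions are only in
       the static library. */
    GROUP ( libgcc_s.so.1 libgcc.a )

so linking with -lgcc_s pulls in the shared library and falls back to libgcc.a for the routines the shared object lacks.
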
diff --git a/libgcc/config/t-slibgcc-nolc-override b/libgcc/config/t-slibgcc-nolc-override
new file mode 100644
index 00000000000..959d2cc2a1f
--- /dev/null
+++ b/libgcc/config/t-slibgcc-nolc-override
@@ -0,0 +1 @@
+SHLIB_LC =
diff --git a/libgcc/config/t-slibgcc-vms b/libgcc/config/t-slibgcc-vms
new file mode 100644
index 00000000000..d01c34341c8
--- /dev/null
+++ b/libgcc/config/t-slibgcc-vms
@@ -0,0 +1,28 @@
+shlib_version:=$(shell $(CC) --version 2>&1 | \
+	sed -n 's/^.*(GCC).* \([0-9]*\.[0-9.]*\).*$$/\1/p' | \
+ sed -e 's/\./,/' -e 's/\.//g')
+SHLIB_EXT = .exe
+SHLIB_OBJS = @shlib_objs@
+SHLIB_NAME = @shlib_base_name@.exe
+SHLIB_MULTILIB =
+SHLIB_INSTALL = $(INSTALL_DATA) $(SHLIB_NAME) $(DESTDIR)$(libsubdir)/$(SHLIB_NAME)
+SHLIB_SYMVEC = \
+ grep -F -e "\$$BSS\$$" -e "\$$DATA\$$" -e " sdata " -e " data.rel " -e " data.rel.ro " -e " sbss " \
+ -e "\$$LINK\$$" -e "\$$READONLY\$$" | \
+ sed -e "s/.*\$$LINK\$$ \(.*\)/SYMBOL_VECTOR=(\1=PROCEDURE)/" \
+ -e "s/.*\$$DATA\$$ \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.* sbss \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.* sdata \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.* data.rel \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.* data.rel.ro \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.*\$$BSS\$$ \(.*\)/SYMBOL_VECTOR=(\1=DATA)/" \
+ -e "s/.*\$$READONLY\$$ \(.*\)/SYMBOL_VECTOR=(\1=DATA)/"
+SHLIB_LINK = \
+ echo "case_sensitive=yes" > SYMVEC_$$$$.opt; \
+ objdump --syms $(SHLIB_OBJS) | \
+ $(SHLIB_SYMVEC) >> SYMVEC_$$$$.opt ; \
+ echo "case_sensitive=NO" >> SYMVEC_$$$$.opt; \
+ $(CC) $(LIBGCC2_CFLAGS) -nodefaultlibs \
+ -shared --for-linker=/noinform -o $(SHLIB_NAME) $(SHLIB_OBJS) \
+ --for-linker=SYMVEC_$$$$.opt \
+ --for-linker=gsmatch=equal,$(shlib_version)
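
As an illustration of the SHLIB_SYMVEC filter (the symbol name here is hypothetical): an objdump --syms line that places __divdi3 in a $LINK$ section matches the grep and is rewritten by the first sed expression into SYMBOL_VECTOR=(__divdi3=PROCEDURE), while symbols in the data-like sections come out as SYMBOL_VECTOR=(name=DATA). The accumulated options file then hands the VMS linker its exported symbol vector.
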
diff --git a/libgcc/config/t-sol2 b/libgcc/config/t-sol2
index bfb8f3b5cc2..cb511abd2fc 100644
--- a/libgcc/config/t-sol2
+++ b/libgcc/config/t-sol2
@@ -25,5 +25,3 @@ crt1.o: $(srcdir)/config/$(cpu_type)/sol2-c1.S
$(crt_compile) -c $<
gcrt1.o: $(srcdir)/config/$(cpu_type)/sol2-c1.S
$(crt_compile) -c -DGCRT1 $<
-
-HOST_LIBGCC2_CFLAGS = -fPIC
diff --git a/libgcc/config/t-vxworks b/libgcc/config/t-vxworks
new file mode 100644
index 00000000000..3647f75d572
--- /dev/null
+++ b/libgcc/config/t-vxworks
@@ -0,0 +1,14 @@
+# Don't build libgcc.a with debug info
+LIBGCC2_DEBUG_CFLAGS =
+
+# Extra libgcc2 modules used by gthr-vxworks.h functions
+LIB2ADD = $(srcdir)/config/vxlib.c $(srcdir)/config/vxlib-tls.c
+
+# This ensures that the correct target headers are used; some
+# VxWorks system headers have names that collide with GCC's
+# internal (host) headers, e.g. regs.h.
+LIBGCC2_INCLUDES = -nostdinc -I \
+  `case "/$(MULTIDIR)" in \
+ */mrtp*) echo $(WIND_USR)/h ;; \
+ *) echo $(WIND_BASE)/target/h ;; \
+ esac`
diff --git a/libgcc/config/v850/lib1funcs.S b/libgcc/config/v850/lib1funcs.S
new file mode 100644
index 00000000000..04e9b1e0ad4
--- /dev/null
+++ b/libgcc/config/v850/lib1funcs.S
@@ -0,0 +1,2330 @@
+/* libgcc routines for NEC V850.
+ Copyright (C) 1996, 1997, 2002, 2005, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef L_mulsi3
+ .text
+ .globl ___mulsi3
+ .type ___mulsi3,@function
+___mulsi3:
+#ifdef __v850__
+/*
+ #define SHIFT 12
+ #define MASK ((1 << SHIFT) - 1)
+
+ #define STEP(i, j) \
+ ({ \
+ short a_part = (a >> (i)) & MASK; \
+ short b_part = (b >> (j)) & MASK; \
+ int res = (((int) a_part) * ((int) b_part)); \
+ res; \
+ })
+
+ int
+ __mulsi3 (unsigned a, unsigned b)
+ {
+ return STEP (0, 0) +
+ ((STEP (SHIFT, 0) + STEP (0, SHIFT)) << SHIFT) +
+ ((STEP (0, 2 * SHIFT) + STEP (SHIFT, SHIFT) + STEP (2 * SHIFT, 0))
+ << (2 * SHIFT));
+ }
+*/
+ mov r6, r14
+ movea lo(32767), r0, r10
+ and r10, r14
+ mov r7, r15
+ and r10, r15
+ shr 15, r6
+ mov r6, r13
+ and r10, r13
+ shr 15, r7
+ mov r7, r12
+ and r10, r12
+ shr 15, r6
+ shr 15, r7
+ mov r14, r10
+ mulh r15, r10
+ mov r14, r11
+ mulh r12, r11
+ mov r13, r16
+ mulh r15, r16
+ mulh r14, r7
+ mulh r15, r6
+ add r16, r11
+ mulh r13, r12
+ shl 15, r11
+ add r11, r10
+ add r12, r7
+ add r6, r7
+ shl 30, r7
+ add r7, r10
+ jmp [r31]
+#endif /* __v850__ */
+#if defined(__v850e__) || defined(__v850ea__) || defined(__v850e2__) || defined(__v850e2v3__)
+	/* This routine is almost unnecessary because gcc
+	   generates the MUL instruction for the RTX mulsi3.
+	   But if someone wants to link their application with
+	   previously compiled v850 objects then they will
+	   need this function.  */
+
+	/* It isn't good to use the instruction sequence below:
+		mul	r7, r6, r0
+		mov	r6, r10
+	   There is a RAW hazard between them: the MUL takes 2 cycles
+	   in the EX stage, so the MOV must wait 1 cycle.  */
+ mov r7, r10
+ mul r6, r10, r0
+ jmp [r31]
+#endif /* __v850e__ */
+ .size ___mulsi3,.-___mulsi3
+#endif /* L_mulsi3 */
+
+
+#ifdef L_udivsi3
+ .text
+ .global ___udivsi3
+ .type ___udivsi3,@function
+___udivsi3:
+#ifdef __v850__
+ mov 1,r12
+ mov 0,r10
+ cmp r6,r7
+ bnl .L12
+ movhi hi(-2147483648),r0,r13
+ cmp r0,r7
+ blt .L12
+.L4:
+ shl 1,r7
+ shl 1,r12
+ cmp r6,r7
+ bnl .L12
+ cmp r0,r12
+ be .L8
+ mov r7,r19
+ and r13,r19
+ be .L4
+ br .L12
+.L9:
+ cmp r7,r6
+ bl .L10
+ sub r7,r6
+ or r12,r10
+.L10:
+ shr 1,r12
+ shr 1,r7
+.L12:
+ cmp r0,r12
+ bne .L9
+.L8:
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ mov r6, r10
+ divu r7, r10, r0
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___udivsi3,.-___udivsi3
+#endif
+
+#ifdef L_divsi3
+ .text
+ .globl ___divsi3
+ .type ___divsi3,@function
+___divsi3:
+#ifdef __v850__
+ add -8,sp
+ st.w r31,4[sp]
+ st.w r22,0[sp]
+ mov 1,r22
+ tst r7,r7
+ bp .L3
+ subr r0,r7
+ subr r0,r22
+.L3:
+ tst r6,r6
+ bp .L4
+ subr r0,r6
+ subr r0,r22
+.L4:
+ jarl ___udivsi3,r31
+ cmp r0,r22
+ bp .L7
+ subr r0,r10
+.L7:
+ ld.w 0[sp],r22
+ ld.w 4[sp],r31
+ add 8,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ mov r6, r10
+ div r7, r10, r0
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___divsi3,.-___divsi3
+#endif
+
+#ifdef L_umodsi3
+ .text
+ .globl ___umodsi3
+ .type ___umodsi3,@function
+___umodsi3:
+#ifdef __v850__
+ add -12,sp
+ st.w r31,8[sp]
+ st.w r7,4[sp]
+ st.w r6,0[sp]
+ jarl ___udivsi3,r31
+ ld.w 4[sp],r7
+ mov r10,r6
+ jarl ___mulsi3,r31
+ ld.w 0[sp],r6
+ subr r6,r10
+ ld.w 8[sp],r31
+ add 12,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ divu r7, r6, r10
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___umodsi3,.-___umodsi3
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+ .text
+ .globl ___modsi3
+ .type ___modsi3,@function
+___modsi3:
+#ifdef __v850__
+ add -12,sp
+ st.w r31,8[sp]
+ st.w r7,4[sp]
+ st.w r6,0[sp]
+ jarl ___divsi3,r31
+ ld.w 4[sp],r7
+ mov r10,r6
+ jarl ___mulsi3,r31
+ ld.w 0[sp],r6
+ subr r6,r10
+ ld.w 8[sp],r31
+ add 12,sp
+ jmp [r31]
+
+#else /* defined(__v850e__) */
+
+ /* See comments at end of __mulsi3. */
+ div r7, r6, r10
+ jmp [r31]
+
+#endif /* __v850e__ */
+
+ .size ___modsi3,.-___modsi3
+#endif /* L_modsi3 */
+
+#ifdef L_save_2
+ .text
+ .align 2
+ .globl __save_r2_r29
+ .type __save_r2_r29,@function
+ /* Allocate space and save registers 2, 20 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r2_r29,r10.  */
+__save_r2_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -44,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r2,40[ep]
+ mov r1,ep
+#else
+ addi -44,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r2,40[sp]
+#endif
+ jmp [r10]
+ .size __save_r2_r29,.-__save_r2_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r2_r29. */
+ .align 2
+ .globl __return_r2_r29
+ .type __return_r2_r29,@function
+__return_r2_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r2
+ addi 44,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ ld.w 40[sp],r2
+ addi 44,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r2_r29,.-__return_r2_r29
+#endif /* L_save_2 */
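
These paired routines (and the variants that follow) are out-of-line prologue/epilogue helpers: a function needing r2 and r20..r29 preserved calls the save helper with jarl, which leaves the address of the next instruction in r10 so that the helper's closing jmp [r10] resumes the function body, and the function's epilogue transfers to the matching __return_* helper with jr, which restores the registers and returns through the caller's r31. This is presumably the support code for GCC's v850 -mprolog-function code-size optimization.
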
+
+#ifdef L_save_20
+ .text
+ .align 2
+ .globl __save_r20_r29
+ .type __save_r20_r29,@function
+ /* Allocate space and save registers 20 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r20_r29,r10.  */
+__save_r20_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -40,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ mov r1,ep
+#else
+ addi -40,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+#endif
+ jmp [r10]
+ .size __save_r20_r29,.-__save_r20_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r20_r29. */
+ .align 2
+ .globl __return_r20_r29
+ .type __return_r20_r29,@function
+__return_r20_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ addi 40,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ addi 40,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r20_r29,.-__return_r20_r29
+#endif /* L_save_20 */
+
+#ifdef L_save_21
+ .text
+ .align 2
+ .globl __save_r21_r29
+ .type __save_r21_r29,@function
+ /* Allocate space and save registers 21 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r21_r29,r10.  */
+__save_r21_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -36,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ mov r1,ep
+#else
+ addi -36,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+#endif
+ jmp [r10]
+ .size __save_r21_r29,.-__save_r21_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r21_r29. */
+ .align 2
+ .globl __return_r21_r29
+ .type __return_r21_r29,@function
+__return_r21_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ addi 36,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ addi 36,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r21_r29,.-__return_r21_r29
+#endif /* L_save_21 */
+
+#ifdef L_save_22
+ .text
+ .align 2
+ .globl __save_r22_r29
+ .type __save_r22_r29,@function
+ /* Allocate space and save registers 22 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r22_r29,r10.  */
+__save_r22_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -32,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ mov r1,ep
+#else
+ addi -32,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+#endif
+ jmp [r10]
+ .size __save_r22_r29,.-__save_r22_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r22_r29. */
+ .align 2
+ .globl __return_r22_r29
+ .type __return_r22_r29,@function
+__return_r22_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ addi 32,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ addi 32,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r22_r29,.-__return_r22_r29
+#endif /* L_save_22 */
+
+#ifdef L_save_23
+ .text
+ .align 2
+ .globl __save_r23_r29
+ .type __save_r23_r29,@function
+ /* Allocate space and save registers 23 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r23_r29,r10.  */
+__save_r23_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -28,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ mov r1,ep
+#else
+ addi -28,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+#endif
+ jmp [r10]
+ .size __save_r23_r29,.-__save_r23_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r23_r29. */
+ .align 2
+ .globl __return_r23_r29
+ .type __return_r23_r29,@function
+__return_r23_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ addi 28,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ addi 28,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r23_r29,.-__return_r23_r29
+#endif /* L_save_23 */
+
+#ifdef L_save_24
+ .text
+ .align 2
+ .globl __save_r24_r29
+ .type __save_r24_r29,@function
+ /* Allocate space and save registers 24 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r24_r29,r10.  */
+__save_r24_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -24,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ mov r1,ep
+#else
+ addi -24,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+#endif
+ jmp [r10]
+ .size __save_r24_r29,.-__save_r24_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r24_r29. */
+ .align 2
+ .globl __return_r24_r29
+ .type __return_r24_r29,@function
+__return_r24_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ addi 24,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ addi 24,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r24_r29,.-__return_r24_r29
+#endif /* L_save_24 */
+
+#ifdef L_save_25
+ .text
+ .align 2
+ .globl __save_r25_r29
+ .type __save_r25_r29,@function
+ /* Allocate space and save registers 25 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r25_r29,r10.  */
+__save_r25_r29:
+#ifdef __EP__
+ mov ep,r1
+ addi -20,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ mov r1,ep
+#else
+ addi -20,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+#endif
+ jmp [r10]
+ .size __save_r25_r29,.-__save_r25_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r25_r29. */
+ .align 2
+ .globl __return_r25_r29
+ .type __return_r25_r29,@function
+__return_r25_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ addi 20,sp,sp
+ mov r1,ep
+#else
+	ld.w	0[sp],r29
+	ld.w	4[sp],r28
+	ld.w	8[sp],r27
+	ld.w	12[sp],r26
+	ld.w	16[sp],r25
+ addi 20,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r25_r29,.-__return_r25_r29
+#endif /* L_save_25 */
+
+#ifdef L_save_26
+ .text
+ .align 2
+ .globl __save_r26_r29
+ .type __save_r26_r29,@function
+ /* Allocate space and save registers 26 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r26_r29,r10.  */
+__save_r26_r29:
+#ifdef __EP__
+ mov ep,r1
+ add -16,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ mov r1,ep
+#else
+ add -16,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+#endif
+ jmp [r10]
+ .size __save_r26_r29,.-__save_r26_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r26_r29. */
+ .align 2
+ .globl __return_r26_r29
+ .type __return_r26_r29,@function
+__return_r26_r29:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ addi 16,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ addi 16,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r26_r29,.-__return_r26_r29
+#endif /* L_save_26 */
+
+#ifdef L_save_27
+ .text
+ .align 2
+ .globl __save_r27_r29
+ .type __save_r27_r29,@function
+ /* Allocate space and save registers 27 .. 29 on the stack. */
+	/* Called via:	jarl	__save_r27_r29,r10.  */
+__save_r27_r29:
+ add -12,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ jmp [r10]
+ .size __save_r27_r29,.-__save_r27_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r27_r29. */
+ .align 2
+ .globl __return_r27_r29
+ .type __return_r27_r29,@function
+__return_r27_r29:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ add 12,sp
+ jmp [r31]
+ .size __return_r27_r29,.-__return_r27_r29
+#endif /* L_save_27 */
+
+#ifdef L_save_28
+ .text
+ .align 2
+ .globl __save_r28_r29
+ .type __save_r28_r29,@function
+ /* Allocate space and save registers 28,29 on the stack. */
+	/* Called via:	jarl	__save_r28_r29,r10.  */
+__save_r28_r29:
+ add -8,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ jmp [r10]
+ .size __save_r28_r29,.-__save_r28_r29
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r28_r29. */
+ .align 2
+ .globl __return_r28_r29
+ .type __return_r28_r29,@function
+__return_r28_r29:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ add 8,sp
+ jmp [r31]
+ .size __return_r28_r29,.-__return_r28_r29
+#endif /* L_save_28 */
+
+#ifdef L_save_29
+ .text
+ .align 2
+ .globl __save_r29
+ .type __save_r29,@function
+ /* Allocate space and save register 29 on the stack. */
+	/* Called via:	jarl	__save_r29,r10.  */
+__save_r29:
+ add -4,sp
+ st.w r29,0[sp]
+ jmp [r10]
+ .size __save_r29,.-__save_r29
+
+ /* Restore saved register 29, deallocate stack and return to the user. */
+ /* Called via: jr __return_r29. */
+ .align 2
+ .globl __return_r29
+ .type __return_r29,@function
+__return_r29:
+ ld.w 0[sp],r29
+ add 4,sp
+ jmp [r31]
+ .size __return_r29,.-__return_r29
+#endif /* L_save_29 */
+
+#ifdef L_save_2c
+ .text
+ .align 2
+ .globl __save_r2_r31
+ .type __save_r2_r31,@function
+	/* Allocate space and save registers 2, 20 .. 29, 31 on the stack.  */
+ /* Also allocate space for the argument save area. */
+	/* Called via:	jarl	__save_r2_r31,r10.  */
+__save_r2_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -48,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r2,40[ep]
+ sst.w r31,44[ep]
+ mov r1,ep
+#else
+ addi -48,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r2,40[sp]
+ st.w r31,44[sp]
+#endif
+ jmp [r10]
+ .size __save_r2_r31,.-__save_r2_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+	/* Called via:	jr	__return_r2_r31.  */
+ .align 2
+ .globl __return_r2_r31
+ .type __return_r2_r31,@function
+__return_r2_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r2
+ sld.w 44[ep],r31
+ addi 48,sp,sp
+ mov r1,ep
+#else
+	ld.w	0[sp],r29
+	ld.w	4[sp],r28
+	ld.w	8[sp],r27
+	ld.w	12[sp],r26
+	ld.w	16[sp],r25
+	ld.w	20[sp],r24
+	ld.w	24[sp],r23
+	ld.w	28[sp],r22
+	ld.w	32[sp],r21
+	ld.w	36[sp],r20
+	ld.w	40[sp],r2
+	ld.w	44[sp],r31
+ addi 48,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r2_r31,.-__return_r2_r31
+#endif /* L_save_2c */
+
+#ifdef L_save_20c
+ .text
+ .align 2
+ .globl __save_r20_r31
+ .type __save_r20_r31,@function
+ /* Allocate space and save registers 20 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+	/* Called via:	jarl	__save_r20_r31,r10.  */
+__save_r20_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -44,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r20,36[ep]
+ sst.w r31,40[ep]
+ mov r1,ep
+#else
+ addi -44,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r20,36[sp]
+ st.w r31,40[sp]
+#endif
+ jmp [r10]
+ .size __save_r20_r31,.-__save_r20_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r20_r31. */
+ .align 2
+ .globl __return_r20_r31
+ .type __return_r20_r31,@function
+__return_r20_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r20
+ sld.w 40[ep],r31
+ addi 44,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r20
+ ld.w 40[sp],r31
+ addi 44,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r20_r31,.-__return_r20_r31
+#endif /* L_save_20c */
+
+#ifdef L_save_21c
+ .text
+ .align 2
+ .globl __save_r21_r31
+ .type __save_r21_r31,@function
+ /* Allocate space and save registers 21 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r21_r31,r10. */
+__save_r21_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -40,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r21,32[ep]
+ sst.w r31,36[ep]
+ mov r1,ep
+ jmp [r10]
+#else
+ addi -40,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r21,32[sp]
+ st.w r31,36[sp]
+ jmp [r10]
+#endif
+ .size __save_r21_r31,.-__save_r21_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r21_r31. */
+ .align 2
+ .globl __return_r21_r31
+ .type __return_r21_r31,@function
+__return_r21_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r21
+ sld.w 36[ep],r31
+ addi 40,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r21
+ ld.w 36[sp],r31
+ addi 40,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r21_r31,.-__return_r21_r31
+#endif /* L_save_21c */
+
+#ifdef L_save_22c
+ .text
+ .align 2
+ .globl __save_r22_r31
+ .type __save_r22_r31,@function
+ /* Allocate space and save registers 22 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r22_r31,r10. */
+__save_r22_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -36,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r22,28[ep]
+ sst.w r31,32[ep]
+ mov r1,ep
+#else
+ addi -36,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r22,28[sp]
+ st.w r31,32[sp]
+#endif
+ jmp [r10]
+ .size __save_r22_r31,.-__save_r22_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r22_r31. */
+ .align 2
+ .globl __return_r22_r31
+ .type __return_r22_r31,@function
+__return_r22_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r22
+ sld.w 32[ep],r31
+ addi 36,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r22
+ ld.w 32[sp],r31
+ addi 36,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r22_r31,.-__return_r22_r31
+#endif /* L_save_22c */
+
+#ifdef L_save_23c
+ .text
+ .align 2
+ .globl __save_r23_r31
+ .type __save_r23_r31,@function
+ /* Allocate space and save registers 23 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r23_r31,r10. */
+__save_r23_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -32,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r23,24[ep]
+ sst.w r31,28[ep]
+ mov r1,ep
+#else
+ addi -32,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r23,24[sp]
+ st.w r31,28[sp]
+#endif
+ jmp [r10]
+ .size __save_r23_r31,.-__save_r23_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r23_r31. */
+ .align 2
+ .globl __return_r23_r31
+ .type __return_r23_r31,@function
+__return_r23_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r23
+ sld.w 28[ep],r31
+ addi 32,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r23
+ ld.w 28[sp],r31
+ addi 32,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r23_r31,.-__return_r23_r31
+#endif /* L_save_23c */
+
+#ifdef L_save_24c
+ .text
+ .align 2
+ .globl __save_r24_r31
+ .type __save_r24_r31,@function
+ /* Allocate space and save registers 24 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r24_r31,r10. */
+__save_r24_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -28,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r24,20[ep]
+ sst.w r31,24[ep]
+ mov r1,ep
+#else
+ addi -28,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r24,20[sp]
+ st.w r31,24[sp]
+#endif
+ jmp [r10]
+ .size __save_r24_r31,.-__save_r24_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r24_r31. */
+ .align 2
+ .globl __return_r24_r31
+ .type __return_r24_r31,@function
+__return_r24_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r24
+ sld.w 24[ep],r31
+ addi 28,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r24
+ ld.w 24[sp],r31
+ addi 28,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r24_r31,.-__return_r24_r31
+#endif /* L_save_24c */
+
+#ifdef L_save_25c
+ .text
+ .align 2
+ .globl __save_r25_r31
+ .type __save_r25_r31,@function
+ /* Allocate space and save registers 25 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r25_r31,r10. */
+__save_r25_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -24,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r25,16[ep]
+ sst.w r31,20[ep]
+ mov r1,ep
+#else
+ addi -24,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r25,16[sp]
+ st.w r31,20[sp]
+#endif
+ jmp [r10]
+ .size __save_r25_r31,.-__save_r25_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r25_r31. */
+ .align 2
+ .globl __return_r25_r31
+ .type __return_r25_r31,@function
+__return_r25_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r25
+ sld.w 20[ep],r31
+ addi 24,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r25
+ ld.w 20[sp],r31
+ addi 24,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r25_r31,.-__return_r25_r31
+#endif /* L_save_25c */
+
+#ifdef L_save_26c
+ .text
+ .align 2
+ .globl __save_r26_r31
+ .type __save_r26_r31,@function
+ /* Allocate space and save registers 26 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r26_r31,r10. */
+__save_r26_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -20,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r26,12[ep]
+ sst.w r31,16[ep]
+ mov r1,ep
+#else
+ addi -20,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r26,12[sp]
+ st.w r31,16[sp]
+#endif
+ jmp [r10]
+ .size __save_r26_r31,.-__save_r26_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r26_r31. */
+ .align 2
+ .globl __return_r26_r31
+ .type __return_r26_r31,@function
+__return_r26_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r26
+ sld.w 16[ep],r31
+ addi 20,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r26
+ ld.w 16[sp],r31
+ addi 20,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r26_r31,.-__return_r26_r31
+#endif /* L_save_26c */
+
+#ifdef L_save_27c
+ .text
+ .align 2
+ .globl __save_r27_r31
+ .type __save_r27_r31,@function
+ /* Allocate space and save registers 27 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r27_r31,r10. */
+__save_r27_r31:
+#ifdef __EP__
+ mov ep,r1
+ addi -16,sp,sp
+ mov sp,ep
+ sst.w r29,0[ep]
+ sst.w r28,4[ep]
+ sst.w r27,8[ep]
+ sst.w r31,12[ep]
+ mov r1,ep
+#else
+ addi -16,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r27,8[sp]
+ st.w r31,12[sp]
+#endif
+ jmp [r10]
+ .size __save_r27_r31,.-__save_r27_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r27_r31. */
+ .align 2
+ .globl __return_r27_r31
+ .type __return_r27_r31,@function
+__return_r27_r31:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 0[ep],r29
+ sld.w 4[ep],r28
+ sld.w 8[ep],r27
+ sld.w 12[ep],r31
+ addi 16,sp,sp
+ mov r1,ep
+#else
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r27
+ ld.w 12[sp],r31
+ addi 16,sp,sp
+#endif
+ jmp [r31]
+ .size __return_r27_r31,.-__return_r27_r31
+#endif /* L_save_27c */
+
+#ifdef L_save_28c
+ .text
+ .align 2
+ .globl __save_r28_r31
+ .type __save_r28_r31,@function
+ /* Allocate space and save registers 28 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r28_r31,r10. */
+__save_r28_r31:
+ addi -12,sp,sp
+ st.w r29,0[sp]
+ st.w r28,4[sp]
+ st.w r31,8[sp]
+ jmp [r10]
+ .size __save_r28_r31,.-__save_r28_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r28_r31. */
+ .align 2
+ .globl __return_r28_r31
+ .type __return_r28_r31,@function
+__return_r28_r31:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r28
+ ld.w 8[sp],r31
+ addi 12,sp,sp
+ jmp [r31]
+ .size __return_r28_r31,.-__return_r28_r31
+#endif /* L_save_28c */
+
+#ifdef L_save_29c
+ .text
+ .align 2
+ .globl __save_r29_r31
+ .type __save_r29_r31,@function
+ /* Allocate space and save registers 29 & 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r29_r31,r10. */
+__save_r29_r31:
+ addi -8,sp,sp
+ st.w r29,0[sp]
+ st.w r31,4[sp]
+ jmp [r10]
+ .size __save_r29_r31,.-__save_r29_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r29_r31. */
+ .align 2
+ .globl __return_r29_r31
+ .type __return_r29_r31,@function
+__return_r29_r31:
+ ld.w 0[sp],r29
+ ld.w 4[sp],r31
+ addi 8,sp,sp
+ jmp [r31]
+ .size __return_r29_r31,.-__return_r29_r31
+#endif /* L_save_29c */
+
+#ifdef L_save_31c
+ .text
+ .align 2
+ .globl __save_r31
+ .type __save_r31,@function
+ /* Allocate space and save register 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: jalr __save_r31,r10. */
+__save_r31:
+ addi -4,sp,sp
+ st.w r31,0[sp]
+ jmp [r10]
+ .size __save_r31,.-__save_r31
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: jr __return_r31. */
+ .align 2
+ .globl __return_r31
+ .type __return_r31,@function
+__return_r31:
+ ld.w 0[sp],r31
+ addi 4,sp,sp
+ jmp [r31]
+ .size __return_r31,.-__return_r31
+#endif /* L_save_31c */
+
+#ifdef L_save_interrupt
+ .text
+ .align 2
+ .globl __save_interrupt
+ .type __save_interrupt,@function
+ /* Save registers ep, gp (r4) and r1 on the stack and load up ep and gp with their expected values. */
+ /* Note, 20 bytes of stack have already been allocated. */
+ /* Called via: jalr __save_interrupt,r10. */
+__save_interrupt:
+ /* add -20,sp ; st.w r11,16[sp] ; st.w r10,12[sp] ; */
+ st.w ep,0[sp]
+ st.w gp,4[sp]
+ st.w r1,8[sp]
+ movhi hi(__ep),r0,ep
+ movea lo(__ep),ep,ep
+ movhi hi(__gp),r0,gp
+ movea lo(__gp),gp,gp
+ jmp [r10]
+ .size __save_interrupt,.-__save_interrupt
+
+ /* Restore saved registers, deallocate stack and return from the interrupt. */
+ /* Called via: jr __return_interrupt. */
+ .align 2
+ .globl __return_interrupt
+ .type __return_interrupt,@function
+__return_interrupt:
+ ld.w 0[sp],ep
+ ld.w 4[sp],gp
+ ld.w 8[sp],r1
+ ld.w 12[sp],r10
+ ld.w 16[sp],r11
+ addi 20,sp,sp
+ reti
+ .size __return_interrupt,.-__return_interrupt
+#endif /* L_save_interrupt */
+
+#ifdef L_save_all_interrupt
+ .text
+ .align 2
+ .globl __save_all_interrupt
+ .type __save_all_interrupt,@function
+ /* Save all registers except for those saved in __save_interrupt. */
+ /* Allocate enough stack to hold all of those registers. */
+ /* Called via: jalr __save_all_interrupt,r10. */
+__save_all_interrupt:
+ addi -104,sp,sp
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sst.w r31,100[ep]
+ sst.w r2,96[ep]
+ sst.w gp,92[ep]
+ sst.w r6,88[ep]
+ sst.w r7,84[ep]
+ sst.w r8,80[ep]
+ sst.w r9,76[ep]
+ sst.w r11,72[ep]
+ sst.w r12,68[ep]
+ sst.w r13,64[ep]
+ sst.w r14,60[ep]
+ sst.w r15,56[ep]
+ sst.w r16,52[ep]
+ sst.w r17,48[ep]
+ sst.w r18,44[ep]
+ sst.w r19,40[ep]
+ sst.w r20,36[ep]
+ sst.w r21,32[ep]
+ sst.w r22,28[ep]
+ sst.w r23,24[ep]
+ sst.w r24,20[ep]
+ sst.w r25,16[ep]
+ sst.w r26,12[ep]
+ sst.w r27,8[ep]
+ sst.w r28,4[ep]
+ sst.w r29,0[ep]
+ mov r1,ep
+#else
+ st.w r31,100[sp]
+ st.w r2,96[sp]
+ st.w gp,92[sp]
+ st.w r6,88[sp]
+ st.w r7,84[sp]
+ st.w r8,80[sp]
+ st.w r9,76[sp]
+ st.w r11,72[sp]
+ st.w r12,68[sp]
+ st.w r13,64[sp]
+ st.w r14,60[sp]
+ st.w r15,56[sp]
+ st.w r16,52[sp]
+ st.w r17,48[sp]
+ st.w r18,44[sp]
+ st.w r19,40[sp]
+ st.w r20,36[sp]
+ st.w r21,32[sp]
+ st.w r22,28[sp]
+ st.w r23,24[sp]
+ st.w r24,20[sp]
+ st.w r25,16[sp]
+ st.w r26,12[sp]
+ st.w r27,8[sp]
+ st.w r28,4[sp]
+ st.w r29,0[sp]
+#endif
+ jmp [r10]
+ .size __save_all_interrupt,.-__save_all_interrupt
+
+ .globl __restore_all_interrupt
+ .type __restore_all_interrupt,@function
+ /* Restore all registers saved in __save_all_interrupt and
+ deallocate the stack space. */
+ /* Called via: jalr __restore_all_interrupt,r10. */
+__restore_all_interrupt:
+#ifdef __EP__
+ mov ep,r1
+ mov sp,ep
+ sld.w 100[ep],r31
+ sld.w 96[ep],r2
+ sld.w 92[ep],gp
+ sld.w 88[ep],r6
+ sld.w 84[ep],r7
+ sld.w 80[ep],r8
+ sld.w 76[ep],r9
+ sld.w 72[ep],r11
+ sld.w 68[ep],r12
+ sld.w 64[ep],r13
+ sld.w 60[ep],r14
+ sld.w 56[ep],r15
+ sld.w 52[ep],r16
+ sld.w 48[ep],r17
+ sld.w 44[ep],r18
+ sld.w 40[ep],r19
+ sld.w 36[ep],r20
+ sld.w 32[ep],r21
+ sld.w 28[ep],r22
+ sld.w 24[ep],r23
+ sld.w 20[ep],r24
+ sld.w 16[ep],r25
+ sld.w 12[ep],r26
+ sld.w 8[ep],r27
+ sld.w 4[ep],r28
+ sld.w 0[ep],r29
+ mov r1,ep
+#else
+ ld.w 100[sp],r31
+ ld.w 96[sp],r2
+ ld.w 92[sp],gp
+ ld.w 88[sp],r6
+ ld.w 84[sp],r7
+ ld.w 80[sp],r8
+ ld.w 76[sp],r9
+ ld.w 72[sp],r11
+ ld.w 68[sp],r12
+ ld.w 64[sp],r13
+ ld.w 60[sp],r14
+ ld.w 56[sp],r15
+ ld.w 52[sp],r16
+ ld.w 48[sp],r17
+ ld.w 44[sp],r18
+ ld.w 40[sp],r19
+ ld.w 36[sp],r20
+ ld.w 32[sp],r21
+ ld.w 28[sp],r22
+ ld.w 24[sp],r23
+ ld.w 20[sp],r24
+ ld.w 16[sp],r25
+ ld.w 12[sp],r26
+ ld.w 8[sp],r27
+ ld.w 4[sp],r28
+ ld.w 0[sp],r29
+#endif
+ addi 104,sp,sp
+ jmp [r10]
+ .size __restore_all_interrupt,.-__restore_all_interrupt
+#endif /* L_save_all_interrupt */
+
+#if defined(__v850e__) || defined(__v850e1__) || defined(__v850e2__) || defined(__v850e2v3__)
+#ifdef L_callt_save_r2_r29
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Allocate space and save registers 2, 20 .. 29 on the stack. */
+ /* Called via: callt ctoff(__callt_save_r2_r29). */
+ .align 2
+.L_save_r2_r29:
+ add -4, sp
+ st.w r2, 0[sp]
+ prepare {r20 - r29}, 0
+ ctret
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: callt ctoff(__callt_return_r2_r29). */
+ .align 2
+.L_return_r2_r29:
+ dispose 0, {r20-r29}
+ ld.w 0[sp], r2
+ add 4, sp
+ jmp [r31]
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_r2_r29
+ .type __callt_save_r2_r29,@function
+__callt_save_r2_r29: .short ctoff(.L_save_r2_r29)
+
+ .global __callt_return_r2_r29
+ .type __callt_return_r2_r29,@function
+__callt_return_r2_r29: .short ctoff(.L_return_r2_r29)
+
+#endif /* L_callt_save_r2_r29. */
+
+#ifdef L_callt_save_r2_r31
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Allocate space and save registers 2 and 20 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: callt ctoff(__callt_save_r2_r31). */
+ .align 2
+.L_save_r2_r31:
+ add -4, sp
+ st.w r2, 0[sp]
+ prepare {r20 - r29, r31}, 0
+ ctret
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: callt ctoff(__callt_return_r2_r31). */
+ .align 2
+.L_return_r2_r31:
+ dispose 0, {r20 - r29, r31}
+ ld.w 0[sp], r2
+ addi 4, sp, sp
+ jmp [r31]
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_r2_r31
+ .type __callt_save_r2_r31,@function
+__callt_save_r2_r31: .short ctoff(.L_save_r2_r31)
+
+ .global __callt_return_r2_r31
+ .type __callt_return_r2_r31,@function
+__callt_return_r2_r31: .short ctoff(.L_return_r2_r31)
+
+#endif /* L_callt_save_r2_r31 */
+
+#ifdef L_callt_save_interrupt
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Save registers ep, gp and r1 on the stack and load up ep and gp with their expected values. */
+ /* Called via: callt ctoff(__callt_save_interrupt). */
+ .align 2
+.L_save_interrupt:
+ /* SP has already been moved before callt ctoff(_save_interrupt). */
+ /* R1, R10, R11, ctpc and ctpsw have already been saved before callt ctoff(_save_interrupt). */
+ /* addi -28, sp, sp */
+ /* st.w r1, 24[sp] */
+ /* st.w r10, 12[sp] */
+ /* st.w r11, 16[sp] */
+ /* stsr ctpc, r10 */
+ /* st.w r10, 20[sp] */
+ /* stsr ctpsw, r10 */
+ /* st.w r10, 24[sp] */
+ st.w ep, 0[sp]
+ st.w gp, 4[sp]
+ st.w r1, 8[sp]
+ mov hilo(__ep),ep
+ mov hilo(__gp),gp
+ ctret
+
+ .call_table_text
+ /* Restore saved registers, deallocate stack and return from the interrupt. */
+ /* Called via: callt ctoff(__callt_return_interrupt). */
+ .align 2
+ .globl __return_interrupt
+ .type __return_interrupt,@function
+.L_return_interrupt:
+ ld.w 24[sp], r1
+ ldsr r1, ctpsw
+ ld.w 20[sp], r1
+ ldsr r1, ctpc
+ ld.w 16[sp], r11
+ ld.w 12[sp], r10
+ ld.w 8[sp], r1
+ ld.w 4[sp], gp
+ ld.w 0[sp], ep
+ addi 28, sp, sp
+ reti
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_interrupt
+ .type __callt_save_interrupt,@function
+__callt_save_interrupt: .short ctoff(.L_save_interrupt)
+
+ .global __callt_return_interrupt
+ .type __callt_return_interrupt,@function
+__callt_return_interrupt: .short ctoff(.L_return_interrupt)
+
+#endif /* L_callt_save_interrupt */
+
+#ifdef L_callt_save_all_interrupt
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Save all registers except for those saved in __save_interrupt. */
+ /* Allocate enough stack to hold all of those registers. */
+ /* Called via: callt ctoff(__callt_save_all_interrupt). */
+ .align 2
+.L_save_all_interrupt:
+ addi -60, sp, sp
+#ifdef __EP__
+ mov ep, r1
+ mov sp, ep
+ sst.w r2, 56[ep]
+ sst.w r5, 52[ep]
+ sst.w r6, 48[ep]
+ sst.w r7, 44[ep]
+ sst.w r8, 40[ep]
+ sst.w r9, 36[ep]
+ sst.w r11, 32[ep]
+ sst.w r12, 28[ep]
+ sst.w r13, 24[ep]
+ sst.w r14, 20[ep]
+ sst.w r15, 16[ep]
+ sst.w r16, 12[ep]
+ sst.w r17, 8[ep]
+ sst.w r18, 4[ep]
+ sst.w r19, 0[ep]
+ mov r1, ep
+#else
+ st.w r2, 56[sp]
+ st.w r5, 52[sp]
+ st.w r6, 48[sp]
+ st.w r7, 44[sp]
+ st.w r8, 40[sp]
+ st.w r9, 36[sp]
+ st.w r11, 32[sp]
+ st.w r12, 28[sp]
+ st.w r13, 24[sp]
+ st.w r14, 20[sp]
+ st.w r15, 16[sp]
+ st.w r16, 12[sp]
+ st.w r17, 8[sp]
+ st.w r18, 4[sp]
+ st.w r19, 0[sp]
+#endif
+ prepare {r20 - r29, r31}, 0
+ ctret
+
+ /* Restore all registers saved in __callt_save_all_interrupt and
+ deallocate the stack space. */
+ /* Called via: callt ctoff(__callt_restore_all_interrupt). */
+ .align 2
+.L_restore_all_interrupt:
+ dispose 0, {r20 - r29, r31}
+#ifdef __EP__
+ mov ep, r1
+ mov sp, ep
+ sld.w 0 [ep], r19
+ sld.w 4 [ep], r18
+ sld.w 8 [ep], r17
+ sld.w 12[ep], r16
+ sld.w 16[ep], r15
+ sld.w 20[ep], r14
+ sld.w 24[ep], r13
+ sld.w 28[ep], r12
+ sld.w 32[ep], r11
+ sld.w 36[ep], r9
+ sld.w 40[ep], r8
+ sld.w 44[ep], r7
+ sld.w 48[ep], r6
+ sld.w 52[ep], r5
+ sld.w 56[ep], r2
+ mov r1, ep
+#else
+ ld.w 0 [sp], r19
+ ld.w 4 [sp], r18
+ ld.w 8 [sp], r17
+ ld.w 12[sp], r16
+ ld.w 16[sp], r15
+ ld.w 20[sp], r14
+ ld.w 24[sp], r13
+ ld.w 28[sp], r12
+ ld.w 32[sp], r11
+ ld.w 36[sp], r9
+ ld.w 40[sp], r8
+ ld.w 44[sp], r7
+ ld.w 48[sp], r6
+ ld.w 52[sp], r5
+ ld.w 56[sp], r2
+#endif
+ addi 60, sp, sp
+ ctret
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_all_interrupt
+ .type __callt_save_all_interrupt,@function
+__callt_save_all_interrupt: .short ctoff(.L_save_all_interrupt)
+
+ .global __callt_restore_all_interrupt
+ .type __callt_restore_all_interrupt,@function
+__callt_restore_all_interrupt: .short ctoff(.L_restore_all_interrupt)
+
+#endif /* L_callt_save_all_interrupt */
+
+
+#define MAKE_CALLT_FUNCS( START ) \
+ .call_table_text ;\
+ .align 2 ;\
+ /* Allocate space and save registers START .. r29 on the stack. */ ;\
+ /* Called via: callt ctoff(__callt_save_START_r29). */ ;\
+.L_save_##START##_r29: ;\
+ prepare { START - r29 }, 0 ;\
+ ctret ;\
+ ;\
+ /* Restore saved registers, deallocate stack and return. */ ;\
+ /* Called via: callt ctoff(__callt_return_START_r29). */ ;\
+ .align 2 ;\
+.L_return_##START##_r29: ;\
+ dispose 0, { START - r29 }, r31 ;\
+ ;\
+ /* Place the offsets of the start of these funcs into the call table. */;\
+ .call_table_data ;\
+ ;\
+ .global __callt_save_##START##_r29 ;\
+ .type __callt_save_##START##_r29,@function ;\
+__callt_save_##START##_r29: .short ctoff(.L_save_##START##_r29 ) ;\
+ ;\
+ .global __callt_return_##START##_r29 ;\
+ .type __callt_return_##START##_r29,@function ;\
+__callt_return_##START##_r29: .short ctoff(.L_return_##START##_r29 )
+
+
+#define MAKE_CALLT_CFUNCS( START ) \
+ .call_table_text ;\
+ .align 2 ;\
+ /* Allocate space and save registers START .. r31 on the stack. */ ;\
+ /* Called via: callt ctoff(__callt_save_START_r31c). */ ;\
+.L_save_##START##_r31c: ;\
+ prepare { START - r29, r31}, 0 ;\
+ ctret ;\
+ ;\
+ /* Restore saved registers, deallocate stack and return. */ ;\
+ /* Called via: callt ctoff(__callt_return_START_r31c). */ ;\
+ .align 2 ;\
+.L_return_##START##_r31c: ;\
+ dispose 0, { START - r29, r31}, r31 ;\
+ ;\
+ /* Place the offsets of the start of these funcs into the call table. */;\
+ .call_table_data ;\
+ ;\
+ .global __callt_save_##START##_r31c ;\
+ .type __callt_save_##START##_r31c,@function ;\
+__callt_save_##START##_r31c: .short ctoff(.L_save_##START##_r31c ) ;\
+ ;\
+ .global __callt_return_##START##_r31c ;\
+ .type __callt_return_##START##_r31c,@function ;\
+__callt_return_##START##_r31c: .short ctoff(.L_return_##START##_r31c )
+
+
+#ifdef L_callt_save_20
+ MAKE_CALLT_FUNCS (r20)
+#endif
+#ifdef L_callt_save_21
+ MAKE_CALLT_FUNCS (r21)
+#endif
+#ifdef L_callt_save_22
+ MAKE_CALLT_FUNCS (r22)
+#endif
+#ifdef L_callt_save_23
+ MAKE_CALLT_FUNCS (r23)
+#endif
+#ifdef L_callt_save_24
+ MAKE_CALLT_FUNCS (r24)
+#endif
+#ifdef L_callt_save_25
+ MAKE_CALLT_FUNCS (r25)
+#endif
+#ifdef L_callt_save_26
+ MAKE_CALLT_FUNCS (r26)
+#endif
+#ifdef L_callt_save_27
+ MAKE_CALLT_FUNCS (r27)
+#endif
+#ifdef L_callt_save_28
+ MAKE_CALLT_FUNCS (r28)
+#endif
+#ifdef L_callt_save_29
+ MAKE_CALLT_FUNCS (r29)
+#endif
+
+#ifdef L_callt_save_20c
+ MAKE_CALLT_CFUNCS (r20)
+#endif
+#ifdef L_callt_save_21c
+ MAKE_CALLT_CFUNCS (r21)
+#endif
+#ifdef L_callt_save_22c
+ MAKE_CALLT_CFUNCS (r22)
+#endif
+#ifdef L_callt_save_23c
+ MAKE_CALLT_CFUNCS (r23)
+#endif
+#ifdef L_callt_save_24c
+ MAKE_CALLT_CFUNCS (r24)
+#endif
+#ifdef L_callt_save_25c
+ MAKE_CALLT_CFUNCS (r25)
+#endif
+#ifdef L_callt_save_26c
+ MAKE_CALLT_CFUNCS (r26)
+#endif
+#ifdef L_callt_save_27c
+ MAKE_CALLT_CFUNCS (r27)
+#endif
+#ifdef L_callt_save_28c
+ MAKE_CALLT_CFUNCS (r28)
+#endif
+#ifdef L_callt_save_29c
+ MAKE_CALLT_CFUNCS (r29)
+#endif
+
+
+#ifdef L_callt_save_31c
+ .call_table_text
+ .align 2
+ /* Allocate space and save register r31 on the stack. */
+ /* Called via: callt ctoff(__callt_save_r31c). */
+.L_callt_save_r31c:
+ prepare {r31}, 0
+ ctret
+
+ /* Restore saved registers, deallocate stack and return. */
+ /* Called via: callt ctoff(__callt_return_r31c). */
+ .align 2
+.L_callt_return_r31c:
+ dispose 0, {r31}, r31
+
+ /* Place the offsets of the start of these funcs into the call table. */
+ .call_table_data
+
+ .global __callt_save_r31c
+ .type __callt_save_r31c,@function
+__callt_save_r31c: .short ctoff(.L_callt_save_r31c)
+
+ .global __callt_return_r31c
+ .type __callt_return_r31c,@function
+__callt_return_r31c: .short ctoff(.L_callt_return_r31c)
+#endif
+
+#endif /* __v850e__ || __v850e1__ || __v850e2__ || __v850e2v3__ */
+
+/* libgcc2 routines for NEC V850. */
+/* Double Integer Arithmetical Operation. */
+
+#ifdef L_negdi2
+ .text
+ .global ___negdi2
+ .type ___negdi2, @function
+___negdi2:
+ not r6, r10
+ add 1, r10
+ setf l, r6
+ not r7, r11
+ add r6, r11
+ jmp [lp]
+
+ .size ___negdi2,.-___negdi2
+#endif
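
For reference, the two-word negation performed by ___negdi2 amounts to the
following C sketch; the function name is illustrative (not a libgcc entry
point) and the carry handling mirrors the setf l / add pair above:

    #include <stdint.h>

    /* Negate a 64-bit value using only 32-bit halves, as ___negdi2 does:
       complement both words, add 1 to the low word, and fold the carry
       out of that addition into the high word.  */
    static int64_t
    negdi2_sketch (int64_t x)
    {
      uint32_t lo = (uint32_t) x;
      uint32_t hi = (uint32_t) ((uint64_t) x >> 32);
      uint32_t nlo = ~lo + 1;        /* not r6,r10; add 1,r10 */
      uint32_t carry = (lo == 0);    /* setf l,r6: carry out iff lo was 0 */
      uint32_t nhi = ~hi + carry;    /* not r7,r11; add r6,r11 */
      return (int64_t) (((uint64_t) nhi << 32) | nlo);
    }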
+
+#ifdef L_cmpdi2
+ .text
+ .global ___cmpdi2
+ .type ___cmpdi2,@function
+___cmpdi2:
+ # Signed comparison between the high words.
+ cmp r9, r7
+ be .L_cmpdi_cmp_low
+ setf ge, r10
+ setf gt, r6
+ add r6, r10
+ jmp [lp]
+.L_cmpdi_cmp_low:
+ # Unsigned comparison between the low words.
+ cmp r8, r6
+ setf nl, r10
+ setf h, r6
+ add r6, r10
+ jmp [lp]
+ .size ___cmpdi2, . - ___cmpdi2
+#endif
+
+#ifdef L_ucmpdi2
+ .text
+ .global ___ucmpdi2
+ .type ___ucmpdi2,@function
+___ucmpdi2:
+ cmp r9, r7 # Check whether the high words are equal.
+ bne .L_ucmpdi_check_psw
+ cmp r8, r6 # Compare the low words.
+.L_ucmpdi_check_psw:
+ setf nl, r10 # r10 = 1 if a >= b (unsigned).
+ setf h, r6 # r6 = 1 if a > b (unsigned).
+ add r6, r10 # Combine into the 0/1/2 result.
+ jmp [lp]
+ .size ___ucmpdi2, . - ___ucmpdi2
+#endif
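
Both comparison helpers encode their result as 0 (a < b), 1 (a == b) or
2 (a > b); the two setf/add pairs compute exactly (a >= b) + (a > b).
A minimal C model of that encoding, with illustrative names:

    #include <stdint.h>

    static int
    cmpdi2_sketch (int64_t a, int64_t b)      /* setf ge/gt, then add */
    {
      return (a >= b) + (a > b);
    }

    static int
    ucmpdi2_sketch (uint64_t a, uint64_t b)   /* setf nl/h, then add */
    {
      return (a >= b) + (a > b);
    }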
+
+#ifdef L_muldi3
+ .text
+ .global ___muldi3
+ .type ___muldi3,@function
+___muldi3:
+#ifdef __v850__
+ jarl __save_r26_r31, r10
+ addi 16, sp, sp
+ mov r6, r28
+ shr 15, r28
+ movea lo(32767), r0, r14
+ and r14, r28
+ mov r8, r10
+ shr 15, r10
+ and r14, r10
+ mov r6, r19
+ shr 30, r19
+ mov r7, r12
+ shl 2, r12
+ or r12, r19
+ and r14, r19
+ mov r8, r13
+ shr 30, r13
+ mov r9, r12
+ shl 2, r12
+ or r12, r13
+ and r14, r13
+ mov r7, r11
+ shr 13, r11
+ and r14, r11
+ mov r9, r31
+ shr 13, r31
+ and r14, r31
+ mov r7, r29
+ shr 28, r29
+ and r14, r29
+ mov r9, r12
+ shr 28, r12
+ and r14, r12
+ and r14, r6
+ and r14, r8
+ mov r6, r14
+ mulh r8, r14
+ mov r6, r16
+ mulh r10, r16
+ mov r6, r18
+ mulh r13, r18
+ mov r6, r15
+ mulh r31, r15
+ mulh r12, r6
+ mov r28, r17
+ mulh r10, r17
+ add -16, sp
+ mov r28, r12
+ mulh r8, r12
+ add r17, r18
+ mov r28, r17
+ mulh r31, r17
+ add r12, r16
+ mov r28, r12
+ mulh r13, r12
+ add r17, r6
+ mov r19, r17
+ add r12, r15
+ mov r19, r12
+ mulh r8, r12
+ mulh r10, r17
+ add r12, r18
+ mov r19, r12
+ mulh r13, r12
+ add r17, r15
+ mov r11, r13
+ mulh r8, r13
+ add r12, r6
+ mov r11, r12
+ mulh r10, r12
+ add r13, r15
+ mulh r29, r8
+ add r12, r6
+ mov r16, r13
+ shl 15, r13
+ add r14, r13
+ mov r18, r12
+ shl 30, r12
+ mov r13, r26
+ add r12, r26
+ shr 15, r14
+ movhi hi(131071), r0, r12
+ movea lo(131071), r12, r13
+ and r13, r14
+ mov r16, r12
+ and r13, r12
+ add r12, r14
+ mov r18, r12
+ shl 15, r12
+ and r13, r12
+ add r12, r14
+ shr 17, r14
+ shr 17, r16
+ add r14, r16
+ shl 13, r15
+ shr 2, r18
+ add r18, r15
+ add r15, r16
+ mov r16, r27
+ add r8, r6
+ shl 28, r6
+ add r6, r27
+ mov r26, r10
+ mov r27, r11
+ jr __return_r26_r31
+#else /* defined(__v850e__) */
+ /* (Ahi << 32 + Alo) * (Bhi << 32 + Blo) */
+ /* r7 r6 r9 r8 */
+ mov r8, r10
+ mulu r7, r8, r0 /* Ahi * Blo */
+ mulu r6, r9, r0 /* Alo * Bhi */
+ mulu r6, r10, r11 /* Alo * Blo */
+ add r8, r11
+ add r9, r11
+ jmp [r31]
+#endif /* defined(__v850e__) */
+ .size ___muldi3, . - ___muldi3
+#endif
+
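
The v850e path of ___muldi3 needs only three 32x32 multiplies because the
Ahi * Bhi partial product lands entirely above bit 63 and is discarded.
A C sketch of that decomposition (illustrative name, not the libgcc entry
point):

    #include <stdint.h>

    /* 64 x 64 -> low 64 bits, as in the v850e path of ___muldi3.  */
    static uint64_t
    muldi3_sketch (uint64_t a, uint64_t b)
    {
      uint32_t alo = (uint32_t) a, ahi = (uint32_t) (a >> 32);
      uint32_t blo = (uint32_t) b, bhi = (uint32_t) (b >> 32);
      uint64_t low = (uint64_t) alo * blo;        /* mulu r6,r10,r11 */
      uint32_t high = (uint32_t) (low >> 32)
                      + ahi * blo                 /* mulu r7,r8,r0 */
                      + alo * bhi;                /* mulu r6,r9,r0 */
      return ((uint64_t) high << 32) | (uint32_t) low;
    }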
diff --git a/libgcc/config/v850/t-v850 b/libgcc/config/v850/t-v850
new file mode 100644
index 00000000000..b61703ace09
--- /dev/null
+++ b/libgcc/config/v850/t-v850
@@ -0,0 +1,60 @@
+LIB1ASMSRC = v850/lib1funcs.S
+LIB1ASMFUNCS = _mulsi3 \
+ _divsi3 \
+ _udivsi3 \
+ _modsi3 \
+ _umodsi3 \
+ _save_2 \
+ _save_20 \
+ _save_21 \
+ _save_22 \
+ _save_23 \
+ _save_24 \
+ _save_25 \
+ _save_26 \
+ _save_27 \
+ _save_28 \
+ _save_29 \
+ _save_2c \
+ _save_20c \
+ _save_21c \
+ _save_22c \
+ _save_23c \
+ _save_24c \
+ _save_25c \
+ _save_26c \
+ _save_27c \
+ _save_28c \
+ _save_29c \
+ _save_31c \
+ _save_interrupt \
+ _save_all_interrupt \
+ _callt_save_20 \
+ _callt_save_21 \
+ _callt_save_22 \
+ _callt_save_23 \
+ _callt_save_24 \
+ _callt_save_25 \
+ _callt_save_26 \
+ _callt_save_27 \
+ _callt_save_28 \
+ _callt_save_29 \
+ _callt_save_20c \
+ _callt_save_21c \
+ _callt_save_22c \
+ _callt_save_23c \
+ _callt_save_24c \
+ _callt_save_25c \
+ _callt_save_26c \
+ _callt_save_27c \
+ _callt_save_28c \
+ _callt_save_29c \
+ _callt_save_31c \
+ _callt_save_interrupt \
+ _callt_save_all_interrupt \
+ _callt_save_r2_r29 \
+ _callt_save_r2_r31 \
+ _negdi2 \
+ _cmpdi2 \
+ _ucmpdi2 \
+ _muldi3
diff --git a/libgcc/config/vax/lib1funcs.S b/libgcc/config/vax/lib1funcs.S
new file mode 100644
index 00000000000..1d57b56dad9
--- /dev/null
+++ b/libgcc/config/vax/lib1funcs.S
@@ -0,0 +1,92 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of GCC.
+ Contributed by Maciej W. Rozycki <macro@linux-mips.org>.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef L_udivsi3
+ .text
+ .globl __udivsi3
+ .type __udivsi3, @function
+__udivsi3:
+ .word 0
+ movl 8(%ap), %r1
+ blss 0f /* Check bit #31 of divisor. */
+ movl 4(%ap), %r2
+ blss 1f /* Check bit #31 of dividend. */
+
+ /* Both MSBs clear, do a standard division. */
+
+ divl3 %r1, %r2, %r0
+ ret
+
+ /* MSB of divisor set, only 1 or 0 may result. */
+0:
+ decl %r1
+ clrl %r0
+ cmpl %r1, 4(%ap)
+ adwc $0, %r0
+ ret
+
+ /* MSB of dividend set, do an extended division. */
+1:
+ clrl %r3
+ ediv %r1, %r2, %r0, %r3
+ ret
+ .size __udivsi3, . - __udivsi3
+ .previous
+#endif
+
+#ifdef L_umodsi3
+ .text
+ .globl __umodsi3
+ .type __umodsi3, @function
+__umodsi3:
+ .word 0
+ movl 8(%ap), %r1
+ blss 0f /* Check bit #31 of divisor. */
+ movl 4(%ap), %r2
+ blss 1f /* Check bit #31 of dividend. */
+
+ /* Both MSBs clear, do a standard division. */
+
+ divl3 %r1, %r2, %r0
+ mull2 %r0, %r1
+ subl3 %r1, %r2, %r0
+ ret
+
+ /* MSB of divisor set, subtract the divisor at most once. */
+0:
+ movl 4(%ap), %r2
+ clrl %r0
+ cmpl %r2, %r1
+ sbwc $0, %r0
+ bicl2 %r0, %r1
+ subl3 %r1, %r2, %r0
+ ret
+
+ /* MSB of dividend set, do an extended division. */
+1:
+ clrl %r3
+ ediv %r1, %r2, %r3, %r0
+ ret
+ .size __umodsi3, . - __umodsi3
+ .previous
+#endif
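
The VAX has only a signed 32-bit divide (DIVL3) and an extended divide
(EDIV), so both routines dispatch on the most significant bits of the
operands.  A hedged C model of the __udivsi3 case analysis above
(illustrative name only):

    #include <stdint.h>

    static uint32_t
    udivsi3_sketch (uint32_t num, uint32_t den)
    {
      if (den & 0x80000000u)
        /* Divisor MSB set: the quotient can only be 0 or 1; this is
           the decl/cmpl/adwc sequence.  */
        return num >= den;
      if (!(num & 0x80000000u))
        /* Both MSBs clear: a plain signed divide is exact (divl3).  */
        return (uint32_t) ((int32_t) num / (int32_t) den);
      /* Dividend MSB set: the asm zero-extends to 64 bits and uses
         EDIV; a 64-bit division models that here.  */
      return (uint32_t) ((uint64_t) num / den);
    }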
diff --git a/libgcc/config/vax/t-linux b/libgcc/config/vax/t-linux
new file mode 100644
index 00000000000..17929c8717c
--- /dev/null
+++ b/libgcc/config/vax/t-linux
@@ -0,0 +1,2 @@
+LIB1ASMSRC = vax/lib1funcs.S
+LIB1ASMFUNCS = _udivsi3 _umodsi3
diff --git a/libgcc/config/vms/t-vms b/libgcc/config/vms/t-vms
new file mode 100644
index 00000000000..93d8255dcb0
--- /dev/null
+++ b/libgcc/config/vms/t-vms
@@ -0,0 +1,6 @@
+# Assemble startup files.
+vcrt0.o: $(srcdir)/config/vms/vms-ucrt0.c
+ $(gcc_compile) -c $<
+
+pcrt0.o: $(srcdir)/config/vms/vms-ucrt0.c
+ $(gcc_compile) -c -DCRT0_POSIX_EXIT $<
diff --git a/libgcc/config/vms/vms-ucrt0.c b/libgcc/config/vms/vms-ucrt0.c
new file mode 100644
index 00000000000..344b59520e0
--- /dev/null
+++ b/libgcc/config/vms/vms-ucrt0.c
@@ -0,0 +1,127 @@
+/* VMS crt0 returning Unix style condition codes.
+ Copyright (C) 2001, 2009, 2010 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdlib.h>
+
+/* Lots of cheating to handle 32-bit/64-bit pointer conversions.
+   We use 'long long' for 64-bit pointers and 'int' for 32-bit pointers. */
+
+extern void decc$main (void *arg1, void *arg2, void *arg3,
+ void *image_file_desc, void *arg5, void *arg6,
+ int *, int *, int *);
+extern int main (int, char **, char **);
+extern int _malloc32 (int);
+
+#ifdef __ia64__
+#define MAIN_ASM_NAME asm ("ELF$TFRADR")
+#else
+#define MAIN_ASM_NAME
+#endif
+
+int __main (void *arg1, void *arg2, void *arg3,
+ void *image_file_desc, void *arg5, void *arg6) MAIN_ASM_NAME;
+
+/* From errnodef.h, but we need to emulate the globalval. */
+extern int C$_EXIT1;
+
+/* From stsdef.h */
+#define STS$V_MSG_NO 0x03
+#define STS$M_INHIB_MSG 0x10000000
+
+/* From ssdef.h */
+#define SS$_NORMAL 1
+
+int
+__main (void *arg1, void *arg2, void *arg3,
+ void *image_file_desc, void *arg5, void *arg6)
+{
+ int argc;
+ int argv;
+ int envp;
+ int status;
+ int i;
+ long long *long_argv;
+ long long *long_envp;
+
+ /* The argv and envp arrays are 32-bit pointers to 32-bit pointers. */
+ decc$main (arg1, arg2, arg3, image_file_desc,
+ arg5, arg6, &argc, &argv, &envp);
+
+ if (sizeof (void *) == 8)
+ {
+ /* Reallocate argv and envp with 64-bit pointers. */
+ long_argv = (long long *)
+ (long long) _malloc32 (sizeof (long long) * (argc + 1));
+
+ for (i = 0; i < argc; i++)
+ long_argv[i] = ((int *) (long long) argv)[i];
+
+ long_argv[argc] = 0;
+
+ for (i = 0; ((int *) (long long) envp)[i]; i++)
+ ;
+ long_envp = (long long *)
+ (long long) _malloc32 (sizeof (long long) * (i + 1));
+
+ for (i = 0; ((int *) (long long) envp)[i]; i++)
+ long_envp[i] = ((int *) (long long) envp)[i];
+
+ long_envp[i] = 0;
+ }
+ else
+ {
+ long_argv = (long long *) argv;
+ long_envp = (long long *) envp;
+ }
+ status = main (argc, (char **)long_argv, (char **)long_envp);
+
+#ifdef CRT0_POSIX_EXIT
+ /* Map into a range of 0 - 255. */
+ status = status & 255;
+
+ if (status > 0)
+ {
+ int save_status = status;
+
+ status = (long) &C$_EXIT1 + ((status - 1) << STS$V_MSG_NO);
+
+ /* An exit failure status requires a "severe" error. All status values
+ are defined in errno with a successful (1) severity but can be
+ changed to an error (2) severity by adding 1. In addition for
+ compatibility with UNIX exit() routines we inhibit a run-time error
+ message from being generated on exit(1). */
+
+ if (save_status == 1)
+ {
+ status++;
+ status |= STS$M_INHIB_MSG;
+ }
+ }
+ else
+ status = SS$_NORMAL;
+#endif /* CRT0_POSIX_EXIT */
+
+ return status;
+}
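
The CRT0_POSIX_EXIT branch above maps a Unix exit code onto a VMS
condition value.  A standalone sketch of just that arithmetic, with the
VMS constants stubbed out so it can be compiled and run anywhere;
C$_EXIT1 is really an address resolved by the VMS linker, so the
stand-in value below is an assumption for illustration:

    #include <stdio.h>

    #define STS_V_MSG_NO     0x03        /* STS$V_MSG_NO */
    #define STS_M_INHIB_MSG  0x10000000  /* STS$M_INHIB_MSG */
    #define SS_NORMAL        1           /* SS$_NORMAL */
    #define C_EXIT1          0x1000      /* stand-in for (long)&C$_EXIT1 */

    static long
    map_exit_status (int status)
    {
      status &= 255;                     /* map into 0 - 255 */
      if (status == 0)
        return SS_NORMAL;
      long vms = C_EXIT1 + ((status - 1) << STS_V_MSG_NO);
      if (status == 1)
        /* exit(1): bump severity to error and suppress the message.  */
        vms = (vms + 1) | STS_M_INHIB_MSG;
      return vms;
    }

    int
    main (void)
    {
      printf ("%lx %lx %lx\n", map_exit_status (0),
              map_exit_status (1), map_exit_status (2));
      return 0;
    }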
diff --git a/libgcc/config/vxlib-tls.c b/libgcc/config/vxlib-tls.c
new file mode 100644
index 00000000000..c4696768f0f
--- /dev/null
+++ b/libgcc/config/vxlib-tls.c
@@ -0,0 +1,362 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+ These are out-of-line routines called from gthr-vxworks.h.
+
+ This file provides the TLS related support routines, calling specific
+ VxWorks kernel entry points for this purpose. The base VxWorks 5.x kernels
+ don't feature these entry points, and we provide gthr_supp_vxw_5x.c as an
+ option to fill this gap. Asking users to rebuild a kernel is not to be
+ taken lightly, still, so we have isolated these routines from the rest of
+ vxlib to ensure that the kernel dependencies are only dragged when really
+ necessary. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "gthr.h"
+
+#if defined(__GTHREADS)
+#include <vxWorks.h>
+#ifndef __RTP__
+#include <vxLib.h>
+#endif
+#include <taskLib.h>
+#ifndef __RTP__
+#include <taskHookLib.h>
+#else
+# include <errno.h>
+#endif
+
+/* Thread-local storage.
+
+ We reserve a field in the TCB to point to a dynamically allocated
+ array which is used to store TLS values. A TLS key is simply an
+ offset in this array. The exact location of the TCB field is not
+ known to this code nor to vxlib.c -- all access to it indirects
+ through the routines __gthread_get_tls_data and
+ __gthread_set_tls_data, which are provided by the VxWorks kernel.
+
+ There is also a global array which records which keys are valid and
+ which have destructors.
+
+ A task delete hook is installed to execute key destructors. The
+ routines __gthread_enter_tls_dtor_context and
+ __gthread_leave_tls_dtor_context, which are also provided by the
+ kernel, ensure that it is safe to call free() on memory allocated
+ by the task being deleted. (This is a no-op on VxWorks 5, but
+ a major undertaking on AE.)
+
+ The task delete hook is only installed when at least one thread
+ has TLS data. This is a necessary precaution, to allow this module
+ to be unloaded - a module with a hook cannot be removed.
+
+ Since this interface is used to allocate only a small number of
+ keys, the table size is small and static, which simplifies the
+ code quite a bit. Revisit this if and when it becomes necessary. */
+
+#define MAX_KEYS 4
+
+/* This is the structure pointed to by the pointer returned
+ by __gthread_get_tls_data. */
+struct tls_data
+{
+ int *owner;
+ void *values[MAX_KEYS];
+ unsigned int generation[MAX_KEYS];
+};
+
+/* To make sure we only delete TLS data associated with this object,
+ include a pointer to a local variable in the TLS data object. */
+static int self_owner;
+
+/* Flag to check whether the delete hook is installed. Once installed
+ it is only removed when unloading this module. */
+static volatile int delete_hook_installed;
+
+/* kernel provided routines */
+extern void *__gthread_get_tls_data (void);
+extern void __gthread_set_tls_data (void *data);
+
+extern void __gthread_enter_tls_dtor_context (void);
+extern void __gthread_leave_tls_dtor_context (void);
+
+
+/* This is a global structure which records all of the active keys.
+
+ A key is potentially valid (i.e. has been handed out by
+ __gthread_key_create) iff its generation count in this structure is
+ even. In that case, the matching entry in the dtors array is a
+ routine to be called when a thread terminates with a valid,
+ non-NULL specific value for that key.
+
+ A key is actually valid in a thread T iff the generation count
+ stored in this structure is equal to the generation count stored in
+ T's specific-value structure. */
+
+typedef void (*tls_dtor) (void *);
+
+struct tls_keys
+{
+ tls_dtor dtor[MAX_KEYS];
+ unsigned int generation[MAX_KEYS];
+};
+
+#define KEY_VALID_P(key) !(tls_keys.generation[key] & 1)
+
+/* Note: if MAX_KEYS is increased, this initializer must be updated
+ to match. All the generation counts begin at 1, which means no
+ key is valid. */
+static struct tls_keys tls_keys =
+{
+ { 0, 0, 0, 0 },
+ { 1, 1, 1, 1 }
+};
+
+/* This lock protects the tls_keys structure. */
+static __gthread_mutex_t tls_lock;
+
+static __gthread_once_t tls_init_guard = __GTHREAD_ONCE_INIT;
+
+/* Internal routines. */
+
+/* The task TCB has just been deleted. Call the destructor
+ function for each TLS key that has both a destructor and
+ a non-NULL specific value in this thread.
+
+ This routine does not need to take tls_lock; the generation
+ count protects us from calling a stale destructor. It does
+ need to read tls_keys.dtor[key] atomically. */
+
+static void
+tls_delete_hook (void *tcb ATTRIBUTE_UNUSED)
+{
+ struct tls_data *data;
+ __gthread_key_t key;
+
+#ifdef __RTP__
+ data = __gthread_get_tls_data ();
+#else
+ /* In kernel mode, we can be called in the context of the thread
+ doing the killing, so must use the TCB to determine the data of
+ the thread being killed. */
+ data = __gthread_get_tsd_data (tcb);
+#endif
+
+ if (data && data->owner == &self_owner)
+ {
+#ifdef __RTP__
+ __gthread_enter_tls_dtor_context ();
+#else
+ __gthread_enter_tsd_dtor_context (tcb);
+#endif
+ for (key = 0; key < MAX_KEYS; key++)
+ {
+ if (data->generation[key] == tls_keys.generation[key])
+ {
+ tls_dtor dtor = tls_keys.dtor[key];
+
+ if (dtor)
+ dtor (data->values[key]);
+ }
+ }
+ free (data);
+#ifdef __RTP__
+ __gthread_leave_tls_dtor_context ();
+#else
+ __gthread_leave_tsd_dtor_context ();
+#endif
+
+#ifdef __RTP__
+ __gthread_set_tls_data (0);
+#else
+ __gthread_set_tsd_data (tcb, 0);
+#endif
+ }
+}
+
+/* Initialize global data used by the TLS system. */
+static void
+tls_init (void)
+{
+ __GTHREAD_MUTEX_INIT_FUNCTION (&tls_lock);
+}
+
+static void tls_destructor (void) __attribute__ ((destructor));
+static void
+tls_destructor (void)
+{
+#ifdef __RTP__
+ /* All threads but this one should have exited by now. */
+ tls_delete_hook (NULL);
+#endif
+ /* Unregister the hook. */
+ if (delete_hook_installed)
+ taskDeleteHookDelete ((FUNCPTR)tls_delete_hook);
+
+ if (tls_init_guard.done && __gthread_mutex_lock (&tls_lock) != ERROR)
+ semDelete (tls_lock);
+}
+
+/* External interface */
+
+/* Store in KEYP a value which can be passed to __gthread_setspecific/
+ __gthread_getspecific to store and retrieve a value which is
+ specific to each calling thread. If DTOR is not NULL, it will be
+ called when a thread terminates with a non-NULL specific value for
+ this key, with the value as its sole argument. */
+
+int
+__gthread_key_create (__gthread_key_t *keyp, tls_dtor dtor)
+{
+ __gthread_key_t key;
+
+ __gthread_once (&tls_init_guard, tls_init);
+
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return errno;
+
+ for (key = 0; key < MAX_KEYS; key++)
+ if (!KEY_VALID_P (key))
+ goto found_slot;
+
+ /* no room */
+ __gthread_mutex_unlock (&tls_lock);
+ return EAGAIN;
+
+ found_slot:
+ tls_keys.generation[key]++; /* making it even */
+ tls_keys.dtor[key] = dtor;
+ *keyp = key;
+ __gthread_mutex_unlock (&tls_lock);
+ return 0;
+}
+
+/* Invalidate KEY; it can no longer be used as an argument to
+ setspecific/getspecific. Note that this does NOT call destructor
+ functions for any live values for this key. */
+int
+__gthread_key_delete (__gthread_key_t key)
+{
+ if (key >= MAX_KEYS)
+ return EINVAL;
+
+ __gthread_once (&tls_init_guard, tls_init);
+
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return errno;
+
+ if (!KEY_VALID_P (key))
+ {
+ __gthread_mutex_unlock (&tls_lock);
+ return EINVAL;
+ }
+
+ tls_keys.generation[key]++; /* making it odd */
+ tls_keys.dtor[key] = 0;
+
+ __gthread_mutex_unlock (&tls_lock);
+ return 0;
+}
+
+/* Retrieve the thread-specific value for KEY. If it has never been
+ set in this thread, or KEY is invalid, returns NULL.
+
+ It does not matter if this function races with key_create or
+ key_delete; the worst that can happen is you get a value other than
+ the one that a serialized implementation would have provided. */
+
+void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ struct tls_data *data;
+
+ if (key >= MAX_KEYS)
+ return 0;
+
+ data = __gthread_get_tls_data ();
+
+ if (!data)
+ return 0;
+
+ if (data->generation[key] != tls_keys.generation[key])
+ return 0;
+
+ return data->values[key];
+}
+
+/* Set the thread-specific value for KEY. If KEY is invalid, or
+ memory allocation fails, returns -1, otherwise 0.
+
+ The generation count protects this function against races with
+ key_create/key_delete; the worst thing that can happen is that a
+ value is successfully stored into a dead generation (and then
+ immediately becomes invalid). However, we do have to make sure
+ to read tls_keys.generation[key] atomically. */
+
+int
+__gthread_setspecific (__gthread_key_t key, void *value)
+{
+ struct tls_data *data;
+ unsigned int generation;
+
+ if (key >= MAX_KEYS)
+ return EINVAL;
+
+ data = __gthread_get_tls_data ();
+ if (!data)
+ {
+ if (!delete_hook_installed)
+ {
+ /* Install the delete hook. */
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return ENOMEM;
+ if (!delete_hook_installed)
+ {
+ taskDeleteHookAdd ((FUNCPTR)tls_delete_hook);
+ delete_hook_installed = 1;
+ }
+ __gthread_mutex_unlock (&tls_lock);
+ }
+
+ data = malloc (sizeof (struct tls_data));
+ if (!data)
+ return ENOMEM;
+
+ memset (data, 0, sizeof (struct tls_data));
+ data->owner = &self_owner;
+ __gthread_set_tls_data (data);
+ }
+
+ generation = tls_keys.generation[key];
+
+ if (generation & 1)
+ return EINVAL;
+
+ data->generation[key] = generation;
+ data->values[key] = value;
+
+ return 0;
+}
+#endif /* __GTHREADS */
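
A short usage sketch of the key interface implemented above (normally
reached through gthr-vxworks.h).  It assumes the same build environment
as this file, so gthr.h supplies __gthread_key_t; the generation counts
guarantee that a deleted key simply reads back as unset:

    #include <stdlib.h>
    #include "gthr.h"

    static __gthread_key_t buf_key;

    static void
    free_buffer (void *p)        /* runs from the task delete hook */
    {
      free (p);
    }

    static void
    init_key (void)
    {
      __gthread_key_create (&buf_key, free_buffer);
    }

    /* Each task lazily allocates its own private buffer under the
       shared key.  Illustrative code, not part of libgcc.  */
    static void *
    get_task_buffer (void)
    {
      void *p = __gthread_getspecific (buf_key);
      if (!p)
        {
          p = malloc (64);
          __gthread_setspecific (buf_key, p);
        }
      return p;
    }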
diff --git a/libgcc/config/vxlib.c b/libgcc/config/vxlib.c
new file mode 100644
index 00000000000..0ff996cfced
--- /dev/null
+++ b/libgcc/config/vxlib.c
@@ -0,0 +1,95 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2009 Free Software Foundation, Inc.
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+ These are out-of-line routines called from gthr-vxworks.h. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "gthr.h"
+
+#if defined(__GTHREADS)
+#include <vxWorks.h>
+#ifndef __RTP__
+#include <vxLib.h>
+#endif
+#include <taskLib.h>
+#ifndef __RTP__
+#include <taskHookLib.h>
+#else
+# include <errno.h>
+#endif
+
+/* Init-once operation.
+
+ This would be a clone of the implementation from gthr-solaris.h,
+ except that we have a bootstrap problem - the whole point of this
+ exercise is to prevent double initialization, but if two threads
+ are racing with each other, once->mutex is liable to be initialized
+ by both. Then each thread will lock its own mutex, and proceed to
+ call the initialization routine.
+
+ So instead we use a bare atomic primitive (vxTas()) to handle
+ mutual exclusion. Threads losing the race then busy-wait, calling
+ taskDelay() to yield the processor, until the initialization is
+ completed. Inefficient, but reliable. */
+
+int
+__gthread_once (__gthread_once_t *guard, void (*func)(void))
+{
+ if (guard->done)
+ return 0;
+
+#ifdef __RTP__
+ __gthread_lock_library ();
+#else
+ while (!vxTas ((void *)&guard->busy))
+ {
+#ifdef __PPC__
+ /* This can happen on PowerPC, which uses all 32 bits
+ of the gthread_once_t structure. */
+ if (guard->done)
+ return 0;
+#endif
+ taskDelay (1);
+ }
+#endif
+
+ /* Only one thread at a time gets here. Check ->done again, then
+ go ahead and call func() if no one has done it yet. */
+ if (!guard->done)
+ {
+ func ();
+ guard->done = 1;
+ }
+
+#ifdef __RTP__
+ __gthread_unlock_library ();
+#else
+ guard->busy = 0;
+#endif
+ return 0;
+}
+
+#endif /* __GTHREADS */
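
A usage sketch of the routine above; again gthr.h is assumed from the
same build environment.  However many tasks race through lookup(),
__gthread_once guarantees build_table runs exactly once, even on the
vxTas busy-wait path:

    #include "gthr.h"

    static __gthread_once_t init_guard = __GTHREAD_ONCE_INIT;
    static int table[256];

    static void
    build_table (void)
    {
      int i;
      for (i = 0; i < 256; i++)
        table[i] = i * i;
    }

    /* Illustrative only, not part of libgcc.  */
    static int
    lookup (int i)
    {
      __gthread_once (&init_guard, build_table);
      return table[i & 255];
    }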
diff --git a/libgcc/config/xtensa/crti.S b/libgcc/config/xtensa/crti.S
new file mode 100644
index 00000000000..cbe91b0e748
--- /dev/null
+++ b/libgcc/config/xtensa/crti.S
@@ -0,0 +1,51 @@
+# Start .init and .fini sections.
+# Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+#include "xtensa-config.h"
+
+ .section .init
+ .globl _init
+ .type _init,@function
+ .align 4
+_init:
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ entry sp, 64
+#else
+ addi sp, sp, -32
+ s32i a0, sp, 0
+#endif
+
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+ .align 4
+_fini:
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ entry sp, 64
+#else
+ addi sp, sp, -32
+ s32i a0, sp, 0
+#endif
diff --git a/libgcc/config/xtensa/crtn.S b/libgcc/config/xtensa/crtn.S
new file mode 100644
index 00000000000..413cfa0ac10
--- /dev/null
+++ b/libgcc/config/xtensa/crtn.S
@@ -0,0 +1,46 @@
+# End of .init and .fini sections.
+# Copyright (C) 2003, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+#include "xtensa-config.h"
+
+ .section .init
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ retw
+#else
+ l32i a0, sp, 0
+ addi sp, sp, 32
+ ret
+#endif
+
+ .section .fini
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ retw
+#else
+ l32i a0, sp, 0
+ addi sp, sp, 32
+ ret
+#endif
diff --git a/libgcc/config/xtensa/ieee754-df.S b/libgcc/config/xtensa/ieee754-df.S
new file mode 100644
index 00000000000..9b46889bdc2
--- /dev/null
+++ b/libgcc/config/xtensa/ieee754-df.S
@@ -0,0 +1,2388 @@
+/* IEEE-754 double-precision functions for Xtensa
+ Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __XTENSA_EB__
+#define xh a2
+#define xl a3
+#define yh a4
+#define yl a5
+#else
+#define xh a3
+#define xl a2
+#define yh a5
+#define yl a4
+#endif
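
The xh/xl macros above pick which register of an argument pair holds the
sign/exponent word of an IEEE double, which depends on byte order.  The
same split can be seen from C with a union (a sketch for illustration,
not part of this file):

    #include <stdint.h>
    #include <stdio.h>

    union dwords
    {
      double d;
      uint32_t w[2];
    };

    int
    main (void)
    {
      union dwords u = { .d = -1.0 };  /* sign bit is in the high word */
    #ifdef __XTENSA_EB__
      uint32_t hi = u.w[0], lo = u.w[1]; /* big endian: high word first */
    #else
      uint32_t hi = u.w[1], lo = u.w[0]; /* little endian: high word second */
    #endif
      printf ("hi=%08x lo=%08x\n", hi, lo);
      return 0;
    }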
+
+/* Warning! The branch displacements for some Xtensa branch instructions
+ are quite small, and this code has been carefully laid out to keep
+ branch targets in range. If you change anything, be sure to check that
+ the assembler is not relaxing anything to branch over a jump. */
+
+#ifdef L_negdf2
+
+ .align 4
+ .global __negdf2
+ .type __negdf2, @function
+__negdf2:
+ leaf_entry sp, 16
+ movi a4, 0x80000000
+ xor xh, xh, a4
+ leaf_return
+
+#endif /* L_negdf2 */
+
+#ifdef L_addsubdf3
+
+ /* Addition */
+__adddf3_aux:
+
+ /* Handle NaNs and Infinities. (This code is placed before the
+ start of the function just to keep it in range of the limited
+ branch displacements.) */
+
+.Ladd_xnan_or_inf:
+ /* If y is neither Infinity nor NaN, return x. */
+ bnall yh, a6, 1f
+ /* If x is a NaN, return it. Otherwise, return y. */
+ slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, .Ladd_ynan_or_inf
+1: leaf_return
+
+.Ladd_ynan_or_inf:
+ /* Return y. */
+ mov xh, yh
+ mov xl, yl
+ leaf_return
+
+.Ladd_opposite_signs:
+ /* Operand signs differ. Do a subtraction. */
+ slli a7, a6, 11
+ xor yh, yh, a7
+ j .Lsub_same_sign
+
+ .align 4
+ .global __adddf3
+ .type __adddf3, @function
+__adddf3:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+
+ /* Check if the two operands have the same sign. */
+ xor a7, xh, yh
+ bltz a7, .Ladd_opposite_signs
+
+.Ladd_same_sign:
+ /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
+ ball xh, a6, .Ladd_xnan_or_inf
+ ball yh, a6, .Ladd_ynan_or_inf
+
+ /* Compare the exponents. The smaller operand will be shifted
+ right by the exponent difference and added to the larger
+ one. */
+ extui a7, xh, 20, 12
+ extui a8, yh, 20, 12
+ bltu a7, a8, .Ladd_shiftx
+
+.Ladd_shifty:
+ /* Check if the smaller (or equal) exponent is zero. */
+ bnone yh, a6, .Ladd_yexpzero
+
+ /* Replace yh sign/exponent with 0x001. */
+ or yh, yh, a6
+ slli yh, yh, 11
+ srli yh, yh, 11
+
+.Ladd_yexpdiff:
+ /* Compute the exponent difference. Optimize for difference < 32. */
+ sub a10, a7, a8
+ bgeui a10, 32, .Ladd_bigshifty
+
+ /* Shift yh/yl right by the exponent difference. Any bits that are
+ shifted out of yl are saved in a9 for rounding the result. */
+ ssr a10
+ movi a9, 0
+ src a9, yl, a9
+ src yl, yh, yl
+ srl yh, yh
+
+.Ladd_addy:
+ /* Do the 64-bit addition. */
+ add xl, xl, yl
+ add xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, 1
+1:
+ /* Check if the add overflowed into the exponent. */
+ extui a10, xh, 20, 12
+ beq a10, a7, .Ladd_round
+ mov a8, a7
+ j .Ladd_carry
+
+.Ladd_yexpzero:
+ /* y is a subnormal value. Replace its sign/exponent with zero,
+ i.e., no implicit "1.0", and increment the apparent exponent
+ because subnormals behave as if they had the minimum (nonzero)
+ exponent. Test for the case when both exponents are zero. */
+ slli yh, yh, 12
+ srli yh, yh, 12
+ bnone xh, a6, .Ladd_bothexpzero
+ addi a8, a8, 1
+ j .Ladd_yexpdiff
+
+.Ladd_bothexpzero:
+ /* Both exponents are zero. Handle this as a special case. There
+ is no need to shift or round, and the normal code for handling
+ a carry into the exponent field will not work because it
+ assumes there is an implicit "1.0" that needs to be added. */
+ add xl, xl, yl
+ add xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, 1
+1: leaf_return
+
+.Ladd_bigshifty:
+	/* Exponent difference >= 64 -- just return the bigger value. */
+ bgeui a10, 64, 1b
+
+ /* Shift yh/yl right by the exponent difference. Any bits that are
+ shifted out are saved in a9 for rounding the result. */
+ ssr a10
+ sll a11, yl /* lost bits shifted out of yl */
+ src a9, yh, yl
+ srl yl, yh
+ movi yh, 0
+ beqz a11, .Ladd_addy
+ or a9, a9, a10 /* any positive, nonzero value will work */
+ j .Ladd_addy
+
+.Ladd_xexpzero:
+ /* Same as "yexpzero" except skip handling the case when both
+ exponents are zero. */
+ slli xh, xh, 12
+ srli xh, xh, 12
+ addi a7, a7, 1
+ j .Ladd_xexpdiff
+
+.Ladd_shiftx:
+ /* Same thing as the "shifty" code, but with x and y swapped. Also,
+ because the exponent difference is always nonzero in this version,
+ the shift sequence can use SLL and skip loading a constant zero. */
+ bnone xh, a6, .Ladd_xexpzero
+
+ or xh, xh, a6
+ slli xh, xh, 11
+ srli xh, xh, 11
+
+.Ladd_xexpdiff:
+ sub a10, a8, a7
+ bgeui a10, 32, .Ladd_bigshiftx
+
+ ssr a10
+ sll a9, xl
+ src xl, xh, xl
+ srl xh, xh
+
+.Ladd_addx:
+ add xl, xl, yl
+ add xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, 1
+1:
+ /* Check if the add overflowed into the exponent. */
+ extui a10, xh, 20, 12
+ bne a10, a8, .Ladd_carry
+
+.Ladd_round:
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a9, 1f
+ addi xl, xl, 1
+ beqz xl, .Ladd_roundcarry
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a9, a9, 1
+ beqz a9, .Ladd_exactlyhalf
+1: leaf_return
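+
+	/* (a9 holds the bits shifted out of the mantissa, left-aligned: its
+	   sign bit is the first discarded bit, so BGEZ above means the
+	   discarded fraction was below 1/2, and after the SLLI doubling,
+	   a9 == 0 exactly when the fraction was exactly 1/2.) */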
+
+.Ladd_bigshiftx:
+ /* Mostly the same thing as "bigshifty".... */
+ bgeui a10, 64, .Ladd_returny
+
+ ssr a10
+ sll a11, xl
+ src a9, xh, xl
+ srl xl, xh
+ movi xh, 0
+ beqz a11, .Ladd_addx
+ or a9, a9, a10
+ j .Ladd_addx
+
+.Ladd_returny:
+ mov xh, yh
+ mov xl, yl
+ leaf_return
+
+.Ladd_carry:
+ /* The addition has overflowed into the exponent field, so the
+ value needs to be renormalized. The mantissa of the result
+ can be recovered by subtracting the original exponent and
+ adding 0x100000 (which is the explicit "1.0" for the
+ mantissa of the non-shifted operand -- the "1.0" for the
+ shifted operand was already added). The mantissa can then
+ be shifted right by one bit. The explicit "1.0" of the
+ shifted mantissa then needs to be replaced by the exponent,
+ incremented by one to account for the normalizing shift.
+ It is faster to combine these operations: do the shift first
+ and combine the additions and subtractions. If x is the
+ original exponent, the result is:
+ shifted mantissa - (x << 19) + (1 << 19) + (x << 20)
+ or:
+ shifted mantissa + ((x + 1) << 19)
+ Note that the exponent is incremented here by leaving the
+ explicit "1.0" of the mantissa in the exponent field. */
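+
+	/* (Checking the algebra: (x << 20) - (x << 19) == (x << 19), so
+	   -(x << 19) + (1 << 19) + (x << 20) == ((x + 1) << 19).) */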
+
+ /* Shift xh/xl right by one bit. Save the lsb of xl. */
+ mov a10, xl
+ ssai 1
+ src xl, xh, xl
+ srl xh, xh
+
+ /* See explanation above. The original exponent is in a8. */
+ addi a8, a8, 1
+ slli a8, a8, 19
+ add xh, xh, a8
+
+ /* Return an Infinity if the exponent overflowed. */
+ ball xh, a6, .Ladd_infinity
+
+ /* Same thing as the "round" code except the msb of the leftover
+ fraction is bit 0 of a10, with the rest of the fraction in a9. */
+ bbci.l a10, 0, 1f
+ addi xl, xl, 1
+ beqz xl, .Ladd_roundcarry
+ beqz a9, .Ladd_exactlyhalf
+1: leaf_return
+
+.Ladd_infinity:
+ /* Clear the mantissa. */
+ movi xl, 0
+ srli xh, xh, 20
+ slli xh, xh, 20
+
+ /* The sign bit may have been lost in a carry-out. Put it back. */
+ slli a8, a8, 1
+ or xh, xh, a8
+ leaf_return
+
+.Ladd_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli xl, xl, 1
+ slli xl, xl, 1
+ leaf_return
+
+.Ladd_roundcarry:
+ /* xl is always zero when the rounding increment overflows, so
+ there's no need to round it to an even value. */
+ addi xh, xh, 1
+ /* Overflow to the exponent is OK. */
+ leaf_return
+
+
+ /* Subtraction */
+__subdf3_aux:
+
+ /* Handle NaNs and Infinities. (This code is placed before the
+ start of the function just to keep it in range of the limited
+ branch displacements.) */
+
+.Lsub_xnan_or_inf:
+ /* If y is neither Infinity nor NaN, return x. */
+ bnall yh, a6, 1f
+ /* Both x and y are either NaN or Inf, so the result is NaN. */
+ movi a4, 0x80000 /* make it a quiet NaN */
+ or xh, xh, a4
+1: leaf_return
+
+.Lsub_ynan_or_inf:
+ /* Negate y and return it. */
+ slli a7, a6, 11
+ xor xh, yh, a7
+ mov xl, yl
+ leaf_return
+
+.Lsub_opposite_signs:
+ /* Operand signs differ. Do an addition. */
+ slli a7, a6, 11
+ xor yh, yh, a7
+ j .Ladd_same_sign
+
+ .align 4
+ .global __subdf3
+ .type __subdf3, @function
+__subdf3:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+
+ /* Check if the two operands have the same sign. */
+ xor a7, xh, yh
+ bltz a7, .Lsub_opposite_signs
+
+.Lsub_same_sign:
+ /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
+ ball xh, a6, .Lsub_xnan_or_inf
+ ball yh, a6, .Lsub_ynan_or_inf
+
+ /* Compare the operands. In contrast to addition, the entire
+ value matters here. */
+ extui a7, xh, 20, 11
+ extui a8, yh, 20, 11
+ bltu xh, yh, .Lsub_xsmaller
+ beq xh, yh, .Lsub_compare_low
+
+.Lsub_ysmaller:
+ /* Check if the smaller (or equal) exponent is zero. */
+ bnone yh, a6, .Lsub_yexpzero
+
+ /* Replace yh sign/exponent with 0x001. */
+ or yh, yh, a6
+ slli yh, yh, 11
+ srli yh, yh, 11
+
+.Lsub_yexpdiff:
+ /* Compute the exponent difference. Optimize for difference < 32. */
+ sub a10, a7, a8
+ bgeui a10, 32, .Lsub_bigshifty
+
+ /* Shift yh/yl right by the exponent difference. Any bits that are
+ shifted out of yl are saved in a9 for rounding the result. */
+ ssr a10
+ movi a9, 0
+ src a9, yl, a9
+ src yl, yh, yl
+ srl yh, yh
+
+.Lsub_suby:
+ /* Do the 64-bit subtraction. */
+ sub xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, -1
+1: sub xl, xl, yl
+
+ /* Subtract the leftover bits in a9 from zero and propagate any
+ borrow from xh/xl. */
+ neg a9, a9
+ beqz a9, 1f
+ addi a5, xh, -1
+ moveqz xh, a5, xl
+ addi xl, xl, -1
+1:
+ /* Check if the subtract underflowed into the exponent. */
+ extui a10, xh, 20, 11
+ beq a10, a7, .Lsub_round
+ j .Lsub_borrow
+
+.Lsub_compare_low:
+ /* The high words are equal. Compare the low words. */
+ bltu xl, yl, .Lsub_xsmaller
+ bltu yl, xl, .Lsub_ysmaller
+ /* The operands are equal. Return 0.0. */
+ movi xh, 0
+ movi xl, 0
+1: leaf_return
+
+.Lsub_yexpzero:
+ /* y is a subnormal value. Replace its sign/exponent with zero,
+ i.e., no implicit "1.0". Unless x is also a subnormal, increment
+ y's apparent exponent because subnormals behave as if they had
+ the minimum (nonzero) exponent. */
+ slli yh, yh, 12
+ srli yh, yh, 12
+ bnone xh, a6, .Lsub_yexpdiff
+ addi a8, a8, 1
+ j .Lsub_yexpdiff
+
+.Lsub_bigshifty:
+	/* Exponent difference >= 64 -- just return the bigger value. */
+ bgeui a10, 64, 1b
+
+ /* Shift yh/yl right by the exponent difference. Any bits that are
+ shifted out are saved in a9 for rounding the result. */
+ ssr a10
+ sll a11, yl /* lost bits shifted out of yl */
+ src a9, yh, yl
+ srl yl, yh
+ movi yh, 0
+ beqz a11, .Lsub_suby
+ or a9, a9, a10 /* any positive, nonzero value will work */
+ j .Lsub_suby
+
+.Lsub_xsmaller:
+ /* Same thing as the "ysmaller" code, but with x and y swapped and
+ with y negated. */
+ bnone xh, a6, .Lsub_xexpzero
+
+ or xh, xh, a6
+ slli xh, xh, 11
+ srli xh, xh, 11
+
+.Lsub_xexpdiff:
+ sub a10, a8, a7
+ bgeui a10, 32, .Lsub_bigshiftx
+
+ ssr a10
+ movi a9, 0
+ src a9, xl, a9
+ src xl, xh, xl
+ srl xh, xh
+
+ /* Negate y. */
+ slli a11, a6, 11
+ xor yh, yh, a11
+
+.Lsub_subx:
+ sub xl, yl, xl
+ sub xh, yh, xh
+ bgeu yl, xl, 1f
+ addi xh, xh, -1
+1:
+ /* Subtract the leftover bits in a9 from zero and propagate any
+ borrow from xh/xl. */
+ neg a9, a9
+ beqz a9, 1f
+ addi a5, xh, -1
+ moveqz xh, a5, xl
+ addi xl, xl, -1
+1:
+ /* Check if the subtract underflowed into the exponent. */
+ extui a10, xh, 20, 11
+ bne a10, a8, .Lsub_borrow
+
+.Lsub_round:
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a9, 1f
+ addi xl, xl, 1
+ beqz xl, .Lsub_roundcarry
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a9, a9, 1
+ beqz a9, .Lsub_exactlyhalf
+1: leaf_return
+
+.Lsub_xexpzero:
+ /* Same as "yexpzero". */
+ slli xh, xh, 12
+ srli xh, xh, 12
+ bnone yh, a6, .Lsub_xexpdiff
+ addi a7, a7, 1
+ j .Lsub_xexpdiff
+
+.Lsub_bigshiftx:
+ /* Mostly the same thing as "bigshifty", but with the sign bit of the
+ shifted value set so that the subsequent subtraction flips the
+ sign of y. */
+ bgeui a10, 64, .Lsub_returny
+
+ ssr a10
+ sll a11, xl
+ src a9, xh, xl
+ srl xl, xh
+ slli xh, a6, 11 /* set sign bit of xh */
+ beqz a11, .Lsub_subx
+ or a9, a9, a10
+ j .Lsub_subx
+
+.Lsub_returny:
+ /* Negate and return y. */
+ slli a7, a6, 11
+ xor xh, yh, a7
+ mov xl, yl
+ leaf_return
+
+.Lsub_borrow:
+ /* The subtraction has underflowed into the exponent field, so the
+ value needs to be renormalized. Shift the mantissa left as
+ needed to remove any leading zeros and adjust the exponent
+ accordingly. If the exponent is not large enough to remove
+ all the leading zeros, the result will be a subnormal value. */
+
+ slli a8, xh, 12
+ beqz a8, .Lsub_xhzero
+ do_nsau a6, a8, a7, a11
+ srli a8, a8, 12
+ bge a6, a10, .Lsub_subnormal
+ addi a6, a6, 1
+
+.Lsub_shift_lt32:
+ /* Shift the mantissa (a8/xl/a9) left by a6. */
+ ssl a6
+ src a8, a8, xl
+ src xl, xl, a9
+ sll a9, a9
+
+ /* Combine the shifted mantissa with the sign and exponent,
+ decrementing the exponent by a6. (The exponent has already
+ been decremented by one due to the borrow from the subtraction,
+ but adding the mantissa will increment the exponent by one.) */
+ srli xh, xh, 20
+ sub xh, xh, a6
+ slli xh, xh, 20
+ add xh, xh, a8
+ j .Lsub_round
+
+.Lsub_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli xl, xl, 1
+ slli xl, xl, 1
+ leaf_return
+
+.Lsub_roundcarry:
+ /* xl is always zero when the rounding increment overflows, so
+ there's no need to round it to an even value. */
+ addi xh, xh, 1
+ /* Overflow to the exponent is OK. */
+ leaf_return
+
+.Lsub_xhzero:
+ /* When normalizing the result, all the mantissa bits in the high
+ word are zero. Shift by "20 + (leading zero count of xl) + 1". */
+ do_nsau a6, xl, a7, a11
+ addi a6, a6, 21
+ blt a10, a6, .Lsub_subnormal
+
+.Lsub_normalize_shift:
+ bltui a6, 32, .Lsub_shift_lt32
+
+ ssl a6
+ src a8, xl, a9
+ sll xl, a9
+ movi a9, 0
+
+ srli xh, xh, 20
+ sub xh, xh, a6
+ slli xh, xh, 20
+ add xh, xh, a8
+ j .Lsub_round
+
+.Lsub_subnormal:
+ /* The exponent is too small to shift away all the leading zeros.
+ Set a6 to the current exponent (which has already been
+ decremented by the borrow) so that the exponent of the result
+ will be zero. Do not add 1 to a6 in this case, because: (1)
+ adding the mantissa will not increment the exponent, so there is
+ no need to subtract anything extra from the exponent to
+ compensate, and (2) the effective exponent of a subnormal is 1
+ not 0 so the shift amount must be 1 smaller than normal. */
+ mov a6, a10
+ j .Lsub_normalize_shift
+
+#endif /* L_addsubdf3 */
+
+#ifdef L_muldf3
+
+ /* Multiplication */
+#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+#define XCHAL_NO_MUL 1
+#endif
+
+__muldf3_aux:
+
+ /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
+ (This code is placed before the start of the function just to
+ keep it in range of the limited branch displacements.) */
+
+.Lmul_xexpzero:
+ /* Clear the sign bit of x. */
+ slli xh, xh, 1
+ srli xh, xh, 1
+
+ /* If x is zero, return zero. */
+ or a10, xh, xl
+ beqz a10, .Lmul_return_zero
+
+ /* Normalize x. Adjust the exponent in a8. */
+ beqz xh, .Lmul_xh_zero
+ do_nsau a10, xh, a11, a12
+ addi a10, a10, -11
+ ssl a10
+ src xh, xh, xl
+ sll xl, xl
+ movi a8, 1
+ sub a8, a8, a10
+ j .Lmul_xnormalized
+.Lmul_xh_zero:
+ do_nsau a10, xl, a11, a12
+ addi a10, a10, -11
+ movi a8, -31
+ sub a8, a8, a10
+ ssl a10
+ bltz a10, .Lmul_xl_srl
+ sll xh, xl
+ movi xl, 0
+ j .Lmul_xnormalized
+.Lmul_xl_srl:
+ srl xh, xl
+ sll xl, xl
+ j .Lmul_xnormalized
+
+.Lmul_yexpzero:
+ /* Clear the sign bit of y. */
+ slli yh, yh, 1
+ srli yh, yh, 1
+
+ /* If y is zero, return zero. */
+ or a10, yh, yl
+ beqz a10, .Lmul_return_zero
+
+ /* Normalize y. Adjust the exponent in a9. */
+ beqz yh, .Lmul_yh_zero
+ do_nsau a10, yh, a11, a12
+ addi a10, a10, -11
+ ssl a10
+ src yh, yh, yl
+ sll yl, yl
+ movi a9, 1
+ sub a9, a9, a10
+ j .Lmul_ynormalized
+.Lmul_yh_zero:
+ do_nsau a10, yl, a11, a12
+ addi a10, a10, -11
+ movi a9, -31
+ sub a9, a9, a10
+ ssl a10
+ bltz a10, .Lmul_yl_srl
+ sll yh, yl
+ movi yl, 0
+ j .Lmul_ynormalized
+.Lmul_yl_srl:
+ srl yh, yl
+ sll yl, yl
+ j .Lmul_ynormalized
+
+.Lmul_return_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli xh, a7, 31
+ slli xh, xh, 31
+ movi xl, 0
+ j .Lmul_done
+
+.Lmul_xnan_or_inf:
+ /* If y is zero, return NaN. */
+ bnez yl, 1f
+ slli a8, yh, 1
+ bnez a8, 1f
+ movi a4, 0x80000 /* make it a quiet NaN */
+ or xh, xh, a4
+ j .Lmul_done
+1:
+ /* If y is NaN, return y. */
+ bnall yh, a6, .Lmul_returnx
+ slli a8, yh, 12
+ or a8, a8, yl
+ beqz a8, .Lmul_returnx
+
+.Lmul_returny:
+ mov xh, yh
+ mov xl, yl
+
+.Lmul_returnx:
+ /* Set the sign bit and return. */
+ extui a7, a7, 31, 1
+ slli xh, xh, 1
+ ssai 1
+ src xh, a7, xh
+ j .Lmul_done
+
+.Lmul_ynan_or_inf:
+ /* If x is zero, return NaN. */
+ bnez xl, .Lmul_returny
+ slli a8, xh, 1
+ bnez a8, .Lmul_returny
+ movi a7, 0x80000 /* make it a quiet NaN */
+ or xh, yh, a7
+ j .Lmul_done
+
+ .align 4
+ .global __muldf3
+ .type __muldf3, @function
+__muldf3:
+#if __XTENSA_CALL0_ABI__
+ leaf_entry sp, 32
+ addi sp, sp, -32
+ s32i a12, sp, 16
+ s32i a13, sp, 20
+ s32i a14, sp, 24
+ s32i a15, sp, 28
+#elif XCHAL_NO_MUL
+ /* This is not really a leaf function; allocate enough stack space
+ to allow CALL12s to a helper function. */
+ leaf_entry sp, 64
+#else
+ leaf_entry sp, 32
+#endif
+ movi a6, 0x7ff00000
+
+ /* Get the sign of the result. */
+ xor a7, xh, yh
+
+ /* Check for NaN and infinity. */
+ ball xh, a6, .Lmul_xnan_or_inf
+ ball yh, a6, .Lmul_ynan_or_inf
+
+ /* Extract the exponents. */
+ extui a8, xh, 20, 11
+ extui a9, yh, 20, 11
+
+ beqz a8, .Lmul_xexpzero
+.Lmul_xnormalized:
+ beqz a9, .Lmul_yexpzero
+.Lmul_ynormalized:
+
+ /* Add the exponents. */
+ add a8, a8, a9
+
+ /* Replace sign/exponent fields with explicit "1.0". */
+ movi a10, 0x1fffff
+ or xh, xh, a6
+ and xh, xh, a10
+ or yh, yh, a6
+ and yh, yh, a10
+
+ /* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6.
+ The least-significant word of the result is thrown away except
+ that if it is nonzero, the lsb of a6 is set to 1. */
+#if XCHAL_HAVE_MUL32_HIGH
+
+ /* Compute a6 with any carry-outs in a10. */
+ movi a10, 0
+ mull a6, xl, yh
+ mull a11, xh, yl
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a10, a10, 1
+1:
+ muluh a11, xl, yl
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a10, a10, 1
+1:
+ /* If the low word of the result is nonzero, set the lsb of a6. */
+ mull a11, xl, yl
+ beqz a11, 1f
+ movi a9, 1
+ or a6, a6, a9
+1:
+ /* Compute xl with any carry-outs in a9. */
+ movi a9, 0
+ mull a11, xh, yh
+ add a10, a10, a11
+ bgeu a10, a11, 1f
+ addi a9, a9, 1
+1:
+ muluh a11, xh, yl
+ add a10, a10, a11
+ bgeu a10, a11, 1f
+ addi a9, a9, 1
+1:
+ muluh xl, xl, yh
+ add xl, xl, a10
+ bgeu xl, a10, 1f
+ addi a9, a9, 1
+1:
+ /* Compute xh. */
+ muluh xh, xh, yh
+ add xh, xh, a9
+
+#else /* ! XCHAL_HAVE_MUL32_HIGH */
+
+ /* Break the inputs into 16-bit chunks and compute 16 32-bit partial
+ products. These partial products are:
+
+ 0 xll * yll
+
+ 1 xll * ylh
+ 2 xlh * yll
+
+ 3 xll * yhl
+ 4 xlh * ylh
+ 5 xhl * yll
+
+ 6 xll * yhh
+ 7 xlh * yhl
+ 8 xhl * ylh
+ 9 xhh * yll
+
+ 10 xlh * yhh
+ 11 xhl * yhl
+ 12 xhh * ylh
+
+ 13 xhl * yhh
+ 14 xhh * yhl
+
+ 15 xhh * yhh
+
+ where the input chunks are (hh, hl, lh, ll). If using the Mul16
+ or Mul32 multiplier options, these input chunks must be stored in
+ separate registers. For Mac16, the UMUL.AA.* opcodes can specify
+ that the inputs come from either half of the registers, so there
+ is no need to shift them out ahead of time. If there is no
+ multiply hardware, the 16-bit chunks can be extracted when setting
+ up the arguments to the separate multiply function. */
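+
+	/* Equivalently: with x == (xhh << 48) + (xhl << 32) + (xlh << 16) + xll,
+	   and y decomposed the same way, the full 128-bit product is the sum
+	   of the 16 partial products, with pp0 at weight 2^0, pp1-2 at 2^16,
+	   pp3-5 at 2^32, pp6-9 at 2^48, pp10-12 at 2^64, pp13-14 at 2^80,
+	   and pp15 at 2^96. */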
+
+ /* Save a7 since it is needed to hold a temporary value. */
+ s32i a7, sp, 4
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ /* Calling a separate multiply function will clobber a0 and requires
+ use of a8 as a temporary, so save those values now. (The function
+ uses a custom ABI so nothing else needs to be saved.) */
+ s32i a0, sp, 0
+ s32i a8, sp, 8
+#endif
+
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
+
+#define xlh a12
+#define ylh a13
+#define xhh a14
+#define yhh a15
+
+ /* Get the high halves of the inputs into registers. */
+ srli xlh, xl, 16
+ srli ylh, yl, 16
+ srli xhh, xh, 16
+ srli yhh, yh, 16
+
+#define xll xl
+#define yll yl
+#define xhl xh
+#define yhl yh
+
+#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
+ /* Clear the high halves of the inputs. This does not matter
+ for MUL16 because the high bits are ignored. */
+ extui xl, xl, 0, 16
+ extui xh, xh, 0, 16
+ extui yl, yl, 0, 16
+ extui yh, yh, 0, 16
+#endif
+#endif /* MUL16 || MUL32 */
+
+
+#if XCHAL_HAVE_MUL16
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mul16u dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MUL32
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mull dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MAC16
+
+/* The preprocessor insists on inserting a space when concatenating after
+ a period in the definition of do_mul below. These macros are a workaround
+ using underscores instead of periods when doing the concatenation. */
+#define umul_aa_ll umul.aa.ll
+#define umul_aa_lh umul.aa.lh
+#define umul_aa_hl umul.aa.hl
+#define umul_aa_hh umul.aa.hh
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ umul_aa_ ## xhalf ## yhalf xreg, yreg; \
+ rsr dst, ACCLO
+
+#else /* no multiply hardware */
+
+#define set_arg_l(dst, src) \
+ extui dst, src, 0, 16
+#define set_arg_h(dst, src) \
+ srli dst, src, 16
+
+#if __XTENSA_CALL0_ABI__
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a13, xreg); \
+ set_arg_ ## yhalf (a14, yreg); \
+ call0 .Lmul_mulsi3; \
+ mov dst, a12
+#else
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a14, xreg); \
+ set_arg_ ## yhalf (a15, yreg); \
+ call12 .Lmul_mulsi3; \
+ mov dst, a14
+#endif /* __XTENSA_CALL0_ABI__ */
+
+#endif /* no multiply hardware */
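+
+	/* For example, with the Mul32 option, do_mul(a10, xl, l, yl, h)
+	   expands to "mull a10, xll, ylh", which after the register defines
+	   above is "mull a10, xl, a13". */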
+
+ /* Add pp1 and pp2 into a10 with carry-out in a9. */
+ do_mul(a10, xl, l, yl, h) /* pp 1 */
+ do_mul(a11, xl, h, yl, l) /* pp 2 */
+ movi a9, 0
+ add a10, a10, a11
+ bgeu a10, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Initialize a6 with a9/a10 shifted into position. Note that
+ this value can be safely incremented without any carry-outs. */
+ ssai 16
+ src a6, a9, a10
+
+ /* Compute the low word into a10. */
+ do_mul(a11, xl, l, yl, l) /* pp 0 */
+ sll a10, a10
+ add a10, a10, a11
+ bgeu a10, a11, 1f
+ addi a6, a6, 1
+1:
+ /* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
+ This is good enough to determine the low half of a6, so that any
+ nonzero bits from the low word of the result can be collapsed
+ into a6, freeing up a register. */
+ movi a9, 0
+ do_mul(a11, xl, l, yh, l) /* pp 3 */
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ do_mul(a11, xl, h, yl, h) /* pp 4 */
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ do_mul(a11, xh, l, yl, l) /* pp 5 */
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Collapse any nonzero bits from the low word into a6. */
+ beqz a10, 1f
+ movi a11, 1
+ or a6, a6, a11
+1:
+ /* Add pp6-9 into a11 with carry-outs in a10. */
+ do_mul(a7, xl, l, yh, h) /* pp 6 */
+ do_mul(a11, xh, h, yl, l) /* pp 9 */
+ movi a10, 0
+ add a11, a11, a7
+ bgeu a11, a7, 1f
+ addi a10, a10, 1
+1:
+ do_mul(a7, xl, h, yh, l) /* pp 7 */
+ add a11, a11, a7
+ bgeu a11, a7, 1f
+ addi a10, a10, 1
+1:
+ do_mul(a7, xh, l, yl, h) /* pp 8 */
+ add a11, a11, a7
+ bgeu a11, a7, 1f
+ addi a10, a10, 1
+1:
+ /* Shift a10/a11 into position, and add low half of a11 to a6. */
+ src a10, a10, a11
+ add a10, a10, a9
+ sll a11, a11
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a10, a10, 1
+1:
+ /* Add pp10-12 into xl with carry-outs in a9. */
+ movi a9, 0
+ do_mul(xl, xl, h, yh, h) /* pp 10 */
+ add xl, xl, a10
+ bgeu xl, a10, 1f
+ addi a9, a9, 1
+1:
+ do_mul(a10, xh, l, yh, l) /* pp 11 */
+ add xl, xl, a10
+ bgeu xl, a10, 1f
+ addi a9, a9, 1
+1:
+ do_mul(a10, xh, h, yl, h) /* pp 12 */
+ add xl, xl, a10
+ bgeu xl, a10, 1f
+ addi a9, a9, 1
+1:
+ /* Add pp13-14 into a11 with carry-outs in a10. */
+ do_mul(a11, xh, l, yh, h) /* pp 13 */
+ do_mul(a7, xh, h, yh, l) /* pp 14 */
+ movi a10, 0
+ add a11, a11, a7
+ bgeu a11, a7, 1f
+ addi a10, a10, 1
+1:
+ /* Shift a10/a11 into position, and add low half of a11 to a6. */
+ src a10, a10, a11
+ add a10, a10, a9
+ sll a11, a11
+ add xl, xl, a11
+ bgeu xl, a11, 1f
+ addi a10, a10, 1
+1:
+ /* Compute xh. */
+ do_mul(xh, xh, h, yh, h) /* pp 15 */
+ add xh, xh, a10
+
+ /* Restore values saved on the stack during the multiplication. */
+ l32i a7, sp, 4
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ l32i a0, sp, 0
+ l32i a8, sp, 8
+#endif
+#endif /* ! XCHAL_HAVE_MUL32_HIGH */
+
+ /* Shift left by 12 bits, unless there was a carry-out from the
+ multiply, in which case, shift by 11 bits and increment the
+ exponent. Note: It is convenient to use the constant 0x3ff
+ instead of 0x400 when removing the extra exponent bias (so that
+ it is easy to construct 0x7fe for the overflow check). Reverse
+ the logic here to decrement the exponent sum by one unless there
+ was a carry-out. */
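+	/* (Each mantissa is in [2^52, 2^53), so the 128-bit product is in
+	   [2^104, 2^106); bit 105 of the product is bit 9 of xh, which is
+	   what the SRLI below tests.) */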
+ movi a4, 11
+ srli a5, xh, 21 - 12
+ bnez a5, 1f
+ addi a4, a4, 1
+ addi a8, a8, -1
+1: ssl a4
+ src xh, xh, xl
+ src xl, xl, a6
+ sll a6, a6
+
+ /* Subtract the extra bias from the exponent sum (plus one to account
+ for the explicit "1.0" of the mantissa that will be added to the
+ exponent in the final result). */
+ movi a4, 0x3ff
+ sub a8, a8, a4
+
+ /* Check for over/underflow. The value in a8 is one less than the
+ final exponent, so values in the range 0..7fd are OK here. */
+ slli a4, a4, 1 /* 0x7fe */
+ bgeu a8, a4, .Lmul_overflow
+
+.Lmul_round:
+ /* Round. */
+ bgez a6, .Lmul_rounded
+ addi xl, xl, 1
+ beqz xl, .Lmul_roundcarry
+ slli a6, a6, 1
+ beqz a6, .Lmul_exactlyhalf
+
+.Lmul_rounded:
+ /* Add the exponent to the mantissa. */
+ slli a8, a8, 20
+ add xh, xh, a8
+
+.Lmul_addsign:
+ /* Add the sign bit. */
+ srli a7, a7, 31
+ slli a7, a7, 31
+ or xh, xh, a7
+
+.Lmul_done:
+#if __XTENSA_CALL0_ABI__
+ l32i a12, sp, 16
+ l32i a13, sp, 20
+ l32i a14, sp, 24
+ l32i a15, sp, 28
+ addi sp, sp, 32
+#endif
+ leaf_return
+
+.Lmul_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli xl, xl, 1
+ slli xl, xl, 1
+ j .Lmul_rounded
+
+.Lmul_roundcarry:
+ /* xl is always zero when the rounding increment overflows, so
+ there's no need to round it to an even value. */
+ addi xh, xh, 1
+ /* Overflow is OK -- it will be added to the exponent. */
+ j .Lmul_rounded
+
+.Lmul_overflow:
+ bltz a8, .Lmul_underflow
+ /* Return +/- Infinity. */
+ addi a8, a4, 1 /* 0x7ff */
+ slli xh, a8, 20
+ movi xl, 0
+ j .Lmul_addsign
+
+.Lmul_underflow:
+ /* Create a subnormal value, where the exponent field contains zero,
+ but the effective exponent is 1. The value of a8 is one less than
+ the actual exponent, so just negate it to get the shift amount. */
+ neg a8, a8
+ mov a9, a6
+ ssr a8
+ bgeui a8, 32, .Lmul_bigshift
+
+ /* Shift xh/xl right. Any bits that are shifted out of xl are saved
+ in a6 (combined with the shifted-out bits currently in a6) for
+ rounding the result. */
+ sll a6, xl
+ src xl, xh, xl
+ srl xh, xh
+ j 1f
+
+.Lmul_bigshift:
+ bgeui a8, 64, .Lmul_flush_to_zero
+ sll a10, xl /* lost bits shifted out of xl */
+ src a6, xh, xl
+ srl xl, xh
+ movi xh, 0
+ or a9, a9, a10
+
+ /* Set the exponent to zero. */
+1: movi a8, 0
+
+ /* Pack any nonzero bits shifted out into a6. */
+ beqz a9, .Lmul_round
+ movi a9, 1
+ or a6, a6, a9
+ j .Lmul_round
+
+.Lmul_flush_to_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli xh, a7, 31
+ slli xh, xh, 31
+ movi xl, 0
+ j .Lmul_done
+
+#if XCHAL_NO_MUL
+
+ /* For Xtensa processors with no multiply hardware, this simplified
+ version of _mulsi3 is used for multiplying 16-bit chunks of
+ the floating-point mantissas. When using CALL0, this function
+ uses a custom ABI: the inputs are passed in a13 and a14, the
+ result is returned in a12, and a8 and a15 are clobbered. */
+ .align 4
+.Lmul_mulsi3:
+ leaf_entry sp, 16
+ .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
+ movi \dst, 0
+1: add \tmp1, \src2, \dst
+ extui \tmp2, \src1, 0, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx2 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 1, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx4 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 2, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx8 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 3, 1
+ movnez \dst, \tmp1, \tmp2
+
+ srli \src1, \src1, 4
+ slli \src2, \src2, 4
+ bnez \src1, 1b
+ .endm
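+
+	/* The macro body above is a shift-and-add multiply that handles four
+	   bits of src1 per iteration.  Roughly, in C:
+
+	   dst = 0;
+	   do {
+	     if (src1 & 1) dst += src2;
+	     if (src1 & 2) dst += 2 * src2;
+	     if (src1 & 4) dst += 4 * src2;
+	     if (src1 & 8) dst += 8 * src2;
+	     src1 >>= 4;
+	     src2 <<= 4;
+	   } while (src1 != 0);
+
+	   The assembly uses conditional moves rather than branches for the
+	   bit tests. */
+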
+#if __XTENSA_CALL0_ABI__
+ mul_mulsi3_body a12, a13, a14, a15, a8
+#else
+ /* The result will be written into a2, so save that argument in a4. */
+ mov a4, a2
+ mul_mulsi3_body a2, a4, a3, a5, a6
+#endif
+ leaf_return
+#endif /* XCHAL_NO_MUL */
+#endif /* L_muldf3 */
+
+#ifdef L_divdf3
+
+ /* Division */
+__divdf3_aux:
+
+ /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
+ (This code is placed before the start of the function just to
+ keep it in range of the limited branch displacements.) */
+
+.Ldiv_yexpzero:
+ /* Clear the sign bit of y. */
+ slli yh, yh, 1
+ srli yh, yh, 1
+
+ /* Check for division by zero. */
+ or a10, yh, yl
+ beqz a10, .Ldiv_yzero
+
+ /* Normalize y. Adjust the exponent in a9. */
+ beqz yh, .Ldiv_yh_zero
+ do_nsau a10, yh, a11, a9
+ addi a10, a10, -11
+ ssl a10
+ src yh, yh, yl
+ sll yl, yl
+ movi a9, 1
+ sub a9, a9, a10
+ j .Ldiv_ynormalized
+.Ldiv_yh_zero:
+ do_nsau a10, yl, a11, a9
+ addi a10, a10, -11
+ movi a9, -31
+ sub a9, a9, a10
+ ssl a10
+ bltz a10, .Ldiv_yl_srl
+ sll yh, yl
+ movi yl, 0
+ j .Ldiv_ynormalized
+.Ldiv_yl_srl:
+ srl yh, yl
+ sll yl, yl
+ j .Ldiv_ynormalized
+
+.Ldiv_yzero:
+ /* y is zero. Return NaN if x is also zero; otherwise, infinity. */
+ slli xh, xh, 1
+ srli xh, xh, 1
+ or xl, xl, xh
+ srli xh, a7, 31
+ slli xh, xh, 31
+ or xh, xh, a6
+ bnez xl, 1f
+ movi a4, 0x80000 /* make it a quiet NaN */
+ or xh, xh, a4
+1: movi xl, 0
+ leaf_return
+
+.Ldiv_xexpzero:
+ /* Clear the sign bit of x. */
+ slli xh, xh, 1
+ srli xh, xh, 1
+
+ /* If x is zero, return zero. */
+ or a10, xh, xl
+ beqz a10, .Ldiv_return_zero
+
+ /* Normalize x. Adjust the exponent in a8. */
+ beqz xh, .Ldiv_xh_zero
+ do_nsau a10, xh, a11, a8
+ addi a10, a10, -11
+ ssl a10
+ src xh, xh, xl
+ sll xl, xl
+ movi a8, 1
+ sub a8, a8, a10
+ j .Ldiv_xnormalized
+.Ldiv_xh_zero:
+ do_nsau a10, xl, a11, a8
+ addi a10, a10, -11
+ movi a8, -31
+ sub a8, a8, a10
+ ssl a10
+ bltz a10, .Ldiv_xl_srl
+ sll xh, xl
+ movi xl, 0
+ j .Ldiv_xnormalized
+.Ldiv_xl_srl:
+ srl xh, xl
+ sll xl, xl
+ j .Ldiv_xnormalized
+
+.Ldiv_return_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli xh, a7, 31
+ slli xh, xh, 31
+ movi xl, 0
+ leaf_return
+
+.Ldiv_xnan_or_inf:
+ /* Set the sign bit of the result. */
+ srli a7, yh, 31
+ slli a7, a7, 31
+ xor xh, xh, a7
+ /* If y is NaN or Inf, return NaN. */
+ bnall yh, a6, 1f
+ movi a4, 0x80000 /* make it a quiet NaN */
+ or xh, xh, a4
+1: leaf_return
+
+.Ldiv_ynan_or_inf:
+ /* If y is Infinity, return zero. */
+ slli a8, yh, 12
+ or a8, a8, yl
+ beqz a8, .Ldiv_return_zero
+ /* y is NaN; return it. */
+ mov xh, yh
+ mov xl, yl
+ leaf_return
+
+.Ldiv_highequal1:
+ bltu xl, yl, 2f
+ j 3f
+
+ .align 4
+ .global __divdf3
+ .type __divdf3, @function
+__divdf3:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+
+ /* Get the sign of the result. */
+ xor a7, xh, yh
+
+ /* Check for NaN and infinity. */
+ ball xh, a6, .Ldiv_xnan_or_inf
+ ball yh, a6, .Ldiv_ynan_or_inf
+
+ /* Extract the exponents. */
+ extui a8, xh, 20, 11
+ extui a9, yh, 20, 11
+
+ beqz a9, .Ldiv_yexpzero
+.Ldiv_ynormalized:
+ beqz a8, .Ldiv_xexpzero
+.Ldiv_xnormalized:
+
+ /* Subtract the exponents. */
+ sub a8, a8, a9
+
+ /* Replace sign/exponent fields with explicit "1.0". */
+ movi a10, 0x1fffff
+ or xh, xh, a6
+ and xh, xh, a10
+ or yh, yh, a6
+ and yh, yh, a10
+
+ /* Set SAR for left shift by one. */
+ ssai (32 - 1)
+
+ /* The first digit of the mantissa division must be a one.
+ Shift x (and adjust the exponent) as needed to make this true. */
+ bltu yh, xh, 3f
+ beq yh, xh, .Ldiv_highequal1
+2: src xh, xh, xl
+ sll xl, xl
+ addi a8, a8, -1
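+
+	/* (Both mantissas represent values in [1, 2), so x/y lies in
+	   (1/2, 2); when x < y the leading quotient bit arrives one position
+	   later, which the shift and exponent decrement above compensate
+	   for.) */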
+3:
+ /* Do the first subtraction and shift. */
+ sub xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, -1
+1: sub xl, xl, yl
+ src xh, xh, xl
+ sll xl, xl
+
+ /* Put the quotient into a10/a11. */
+ movi a10, 0
+ movi a11, 1
+
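+	/* The loop below implements bit-at-a-time long division of the
+	   mantissas: each iteration shifts the quotient (a10/a11) left, and
+	   when the remainder (xh/xl) is at least the divisor (yh/yl), sets
+	   the new quotient bit and subtracts; the remainder is then doubled
+	   for the next bit. */
+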
+ /* Divide one bit at a time for 52 bits. */
+ movi a9, 52
+#if XCHAL_HAVE_LOOPS
+ loop a9, .Ldiv_loopend
+#endif
+.Ldiv_loop:
+ /* Shift the quotient << 1. */
+ src a10, a10, a11
+ sll a11, a11
+
+ /* Is this digit a 0 or 1? */
+ bltu xh, yh, 3f
+ beq xh, yh, .Ldiv_highequal2
+
+ /* Output a 1 and subtract. */
+2: addi a11, a11, 1
+ sub xh, xh, yh
+ bgeu xl, yl, 1f
+ addi xh, xh, -1
+1: sub xl, xl, yl
+
+ /* Shift the dividend << 1. */
+3: src xh, xh, xl
+ sll xl, xl
+
+#if !XCHAL_HAVE_LOOPS
+ addi a9, a9, -1
+ bnez a9, .Ldiv_loop
+#endif
+.Ldiv_loopend:
+
+ /* Add the exponent bias (less one to account for the explicit "1.0"
+ of the mantissa that will be added to the exponent in the final
+ result). */
+ movi a9, 0x3fe
+ add a8, a8, a9
+
+ /* Check for over/underflow. The value in a8 is one less than the
+ final exponent, so values in the range 0..7fd are OK here. */
+ addmi a9, a9, 0x400 /* 0x7fe */
+ bgeu a8, a9, .Ldiv_overflow
+
+.Ldiv_round:
+ /* Round. The remainder (<< 1) is in xh/xl. */
+ bltu xh, yh, .Ldiv_rounded
+ beq xh, yh, .Ldiv_highequal3
+.Ldiv_roundup:
+ addi a11, a11, 1
+ beqz a11, .Ldiv_roundcarry
+
+.Ldiv_rounded:
+ mov xl, a11
+ /* Add the exponent to the mantissa. */
+ slli a8, a8, 20
+ add xh, a10, a8
+
+.Ldiv_addsign:
+ /* Add the sign bit. */
+ srli a7, a7, 31
+ slli a7, a7, 31
+ or xh, xh, a7
+ leaf_return
+
+.Ldiv_highequal2:
+ bgeu xl, yl, 2b
+ j 3b
+
+.Ldiv_highequal3:
+ bltu xl, yl, .Ldiv_rounded
+ bne xl, yl, .Ldiv_roundup
+
+ /* Remainder is exactly half the divisor. Round even. */
+ addi a11, a11, 1
+ beqz a11, .Ldiv_roundcarry
+ srli a11, a11, 1
+ slli a11, a11, 1
+ j .Ldiv_rounded
+
+.Ldiv_overflow:
+ bltz a8, .Ldiv_underflow
+ /* Return +/- Infinity. */
+ addi a8, a9, 1 /* 0x7ff */
+ slli xh, a8, 20
+ movi xl, 0
+ j .Ldiv_addsign
+
+.Ldiv_underflow:
+ /* Create a subnormal value, where the exponent field contains zero,
+ but the effective exponent is 1. The value of a8 is one less than
+ the actual exponent, so just negate it to get the shift amount. */
+ neg a8, a8
+ ssr a8
+ bgeui a8, 32, .Ldiv_bigshift
+
+ /* Shift a10/a11 right. Any bits that are shifted out of a11 are
+ saved in a6 for rounding the result. */
+ sll a6, a11
+ src a11, a10, a11
+ srl a10, a10
+ j 1f
+
+.Ldiv_bigshift:
+ bgeui a8, 64, .Ldiv_flush_to_zero
+ sll a9, a11 /* lost bits shifted out of a11 */
+ src a6, a10, a11
+ srl a11, a10
+ movi a10, 0
+ or xl, xl, a9
+
+ /* Set the exponent to zero. */
+1: movi a8, 0
+
+ /* Pack any nonzero remainder (in xh/xl) into a6. */
+ or xh, xh, xl
+ beqz xh, 1f
+ movi a9, 1
+ or a6, a6, a9
+
+ /* Round a10/a11 based on the bits shifted out into a6. */
+1: bgez a6, .Ldiv_rounded
+ addi a11, a11, 1
+ beqz a11, .Ldiv_roundcarry
+ slli a6, a6, 1
+ bnez a6, .Ldiv_rounded
+ srli a11, a11, 1
+ slli a11, a11, 1
+ j .Ldiv_rounded
+
+.Ldiv_roundcarry:
+ /* a11 is always zero when the rounding increment overflows, so
+ there's no need to round it to an even value. */
+ addi a10, a10, 1
+ /* Overflow to the exponent field is OK. */
+ j .Ldiv_rounded
+
+.Ldiv_flush_to_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli xh, a7, 31
+ slli xh, xh, 31
+ movi xl, 0
+ leaf_return
+
+#endif /* L_divdf3 */
+
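+/* The comparison functions below follow the usual libgcc soft-float
+ conventions: __eqdf2/__nedf2 return zero iff the operands are equal;
+ __ltdf2 and __ledf2 return a negative (resp. nonpositive) value when
+ the relation holds; __gtdf2 and __gedf2 return a positive (resp.
+ nonnegative) value when it holds; and __unorddf2 returns nonzero iff
+ either operand is a NaN. The NaN handling makes every ordered
+ comparison come out false when a NaN is involved. */
+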
+#ifdef L_cmpdf2
+
+ /* Equal and Not Equal */
+
+ .align 4
+ .global __eqdf2
+ .global __nedf2
+ .set __nedf2, __eqdf2
+ .type __eqdf2, @function
+__eqdf2:
+ leaf_entry sp, 16
+ bne xl, yl, 2f
+ bne xh, yh, 4f
+
+ /* The values are equal but NaN != NaN. Check the exponent. */
+ movi a6, 0x7ff00000
+ ball xh, a6, 3f
+
+ /* Equal. */
+ movi a2, 0
+ leaf_return
+
+ /* Not equal. */
+2: movi a2, 1
+ leaf_return
+
+ /* Check if the mantissas are nonzero. */
+3: slli a7, xh, 12
+ or a7, a7, xl
+ j 5f
+
+ /* Check if x and y are zero with different signs. */
+4: or a7, xh, yh
+ slli a7, a7, 1
+ or a7, a7, xl /* xl == yl here */
+
+ /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
+ of x when exponent(x) == 0x7ff and x == y. */
+5: movi a2, 0
+ movi a3, 1
+ movnez a2, a3, a7
+ leaf_return
+
+
+ /* Greater Than */
+
+ .align 4
+ .global __gtdf2
+ .type __gtdf2, @function
+__gtdf2:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+ ball xh, a6, 2f
+1: bnall yh, a6, .Lle_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, yh, 12
+ or a7, a7, yl
+ beqz a7, .Lle_cmp
+ movi a2, 0
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, 1b
+ movi a2, 0
+ leaf_return
+
+
+ /* Less Than or Equal */
+
+ .align 4
+ .global __ledf2
+ .type __ledf2, @function
+__ledf2:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+ ball xh, a6, 2f
+1: bnall yh, a6, .Lle_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, yh, 12
+ or a7, a7, yl
+ beqz a7, .Lle_cmp
+ movi a2, 1
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, 1b
+ movi a2, 1
+ leaf_return
+
+.Lle_cmp:
+ /* Check if x and y have different signs. */
+ xor a7, xh, yh
+ bltz a7, .Lle_diff_signs
+
+ /* Check if x is negative. */
+ bltz xh, .Lle_xneg
+
+ /* Check if x <= y. */
+ bltu xh, yh, 4f
+ bne xh, yh, 5f
+ bltu yl, xl, 5f
+4: movi a2, 0
+ leaf_return
+
+.Lle_xneg:
+ /* Check if y <= x. */
+ bltu yh, xh, 4b
+ bne yh, xh, 5f
+ bgeu xl, yl, 4b
+5: movi a2, 1
+ leaf_return
+
+.Lle_diff_signs:
+ bltz xh, 4b
+
+ /* Check if both x and y are zero. */
+ or a7, xh, yh
+ slli a7, a7, 1
+ or a7, a7, xl
+ or a7, a7, yl
+ movi a2, 1
+ movi a3, 0
+ moveqz a2, a3, a7
+ leaf_return
+
+
+ /* Greater Than or Equal */
+
+ .align 4
+ .global __gedf2
+ .type __gedf2, @function
+__gedf2:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+ ball xh, a6, 2f
+1: bnall yh, a6, .Llt_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, yh, 12
+ or a7, a7, yl
+ beqz a7, .Llt_cmp
+ movi a2, -1
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, 1b
+ movi a2, -1
+ leaf_return
+
+
+ /* Less Than */
+
+ .align 4
+ .global __ltdf2
+ .type __ltdf2, @function
+__ltdf2:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+ ball xh, a6, 2f
+1: bnall yh, a6, .Llt_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, yh, 12
+ or a7, a7, yl
+ beqz a7, .Llt_cmp
+ movi a2, 0
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, 1b
+ movi a2, 0
+ leaf_return
+
+.Llt_cmp:
+ /* Check if x and y have different signs. */
+ xor a7, xh, yh
+ bltz a7, .Llt_diff_signs
+
+ /* Check if x is negative. */
+ bltz xh, .Llt_xneg
+
+ /* Check if x < y. */
+ bltu xh, yh, 4f
+ bne xh, yh, 5f
+ bgeu xl, yl, 5f
+4: movi a2, -1
+ leaf_return
+
+.Llt_xneg:
+ /* Check if y < x. */
+ bltu yh, xh, 4b
+ bne yh, xh, 5f
+ bltu yl, xl, 4b
+5: movi a2, 0
+ leaf_return
+
+.Llt_diff_signs:
+ bgez xh, 5b
+
+	/* Check if both x and y are zero. */
+ or a7, xh, yh
+ slli a7, a7, 1
+ or a7, a7, xl
+ or a7, a7, yl
+ movi a2, 0
+ movi a3, -1
+ movnez a2, a3, a7
+ leaf_return
+
+
+ /* Unordered */
+
+ .align 4
+ .global __unorddf2
+ .type __unorddf2, @function
+__unorddf2:
+ leaf_entry sp, 16
+ movi a6, 0x7ff00000
+ ball xh, a6, 3f
+1: ball yh, a6, 4f
+2: movi a2, 0
+ leaf_return
+
+3: slli a7, xh, 12
+ or a7, a7, xl
+ beqz a7, 1b
+ movi a2, 1
+ leaf_return
+
+4: slli a7, yh, 12
+ or a7, a7, yl
+ beqz a7, 2b
+ movi a2, 1
+ leaf_return
+
+#endif /* L_cmpdf2 */
+
+#ifdef L_fixdfsi
+
+ .align 4
+ .global __fixdfsi
+ .type __fixdfsi, @function
+__fixdfsi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7ff00000
+ ball xh, a6, .Lfixdfsi_nan_or_inf
+
+ /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */
+ extui a4, xh, 20, 11
+ extui a5, a6, 19, 10 /* 0x3fe */
+ sub a4, a4, a5
+ bgei a4, 32, .Lfixdfsi_maxint
+ blti a4, 1, .Lfixdfsi_zero
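+
+	/* (Example: x == 1.0 has exponent field 0x3ff, so a4 == 1; the
+	   implicit one lands in bit 31 of a5 below, and the final shift
+	   right by 32 - a4 == 31 leaves exactly 1.) */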
+
+ /* Add explicit "1.0" and shift << 11. */
+ or a7, xh, a6
+ ssai (32 - 11)
+ src a5, a7, xl
+
+ /* Shift back to the right, based on the exponent. */
+ ssl a4 /* shift by 32 - a4 */
+ srl a5, a5
+
+ /* Negate the result if sign != 0. */
+ neg a2, a5
+ movgez a2, a5, a7
+ leaf_return
+
+.Lfixdfsi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, xh, 12
+ or a4, a4, xl
+ beqz a4, .Lfixdfsi_maxint
+
+ /* Translate NaN to +maxint. */
+ movi xh, 0
+
+.Lfixdfsi_maxint:
+ slli a4, a6, 11 /* 0x80000000 */
+ addi a5, a4, -1 /* 0x7fffffff */
+ movgez a4, a5, xh
+ mov a2, a4
+ leaf_return
+
+.Lfixdfsi_zero:
+ movi a2, 0
+ leaf_return
+
+#endif /* L_fixdfsi */
+
+#ifdef L_fixdfdi
+
+ .align 4
+ .global __fixdfdi
+ .type __fixdfdi, @function
+__fixdfdi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7ff00000
+ ball xh, a6, .Lfixdfdi_nan_or_inf
+
+ /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */
+ extui a4, xh, 20, 11
+ extui a5, a6, 19, 10 /* 0x3fe */
+ sub a4, a4, a5
+ bgei a4, 64, .Lfixdfdi_maxint
+ blti a4, 1, .Lfixdfdi_zero
+
+ /* Add explicit "1.0" and shift << 11. */
+ or a7, xh, a6
+ ssai (32 - 11)
+ src xh, a7, xl
+ sll xl, xl
+
+ /* Shift back to the right, based on the exponent. */
+ ssl a4 /* shift by 64 - a4 */
+ bgei a4, 32, .Lfixdfdi_smallshift
+ srl xl, xh
+ movi xh, 0
+
+.Lfixdfdi_shifted:
+ /* Negate the result if sign != 0. */
+ bgez a7, 1f
+ neg xl, xl
+ neg xh, xh
+ beqz xl, 1f
+ addi xh, xh, -1
+1: leaf_return
+
+.Lfixdfdi_smallshift:
+ src xl, xh, xl
+ srl xh, xh
+ j .Lfixdfdi_shifted
+
+.Lfixdfdi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, xh, 12
+ or a4, a4, xl
+ beqz a4, .Lfixdfdi_maxint
+
+ /* Translate NaN to +maxint. */
+ movi xh, 0
+
+.Lfixdfdi_maxint:
+ slli a7, a6, 11 /* 0x80000000 */
+ bgez xh, 1f
+ mov xh, a7
+ movi xl, 0
+ leaf_return
+
+1: addi xh, a7, -1 /* 0x7fffffff */
+ movi xl, -1
+ leaf_return
+
+.Lfixdfdi_zero:
+ movi xh, 0
+ movi xl, 0
+ leaf_return
+
+#endif /* L_fixdfdi */
+
+#ifdef L_fixunsdfsi
+
+ .align 4
+ .global __fixunsdfsi
+ .type __fixunsdfsi, @function
+__fixunsdfsi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7ff00000
+ ball xh, a6, .Lfixunsdfsi_nan_or_inf
+
+ /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */
+ extui a4, xh, 20, 11
+ extui a5, a6, 20, 10 /* 0x3ff */
+ sub a4, a4, a5
+ bgei a4, 32, .Lfixunsdfsi_maxint
+ bltz a4, .Lfixunsdfsi_zero
+
+ /* Add explicit "1.0" and shift << 11. */
+ or a7, xh, a6
+ ssai (32 - 11)
+ src a5, a7, xl
+
+ /* Shift back to the right, based on the exponent. */
+ addi a4, a4, 1
+ beqi a4, 32, .Lfixunsdfsi_bigexp
+ ssl a4 /* shift by 32 - a4 */
+ srl a5, a5
+
+ /* Negate the result if sign != 0. */
+ neg a2, a5
+ movgez a2, a5, a7
+ leaf_return
+
+.Lfixunsdfsi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, xh, 12
+ or a4, a4, xl
+ beqz a4, .Lfixunsdfsi_maxint
+
+ /* Translate NaN to 0xffffffff. */
+ movi a2, -1
+ leaf_return
+
+.Lfixunsdfsi_maxint:
+ slli a4, a6, 11 /* 0x80000000 */
+ movi a5, -1 /* 0xffffffff */
+ movgez a4, a5, xh
+ mov a2, a4
+ leaf_return
+
+.Lfixunsdfsi_zero:
+ movi a2, 0
+ leaf_return
+
+.Lfixunsdfsi_bigexp:
+ /* Handle unsigned maximum exponent case. */
+ bltz xh, 1f
+ mov a2, a5 /* no shift needed */
+ leaf_return
+
+ /* Return 0x80000000 if negative. */
+1: slli a2, a6, 11
+ leaf_return
+
+#endif /* L_fixunsdfsi */
+
+#ifdef L_fixunsdfdi
+
+ .align 4
+ .global __fixunsdfdi
+ .type __fixunsdfdi, @function
+__fixunsdfdi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7ff00000
+ ball xh, a6, .Lfixunsdfdi_nan_or_inf
+
+ /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */
+ extui a4, xh, 20, 11
+ extui a5, a6, 20, 10 /* 0x3ff */
+ sub a4, a4, a5
+ bgei a4, 64, .Lfixunsdfdi_maxint
+ bltz a4, .Lfixunsdfdi_zero
+
+ /* Add explicit "1.0" and shift << 11. */
+ or a7, xh, a6
+ ssai (32 - 11)
+ src xh, a7, xl
+ sll xl, xl
+
+ /* Shift back to the right, based on the exponent. */
+ addi a4, a4, 1
+ beqi a4, 64, .Lfixunsdfdi_bigexp
+ ssl a4 /* shift by 64 - a4 */
+ bgei a4, 32, .Lfixunsdfdi_smallshift
+ srl xl, xh
+ movi xh, 0
+
+.Lfixunsdfdi_shifted:
+ /* Negate the result if sign != 0. */
+ bgez a7, 1f
+ neg xl, xl
+ neg xh, xh
+ beqz xl, 1f
+ addi xh, xh, -1
+1: leaf_return
+
+.Lfixunsdfdi_smallshift:
+ src xl, xh, xl
+ srl xh, xh
+ j .Lfixunsdfdi_shifted
+
+.Lfixunsdfdi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, xh, 12
+ or a4, a4, xl
+ beqz a4, .Lfixunsdfdi_maxint
+
+ /* Translate NaN to 0xffffffff.... */
+1: movi xh, -1
+ movi xl, -1
+ leaf_return
+
+.Lfixunsdfdi_maxint:
+ bgez xh, 1b
+2: slli xh, a6, 11 /* 0x80000000 */
+ movi xl, 0
+ leaf_return
+
+.Lfixunsdfdi_zero:
+ movi xh, 0
+ movi xl, 0
+ leaf_return
+
+.Lfixunsdfdi_bigexp:
+ /* Handle unsigned maximum exponent case. */
+ bltz a7, 2b
+ leaf_return /* no shift needed */
+
+#endif /* L_fixunsdfdi */
+
+#ifdef L_floatsidf
+
+ .align 4
+ .global __floatunsidf
+ .type __floatunsidf, @function
+__floatunsidf:
+ leaf_entry sp, 16
+ beqz a2, .Lfloatsidf_return_zero
+
+ /* Set the sign to zero and jump to the floatsidf code. */
+ movi a7, 0
+ j .Lfloatsidf_normalize
+
+ .align 4
+ .global __floatsidf
+ .type __floatsidf, @function
+__floatsidf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ beqz a2, .Lfloatsidf_return_zero
+
+ /* Save the sign. */
+ extui a7, a2, 31, 1
+
+ /* Get the absolute value. */
+#if XCHAL_HAVE_ABS
+ abs a2, a2
+#else
+ neg a4, a2
+ movltz a2, a4, a2
+#endif
+
+.Lfloatsidf_normalize:
+ /* Normalize with the first 1 bit in the msb. */
+ do_nsau a4, a2, a5, a6
+ ssl a4
+ sll a5, a2
+
+ /* Shift the mantissa into position. */
+ srli xh, a5, 11
+ slli xl, a5, (32 - 11)
+
+ /* Set the exponent. */
+ movi a5, 0x41d /* 0x3fe + 31 */
+ sub a5, a5, a4
+ slli a5, a5, 20
+ add xh, xh, a5
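+
+	/* (Example: converting 1 gives a4 == 31, so the mantissa word is
+	   0x00100000 and the exponent field (0x41d - 31) == 0x3fe; the add
+	   above then yields xh == 0x3ff00000, which is 1.0.) */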
+
+ /* Add the sign and return. */
+ slli a7, a7, 31
+ or xh, xh, a7
+ leaf_return
+
+.Lfloatsidf_return_zero:
+ movi a3, 0
+ leaf_return
+
+#endif /* L_floatsidf */
+
+#ifdef L_floatdidf
+
+ .align 4
+ .global __floatundidf
+ .type __floatundidf, @function
+__floatundidf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ or a4, xh, xl
+ beqz a4, 2f
+
+ /* Set the sign to zero and jump to the floatdidf code. */
+ movi a7, 0
+ j .Lfloatdidf_normalize
+
+ .align 4
+ .global __floatdidf
+ .type __floatdidf, @function
+__floatdidf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ or a4, xh, xl
+ beqz a4, 2f
+
+ /* Save the sign. */
+ extui a7, xh, 31, 1
+
+ /* Get the absolute value. */
+ bgez xh, .Lfloatdidf_normalize
+ neg xl, xl
+ neg xh, xh
+ beqz xl, .Lfloatdidf_normalize
+ addi xh, xh, -1
+
+.Lfloatdidf_normalize:
+ /* Normalize with the first 1 bit in the msb of xh. */
+ beqz xh, .Lfloatdidf_bigshift
+ do_nsau a4, xh, a5, a6
+ ssl a4
+ src xh, xh, xl
+ sll xl, xl
+
+.Lfloatdidf_shifted:
+ /* Shift the mantissa into position, with rounding bits in a6. */
+ ssai 11
+ sll a6, xl
+ src xl, xh, xl
+ srl xh, xh
+
+ /* Set the exponent. */
+ movi a5, 0x43d /* 0x3fe + 63 */
+ sub a5, a5, a4
+ slli a5, a5, 20
+ add xh, xh, a5
+
+ /* Add the sign. */
+ slli a7, a7, 31
+ or xh, xh, a7
+
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a6, 2f
+ addi xl, xl, 1
+ beqz xl, .Lfloatdidf_roundcarry
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a6, a6, 1
+ beqz a6, .Lfloatdidf_exactlyhalf
+2: leaf_return
+
+.Lfloatdidf_bigshift:
+ /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
+ do_nsau a4, xl, a5, a6
+ ssl a4
+ sll xh, xl
+ movi xl, 0
+ addi a4, a4, 32
+ j .Lfloatdidf_shifted
+
+.Lfloatdidf_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli xl, xl, 1
+ slli xl, xl, 1
+ leaf_return
+
+.Lfloatdidf_roundcarry:
+ /* xl is always zero when the rounding increment overflows, so
+ there's no need to round it to an even value. */
+ addi xh, xh, 1
+ /* Overflow to the exponent is OK. */
+ leaf_return
+
+#endif /* L_floatdidf */
+
+#ifdef L_truncdfsf2
+
+ .align 4
+ .global __truncdfsf2
+ .type __truncdfsf2, @function
+__truncdfsf2:
+ leaf_entry sp, 16
+
+ /* Adjust the exponent bias. */
+ movi a4, (0x3ff - 0x7f) << 20
+ sub a5, xh, a4
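+
+	/* (0x3ff and 0x7f are the double- and single-precision exponent
+	   biases, so the subtraction rebiases the exponent field in place;
+	   the XOR check below catches a borrow into the sign bit, i.e., an
+	   exponent that went negative.) */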
+
+ /* Check for underflow. */
+ xor a6, xh, a5
+ bltz a6, .Ltrunc_underflow
+ extui a6, a5, 20, 11
+ beqz a6, .Ltrunc_underflow
+
+ /* Check for overflow. */
+ movi a4, 255
+ bge a6, a4, .Ltrunc_overflow
+
+ /* Shift a5/xl << 3 into a5/a4. */
+ ssai (32 - 3)
+ src a5, a5, xl
+ sll a4, xl
+
+.Ltrunc_addsign:
+ /* Add the sign bit. */
+ extui a6, xh, 31, 1
+ slli a6, a6, 31
+ or a2, a6, a5
+
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a4, 1f
+ addi a2, a2, 1
+ /* Overflow to the exponent is OK. The answer will be correct. */
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a4, a4, 1
+ beqz a4, .Ltrunc_exactlyhalf
+1: leaf_return
+
+.Ltrunc_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ leaf_return
+
+.Ltrunc_overflow:
+ /* Check if exponent == 0x7ff. */
+ movi a4, 0x7ff00000
+ bnall xh, a4, 1f
+
+ /* Check if mantissa is nonzero. */
+ slli a5, xh, 12
+ or a5, a5, xl
+ beqz a5, 1f
+
+ /* Shift a4 to set a bit in the mantissa, making a quiet NaN. */
+ srli a4, a4, 1
+
+1: slli a4, a4, 4 /* 0xff000000 or 0xff800000 */
+ /* Add the sign bit. */
+ extui a6, xh, 31, 1
+ ssai 1
+ src a2, a6, a4
+ leaf_return
+
+.Ltrunc_underflow:
+ /* Find shift count for a subnormal. Flush to zero if >= 32. */
+ extui a6, xh, 20, 11
+ movi a5, 0x3ff - 0x7f
+ sub a6, a5, a6
+ addi a6, a6, 1
+ bgeui a6, 32, 1f
+
+ /* Replace the exponent with an explicit "1.0". */
+ slli a5, a5, 13 /* 0x700000 */
+ or a5, a5, xh
+ slli a5, a5, 11
+ srli a5, a5, 11
+
+ /* Shift the mantissa left by 3 bits (into a5/a4). */
+ ssai (32 - 3)
+ src a5, a5, xl
+ sll a4, xl
+
+ /* Shift right by a6. */
+ ssr a6
+ sll a7, a4
+ src a4, a5, a4
+ srl a5, a5
+ beqz a7, .Ltrunc_addsign
+ or a4, a4, a6 /* any positive, nonzero value will work */
+ j .Ltrunc_addsign
+
+ /* Return +/- zero. */
+1: extui a2, xh, 31, 1
+ slli a2, a2, 31
+ leaf_return
+
+#endif /* L_truncdfsf2 */
+
+#ifdef L_extendsfdf2
+
+ .align 4
+ .global __extendsfdf2
+ .type __extendsfdf2, @function
+__extendsfdf2:
+ leaf_entry sp, 16
+
+ /* Save the sign bit and then shift it off. */
+ extui a5, a2, 31, 1
+ slli a5, a5, 31
+ slli a4, a2, 1
+
+ /* Extract and check the exponent. */
+ extui a6, a2, 23, 8
+ beqz a6, .Lextend_expzero
+ addi a6, a6, 1
+ beqi a6, 256, .Lextend_nan_or_inf
+
+ /* Shift >> 3 into a4/xl. */
+ srli a4, a4, 4
+ slli xl, a2, (32 - 3)
+
+ /* Adjust the exponent bias. */
+ movi a6, (0x3ff - 0x7f) << 20
+ add a4, a4, a6
+
+ /* Add the sign bit. */
+ or xh, a4, a5
+ leaf_return
+
+.Lextend_nan_or_inf:
+ movi a4, 0x7ff00000
+
+ /* Check for NaN. */
+ slli a7, a2, 9
+ beqz a7, 1f
+
+ slli a6, a6, 11 /* 0x80000 */
+ or a4, a4, a6
+
+ /* Add the sign and return. */
+1: or xh, a4, a5
+ movi xl, 0
+ leaf_return
+
+.Lextend_expzero:
+ beqz a4, 1b
+
+ /* Normalize it to have 8 zero bits before the first 1 bit. */
+ do_nsau a7, a4, a2, a3
+ addi a7, a7, -8
+ ssl a7
+ sll a4, a4
+
+ /* Shift >> 3 into a4/xl. */
+ slli xl, a4, (32 - 3)
+ srli a4, a4, 3
+
+ /* Set the exponent. */
+ movi a6, 0x3fe - 0x7f
+ sub a6, a6, a7
+ slli a6, a6, 20
+ add a4, a4, a6
+
+ /* Add the sign and return. */
+ or xh, a4, a5
+ leaf_return
+
+#endif /* L_extendsfdf2 */
+
+
diff --git a/libgcc/config/xtensa/ieee754-sf.S b/libgcc/config/xtensa/ieee754-sf.S
new file mode 100644
index 00000000000..d75be0e5ae5
--- /dev/null
+++ b/libgcc/config/xtensa/ieee754-sf.S
@@ -0,0 +1,1757 @@
+/* IEEE-754 single-precision functions for Xtensa
+ Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __XTENSA_EB__
+#define xh a2
+#define xl a3
+#define yh a4
+#define yl a5
+#else
+#define xh a3
+#define xl a2
+#define yh a5
+#define yl a4
+#endif
+
+/* Warning! The branch displacements for some Xtensa branch instructions
+ are quite small, and this code has been carefully laid out to keep
+ branch targets in range. If you change anything, be sure to check that
+ the assembler is not relaxing anything to branch over a jump. */
+
+#ifdef L_negsf2
+
+ .align 4
+ .global __negsf2
+ .type __negsf2, @function
+__negsf2:
+ leaf_entry sp, 16
+ movi a4, 0x80000000
+ xor a2, a2, a4
+ leaf_return
+
+#endif /* L_negsf2 */
+
+#ifdef L_addsubsf3
+
+ /* Addition */
+__addsf3_aux:
+
+ /* Handle NaNs and Infinities. (This code is placed before the
+ start of the function just to keep it in range of the limited
+ branch displacements.) */
+
+.Ladd_xnan_or_inf:
+ /* If y is neither Infinity nor NaN, return x. */
+ bnall a3, a6, 1f
+ /* If x is a NaN, return it. Otherwise, return y. */
+ slli a7, a2, 9
+ beqz a7, .Ladd_ynan_or_inf
+1: leaf_return
+
+.Ladd_ynan_or_inf:
+ /* Return y. */
+ mov a2, a3
+ leaf_return
+
+.Ladd_opposite_signs:
+ /* Operand signs differ. Do a subtraction. */
+ slli a7, a6, 8
+ xor a3, a3, a7
+ j .Lsub_same_sign
+
+ .align 4
+ .global __addsf3
+ .type __addsf3, @function
+__addsf3:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+
+ /* Check if the two operands have the same sign. */
+ xor a7, a2, a3
+ bltz a7, .Ladd_opposite_signs
+
+.Ladd_same_sign:
+ /* Check if either exponent == 0x7f8 (i.e., NaN or Infinity). */
+ ball a2, a6, .Ladd_xnan_or_inf
+ ball a3, a6, .Ladd_ynan_or_inf
+
+ /* Compare the exponents. The smaller operand will be shifted
+ right by the exponent difference and added to the larger
+ one. */
+ extui a7, a2, 23, 9
+ extui a8, a3, 23, 9
+ bltu a7, a8, .Ladd_shiftx
+
+.Ladd_shifty:
+ /* Check if the smaller (or equal) exponent is zero. */
+ bnone a3, a6, .Ladd_yexpzero
+
+ /* Replace y sign/exponent with 0x008. */
+ or a3, a3, a6
+ slli a3, a3, 8
+ srli a3, a3, 8
+
+.Ladd_yexpdiff:
+ /* Compute the exponent difference. */
+ sub a10, a7, a8
+
+	/* Exponent difference >= 32 -- just return the bigger value. */
+ bgeui a10, 32, 1f
+
+ /* Shift y right by the exponent difference. Any bits that are
+ shifted out of y are saved in a9 for rounding the result. */
+ ssr a10
+ movi a9, 0
+ src a9, a3, a9
+ srl a3, a3
+
+ /* Do the addition. */
+ add a2, a2, a3
+
+ /* Check if the add overflowed into the exponent. */
+ extui a10, a2, 23, 9
+ beq a10, a7, .Ladd_round
+ mov a8, a7
+ j .Ladd_carry
+
+.Ladd_yexpzero:
+ /* y is a subnormal value. Replace its sign/exponent with zero,
+ i.e., no implicit "1.0", and increment the apparent exponent
+ because subnormals behave as if they had the minimum (nonzero)
+ exponent. Test for the case when both exponents are zero. */
+ slli a3, a3, 9
+ srli a3, a3, 9
+ bnone a2, a6, .Ladd_bothexpzero
+ addi a8, a8, 1
+ j .Ladd_yexpdiff
+
+.Ladd_bothexpzero:
+ /* Both exponents are zero. Handle this as a special case. There
+ is no need to shift or round, and the normal code for handling
+ a carry into the exponent field will not work because it
+ assumes there is an implicit "1.0" that needs to be added. */
+ add a2, a2, a3
+1: leaf_return
+
+.Ladd_xexpzero:
+ /* Same as "yexpzero" except skip handling the case when both
+ exponents are zero. */
+ slli a2, a2, 9
+ srli a2, a2, 9
+ addi a7, a7, 1
+ j .Ladd_xexpdiff
+
+.Ladd_shiftx:
+ /* Same thing as the "shifty" code, but with x and y swapped. Also,
+ because the exponent difference is always nonzero in this version,
+ the shift sequence can use SLL and skip loading a constant zero. */
+ bnone a2, a6, .Ladd_xexpzero
+
+ or a2, a2, a6
+ slli a2, a2, 8
+ srli a2, a2, 8
+
+.Ladd_xexpdiff:
+ sub a10, a8, a7
+ bgeui a10, 32, .Ladd_returny
+
+ ssr a10
+ sll a9, a2
+ srl a2, a2
+
+ add a2, a2, a3
+
+ /* Check if the add overflowed into the exponent. */
+ extui a10, a2, 23, 9
+ bne a10, a8, .Ladd_carry
+
+.Ladd_round:
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a9, 1f
+ addi a2, a2, 1
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a9, a9, 1
+ beqz a9, .Ladd_exactlyhalf
+1: leaf_return
+
+.Ladd_returny:
+ mov a2, a3
+ leaf_return
+
+.Ladd_carry:
+ /* The addition has overflowed into the exponent field, so the
+ value needs to be renormalized. The mantissa of the result
+ can be recovered by subtracting the original exponent and
+ adding 0x800000 (which is the explicit "1.0" for the
+ mantissa of the non-shifted operand -- the "1.0" for the
+ shifted operand was already added). The mantissa can then
+ be shifted right by one bit. The explicit "1.0" of the
+ shifted mantissa then needs to be replaced by the exponent,
+ incremented by one to account for the normalizing shift.
+ It is faster to combine these operations: do the shift first
+ and combine the additions and subtractions. If x is the
+ original exponent, the result is:
+ shifted mantissa - (x << 22) + (1 << 22) + (x << 23)
+ or:
+ shifted mantissa + ((x + 1) << 22)
+ Note that the exponent is incremented here by leaving the
+ explicit "1.0" of the mantissa in the exponent field. */
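+
+	/* (Same algebra as the double-precision version: (x << 23) - (x << 22)
+	   == (x << 22), so the three terms collapse to ((x + 1) << 22).) */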
+
+ /* Shift x right by one bit. Save the lsb. */
+ mov a10, a2
+ srli a2, a2, 1
+
+ /* See explanation above. The original exponent is in a8. */
+ addi a8, a8, 1
+ slli a8, a8, 22
+ add a2, a2, a8
+
+ /* Return an Infinity if the exponent overflowed. */
+ ball a2, a6, .Ladd_infinity
+
+ /* Same thing as the "round" code except the msb of the leftover
+ fraction is bit 0 of a10, with the rest of the fraction in a9. */
+ bbci.l a10, 0, 1f
+ addi a2, a2, 1
+ beqz a9, .Ladd_exactlyhalf
+1: leaf_return
+
+.Ladd_infinity:
+ /* Clear the mantissa. */
+ srli a2, a2, 23
+ slli a2, a2, 23
+
+ /* The sign bit may have been lost in a carry-out. Put it back. */
+ slli a8, a8, 1
+ or a2, a2, a8
+ leaf_return
+
+.Ladd_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ leaf_return
+
+
+ /* Subtraction */
+__subsf3_aux:
+
+ /* Handle NaNs and Infinities. (This code is placed before the
+ start of the function just to keep it in range of the limited
+ branch displacements.) */
+
+.Lsub_xnan_or_inf:
+ /* If y is neither Infinity nor NaN, return x. */
+ bnall a3, a6, 1f
+ /* Both x and y are either NaN or Inf, so the result is NaN. */
+ movi a4, 0x400000 /* make it a quiet NaN */
+ or a2, a2, a4
+1: leaf_return
+
+.Lsub_ynan_or_inf:
+ /* Negate y and return it. */
+ slli a7, a6, 8
+ xor a2, a3, a7
+ leaf_return
+
+.Lsub_opposite_signs:
+ /* Operand signs differ. Do an addition. */
+ slli a7, a6, 8
+ xor a3, a3, a7
+ j .Ladd_same_sign
+
+ .align 4
+ .global __subsf3
+ .type __subsf3, @function
+__subsf3:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+
+ /* Check if the two operands have the same sign. */
+ xor a7, a2, a3
+ bltz a7, .Lsub_opposite_signs
+
+.Lsub_same_sign:
+	/* Check if either exponent is all ones, i.e., 0xff (NaN or Infinity). */
+ ball a2, a6, .Lsub_xnan_or_inf
+ ball a3, a6, .Lsub_ynan_or_inf
+
+ /* Compare the operands. In contrast to addition, the entire
+ value matters here. */
+ extui a7, a2, 23, 8
+ extui a8, a3, 23, 8
+ bltu a2, a3, .Lsub_xsmaller
+
+.Lsub_ysmaller:
+ /* Check if the smaller (or equal) exponent is zero. */
+ bnone a3, a6, .Lsub_yexpzero
+
+	/* Replace y's sign/exponent with the explicit "1.0" bit (bit 23). */
+ or a3, a3, a6
+ slli a3, a3, 8
+ srli a3, a3, 8
+
+.Lsub_yexpdiff:
+ /* Compute the exponent difference. */
+ sub a10, a7, a8
+
+	/* Exponent difference >= 32 -- just return the bigger value. */
+ bgeui a10, 32, 1f
+
+ /* Shift y right by the exponent difference. Any bits that are
+ shifted out of y are saved in a9 for rounding the result. */
+ ssr a10
+ movi a9, 0
+ src a9, a3, a9
+ srl a3, a3
+
+ sub a2, a2, a3
+
+ /* Subtract the leftover bits in a9 from zero and propagate any
+ borrow from a2. */
+ neg a9, a9
+ addi a10, a2, -1
+ movnez a2, a10, a9
+
+ /* Check if the subtract underflowed into the exponent. */
+ extui a10, a2, 23, 8
+ beq a10, a7, .Lsub_round
+ j .Lsub_borrow
+
+.Lsub_yexpzero:
+ /* Return zero if the inputs are equal. (For the non-subnormal
+ case, subtracting the "1.0" will cause a borrow from the exponent
+ and this case can be detected when handling the borrow.) */
+ beq a2, a3, .Lsub_return_zero
+
+ /* y is a subnormal value. Replace its sign/exponent with zero,
+ i.e., no implicit "1.0". Unless x is also a subnormal, increment
+ y's apparent exponent because subnormals behave as if they had
+ the minimum (nonzero) exponent. */
+ slli a3, a3, 9
+ srli a3, a3, 9
+ bnone a2, a6, .Lsub_yexpdiff
+ addi a8, a8, 1
+ j .Lsub_yexpdiff
+
+.Lsub_returny:
+ /* Negate and return y. */
+ slli a7, a6, 8
+ xor a2, a3, a7
+1: leaf_return
+
+.Lsub_xsmaller:
+ /* Same thing as the "ysmaller" code, but with x and y swapped and
+ with y negated. */
+ bnone a2, a6, .Lsub_xexpzero
+
+ or a2, a2, a6
+ slli a2, a2, 8
+ srli a2, a2, 8
+
+.Lsub_xexpdiff:
+ sub a10, a8, a7
+ bgeui a10, 32, .Lsub_returny
+
+ ssr a10
+ movi a9, 0
+ src a9, a2, a9
+ srl a2, a2
+
+ /* Negate y. */
+ slli a11, a6, 8
+ xor a3, a3, a11
+
+ sub a2, a3, a2
+
+ neg a9, a9
+ addi a10, a2, -1
+ movnez a2, a10, a9
+
+ /* Check if the subtract underflowed into the exponent. */
+ extui a10, a2, 23, 8
+ bne a10, a8, .Lsub_borrow
+
+.Lsub_round:
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a9, 1f
+ addi a2, a2, 1
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a9, a9, 1
+ beqz a9, .Lsub_exactlyhalf
+1: leaf_return
+
+.Lsub_xexpzero:
+ /* Same as "yexpzero". */
+ beq a2, a3, .Lsub_return_zero
+ slli a2, a2, 9
+ srli a2, a2, 9
+ bnone a3, a6, .Lsub_xexpdiff
+ addi a7, a7, 1
+ j .Lsub_xexpdiff
+
+.Lsub_return_zero:
+ movi a2, 0
+ leaf_return
+
+.Lsub_borrow:
+ /* The subtraction has underflowed into the exponent field, so the
+ value needs to be renormalized. Shift the mantissa left as
+ needed to remove any leading zeros and adjust the exponent
+ accordingly. If the exponent is not large enough to remove
+ all the leading zeros, the result will be a subnormal value. */
+
+ slli a8, a2, 9
+ beqz a8, .Lsub_xzero
+ do_nsau a6, a8, a7, a11
+ srli a8, a8, 9
+ bge a6, a10, .Lsub_subnormal
+ addi a6, a6, 1
+
+.Lsub_normalize_shift:
+ /* Shift the mantissa (a8/a9) left by a6. */
+ ssl a6
+ src a8, a8, a9
+ sll a9, a9
+
+ /* Combine the shifted mantissa with the sign and exponent,
+ decrementing the exponent by a6. (The exponent has already
+ been decremented by one due to the borrow from the subtraction,
+ but adding the mantissa will increment the exponent by one.) */
+ srli a2, a2, 23
+ sub a2, a2, a6
+ slli a2, a2, 23
+ add a2, a2, a8
+ j .Lsub_round
+
+.Lsub_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ leaf_return
+
+.Lsub_xzero:
+ /* If there was a borrow from the exponent, and the mantissa and
+ guard digits are all zero, then the inputs were equal and the
+ result should be zero. */
+ beqz a9, .Lsub_return_zero
+
+ /* Only the guard digit is nonzero. Shift by min(24, a10). */
+ addi a11, a10, -24
+ movi a6, 24
+ movltz a6, a10, a11
+ j .Lsub_normalize_shift
+
+.Lsub_subnormal:
+ /* The exponent is too small to shift away all the leading zeros.
+ Set a6 to the current exponent (which has already been
+ decremented by the borrow) so that the exponent of the result
+ will be zero. Do not add 1 to a6 in this case, because: (1)
+ adding the mantissa will not increment the exponent, so there is
+ no need to subtract anything extra from the exponent to
+ compensate, and (2) the effective exponent of a subnormal is 1
+ not 0 so the shift amount must be 1 smaller than normal. */
+ mov a6, a10
+ j .Lsub_normalize_shift
+
+#endif /* L_addsubsf3 */
+
+#ifdef L_mulsf3
+
+ /* Multiplication */
+#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+#define XCHAL_NO_MUL 1
+#endif
+
+__mulsf3_aux:
+
+ /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
+ (This code is placed before the start of the function just to
+ keep it in range of the limited branch displacements.) */
+
+.Lmul_xexpzero:
+ /* Clear the sign bit of x. */
+ slli a2, a2, 1
+ srli a2, a2, 1
+
+ /* If x is zero, return zero. */
+ beqz a2, .Lmul_return_zero
+
+ /* Normalize x. Adjust the exponent in a8. */
+ do_nsau a10, a2, a11, a12
+ addi a10, a10, -8
+ ssl a10
+ sll a2, a2
+ movi a8, 1
+ sub a8, a8, a10
+ j .Lmul_xnormalized
+
+.Lmul_yexpzero:
+ /* Clear the sign bit of y. */
+ slli a3, a3, 1
+ srli a3, a3, 1
+
+ /* If y is zero, return zero. */
+ beqz a3, .Lmul_return_zero
+
+ /* Normalize y. Adjust the exponent in a9. */
+ do_nsau a10, a3, a11, a12
+ addi a10, a10, -8
+ ssl a10
+ sll a3, a3
+ movi a9, 1
+ sub a9, a9, a10
+ j .Lmul_ynormalized
+
+.Lmul_return_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli a2, a7, 31
+ slli a2, a2, 31
+ j .Lmul_done
+
+.Lmul_xnan_or_inf:
+ /* If y is zero, return NaN. */
+ slli a8, a3, 1
+ bnez a8, 1f
+ movi a4, 0x400000 /* make it a quiet NaN */
+ or a2, a2, a4
+ j .Lmul_done
+1:
+ /* If y is NaN, return y. */
+ bnall a3, a6, .Lmul_returnx
+ slli a8, a3, 9
+ beqz a8, .Lmul_returnx
+
+.Lmul_returny:
+ mov a2, a3
+
+.Lmul_returnx:
+ /* Set the sign bit and return. */
+ extui a7, a7, 31, 1
+ slli a2, a2, 1
+ ssai 1
+ src a2, a7, a2
+ j .Lmul_done
+
+.Lmul_ynan_or_inf:
+ /* If x is zero, return NaN. */
+ slli a8, a2, 1
+ bnez a8, .Lmul_returny
+ movi a7, 0x400000 /* make it a quiet NaN */
+ or a2, a3, a7
+ j .Lmul_done
+
+ .align 4
+ .global __mulsf3
+ .type __mulsf3, @function
+__mulsf3:
+#if __XTENSA_CALL0_ABI__
+ leaf_entry sp, 32
+ addi sp, sp, -32
+ s32i a12, sp, 16
+ s32i a13, sp, 20
+ s32i a14, sp, 24
+ s32i a15, sp, 28
+#elif XCHAL_NO_MUL
+ /* This is not really a leaf function; allocate enough stack space
+ to allow CALL12s to a helper function. */
+ leaf_entry sp, 64
+#else
+ leaf_entry sp, 32
+#endif
+ movi a6, 0x7f800000
+
+ /* Get the sign of the result. */
+ xor a7, a2, a3
+
+ /* Check for NaN and infinity. */
+ ball a2, a6, .Lmul_xnan_or_inf
+ ball a3, a6, .Lmul_ynan_or_inf
+
+ /* Extract the exponents. */
+ extui a8, a2, 23, 8
+ extui a9, a3, 23, 8
+
+ beqz a8, .Lmul_xexpzero
+.Lmul_xnormalized:
+ beqz a9, .Lmul_yexpzero
+.Lmul_ynormalized:
+
+ /* Add the exponents. */
+ add a8, a8, a9
+
+ /* Replace sign/exponent fields with explicit "1.0". */
+ movi a10, 0xffffff
+ or a2, a2, a6
+ and a2, a2, a10
+ or a3, a3, a6
+ and a3, a3, a10
+
+ /* Multiply 32x32 to 64 bits. The result ends up in a2/a6. */
+
+#if XCHAL_HAVE_MUL32_HIGH
+
+ mull a6, a2, a3
+ muluh a2, a2, a3
+
+#else
+
+ /* Break the inputs into 16-bit chunks and compute 4 32-bit partial
+ products. These partial products are:
+
+ 0 xl * yl
+
+ 1 xl * yh
+ 2 xh * yl
+
+ 3 xh * yh
+
+ If using the Mul16 or Mul32 multiplier options, these input
+ chunks must be stored in separate registers. For Mac16, the
+ UMUL.AA.* opcodes can specify that the inputs come from either
+ half of the registers, so there is no need to shift them out
+ ahead of time. If there is no multiply hardware, the 16-bit
+ chunks can be extracted when setting up the arguments to the
+ separate multiply function. */
+
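+	/* (A C sketch of the same partial-product sum, for 32-bit
+	   unsigned x and y:
+
+	       uint32_t xl = x & 0xffff, xh = x >> 16;
+	       uint32_t yl = y & 0xffff, yh = y >> 16;
+	       uint32_t pp0 = xl * yl, pp1 = xl * yh;
+	       uint32_t pp2 = xh * yl, pp3 = xh * yh;
+	       uint32_t mid = pp1 + pp2, c = mid < pp1;
+	       uint32_t lo  = pp0 + (mid << 16);
+	       uint32_t hi  = pp3 + (c << 16) + (mid >> 16) + (lo < pp0);
+
+	   The code below does the same, collecting the carries in a9.) */
+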
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ /* Calling a separate multiply function will clobber a0 and requires
+ use of a8 as a temporary, so save those values now. (The function
+ uses a custom ABI so nothing else needs to be saved.) */
+ s32i a0, sp, 0
+ s32i a8, sp, 4
+#endif
+
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
+
+#define a2h a4
+#define a3h a5
+
+ /* Get the high halves of the inputs into registers. */
+ srli a2h, a2, 16
+ srli a3h, a3, 16
+
+#define a2l a2
+#define a3l a3
+
+#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
+ /* Clear the high halves of the inputs. This does not matter
+ for MUL16 because the high bits are ignored. */
+ extui a2, a2, 0, 16
+ extui a3, a3, 0, 16
+#endif
+#endif /* MUL16 || MUL32 */
+
+
+#if XCHAL_HAVE_MUL16
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mul16u dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MUL32
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mull dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MAC16
+
+/* The preprocessor insists on inserting a space when concatenating after
+ a period in the definition of do_mul below. These macros are a workaround
+ using underscores instead of periods when doing the concatenation. */
+#define umul_aa_ll umul.aa.ll
+#define umul_aa_lh umul.aa.lh
+#define umul_aa_hl umul.aa.hl
+#define umul_aa_hh umul.aa.hh
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ umul_aa_ ## xhalf ## yhalf xreg, yreg; \
+ rsr dst, ACCLO
+
+#else /* no multiply hardware */
+
+#define set_arg_l(dst, src) \
+ extui dst, src, 0, 16
+#define set_arg_h(dst, src) \
+ srli dst, src, 16
+
+#if __XTENSA_CALL0_ABI__
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a13, xreg); \
+ set_arg_ ## yhalf (a14, yreg); \
+ call0 .Lmul_mulsi3; \
+ mov dst, a12
+#else
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a14, xreg); \
+ set_arg_ ## yhalf (a15, yreg); \
+ call12 .Lmul_mulsi3; \
+ mov dst, a14
+#endif /* __XTENSA_CALL0_ABI__ */
+
+#endif /* no multiply hardware */
+
+ /* Add pp1 and pp2 into a6 with carry-out in a9. */
+ do_mul(a6, a2, l, a3, h) /* pp 1 */
+ do_mul(a11, a2, h, a3, l) /* pp 2 */
+ movi a9, 0
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Shift the high half of a9/a6 into position in a9. Note that
+ this value can be safely incremented without any carry-outs. */
+ ssai 16
+ src a9, a9, a6
+
+ /* Compute the low word into a6. */
+ do_mul(a11, a2, l, a3, l) /* pp 0 */
+ sll a6, a6
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Compute the high word into a2. */
+ do_mul(a2, a2, h, a3, h) /* pp 3 */
+ add a2, a2, a9
+
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ /* Restore values saved on the stack during the multiplication. */
+ l32i a0, sp, 0
+ l32i a8, sp, 4
+#endif
+#endif /* ! XCHAL_HAVE_MUL32_HIGH */
+
+ /* Shift left by 9 bits, unless there was a carry-out from the
+ multiply, in which case, shift by 8 bits and increment the
+ exponent. */
+ movi a4, 9
+ srli a5, a2, 24 - 9
+ beqz a5, 1f
+ addi a4, a4, -1
+ addi a8, a8, 1
+1: ssl a4
+ src a2, a2, a6
+ sll a6, a6
+
+ /* Subtract the extra bias from the exponent sum (plus one to account
+ for the explicit "1.0" of the mantissa that will be added to the
+ exponent in the final result). */
+ movi a4, 0x80
+ sub a8, a8, a4
+
+ /* Check for over/underflow. The value in a8 is one less than the
+ final exponent, so values in the range 0..fd are OK here. */
+ movi a4, 0xfe
+ bgeu a8, a4, .Lmul_overflow
+
+.Lmul_round:
+ /* Round. */
+ bgez a6, .Lmul_rounded
+ addi a2, a2, 1
+ slli a6, a6, 1
+ beqz a6, .Lmul_exactlyhalf
+
+.Lmul_rounded:
+ /* Add the exponent to the mantissa. */
+ slli a8, a8, 23
+ add a2, a2, a8
+
+.Lmul_addsign:
+ /* Add the sign bit. */
+ srli a7, a7, 31
+ slli a7, a7, 31
+ or a2, a2, a7
+
+.Lmul_done:
+#if __XTENSA_CALL0_ABI__
+ l32i a12, sp, 16
+ l32i a13, sp, 20
+ l32i a14, sp, 24
+ l32i a15, sp, 28
+ addi sp, sp, 32
+#endif
+ leaf_return
+
+.Lmul_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ j .Lmul_rounded
+
+.Lmul_overflow:
+ bltz a8, .Lmul_underflow
+ /* Return +/- Infinity. */
+ movi a8, 0xff
+ slli a2, a8, 23
+ j .Lmul_addsign
+
+.Lmul_underflow:
+ /* Create a subnormal value, where the exponent field contains zero,
+ but the effective exponent is 1. The value of a8 is one less than
+ the actual exponent, so just negate it to get the shift amount. */
+ neg a8, a8
+ mov a9, a6
+ ssr a8
+ bgeui a8, 32, .Lmul_flush_to_zero
+
+ /* Shift a2 right. Any bits that are shifted out of a2 are saved
+ in a6 (combined with the shifted-out bits currently in a6) for
+ rounding the result. */
+ sll a6, a2
+ srl a2, a2
+
+ /* Set the exponent to zero. */
+ movi a8, 0
+
+ /* Pack any nonzero bits shifted out into a6. */
+ beqz a9, .Lmul_round
+ movi a9, 1
+ or a6, a6, a9
+ j .Lmul_round
+
+.Lmul_flush_to_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli a2, a7, 31
+ slli a2, a2, 31
+ j .Lmul_done
+
+#if XCHAL_NO_MUL
+
+ /* For Xtensa processors with no multiply hardware, this simplified
+ version of _mulsi3 is used for multiplying 16-bit chunks of
+ the floating-point mantissas. When using CALL0, this function
+ uses a custom ABI: the inputs are passed in a13 and a14, the
+ result is returned in a12, and a8 and a15 are clobbered. */
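+	/* (Equivalent C, as a sketch; four bits of src1 are consumed per
+	   iteration:
+
+	       uint32_t dst = 0;
+	       while (src1) {
+	           if (src1 & 1) dst += src2;
+	           if (src1 & 2) dst += src2 << 1;
+	           if (src1 & 4) dst += src2 << 2;
+	           if (src1 & 8) dst += src2 << 3;
+	           src1 >>= 4;  src2 <<= 4;
+	       }
+	   ) */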
+ .align 4
+.Lmul_mulsi3:
+ leaf_entry sp, 16
+ .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
+ movi \dst, 0
+1: add \tmp1, \src2, \dst
+ extui \tmp2, \src1, 0, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx2 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 1, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx4 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 2, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx8 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 3, 1
+ movnez \dst, \tmp1, \tmp2
+
+ srli \src1, \src1, 4
+ slli \src2, \src2, 4
+ bnez \src1, 1b
+ .endm
+#if __XTENSA_CALL0_ABI__
+ mul_mulsi3_body a12, a13, a14, a15, a8
+#else
+ /* The result will be written into a2, so save that argument in a4. */
+ mov a4, a2
+ mul_mulsi3_body a2, a4, a3, a5, a6
+#endif
+ leaf_return
+#endif /* XCHAL_NO_MUL */
+#endif /* L_mulsf3 */
+
+#ifdef L_divsf3
+
+ /* Division */
+__divsf3_aux:
+
+ /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
+ (This code is placed before the start of the function just to
+ keep it in range of the limited branch displacements.) */
+
+.Ldiv_yexpzero:
+ /* Clear the sign bit of y. */
+ slli a3, a3, 1
+ srli a3, a3, 1
+
+ /* Check for division by zero. */
+ beqz a3, .Ldiv_yzero
+
+ /* Normalize y. Adjust the exponent in a9. */
+ do_nsau a10, a3, a4, a5
+ addi a10, a10, -8
+ ssl a10
+ sll a3, a3
+ movi a9, 1
+ sub a9, a9, a10
+ j .Ldiv_ynormalized
+
+.Ldiv_yzero:
+ /* y is zero. Return NaN if x is also zero; otherwise, infinity. */
+ slli a4, a2, 1
+ srli a4, a4, 1
+ srli a2, a7, 31
+ slli a2, a2, 31
+ or a2, a2, a6
+ bnez a4, 1f
+ movi a4, 0x400000 /* make it a quiet NaN */
+ or a2, a2, a4
+1: leaf_return
+
+.Ldiv_xexpzero:
+ /* Clear the sign bit of x. */
+ slli a2, a2, 1
+ srli a2, a2, 1
+
+ /* If x is zero, return zero. */
+ beqz a2, .Ldiv_return_zero
+
+ /* Normalize x. Adjust the exponent in a8. */
+ do_nsau a10, a2, a4, a5
+ addi a10, a10, -8
+ ssl a10
+ sll a2, a2
+ movi a8, 1
+ sub a8, a8, a10
+ j .Ldiv_xnormalized
+
+.Ldiv_return_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli a2, a7, 31
+ slli a2, a2, 31
+ leaf_return
+
+.Ldiv_xnan_or_inf:
+ /* Set the sign bit of the result. */
+ srli a7, a3, 31
+ slli a7, a7, 31
+ xor a2, a2, a7
+ /* If y is NaN or Inf, return NaN. */
+ bnall a3, a6, 1f
+ movi a4, 0x400000 /* make it a quiet NaN */
+ or a2, a2, a4
+1: leaf_return
+
+.Ldiv_ynan_or_inf:
+ /* If y is Infinity, return zero. */
+ slli a8, a3, 9
+ beqz a8, .Ldiv_return_zero
+ /* y is NaN; return it. */
+ mov a2, a3
+ leaf_return
+
+ .align 4
+ .global __divsf3
+ .type __divsf3, @function
+__divsf3:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+
+ /* Get the sign of the result. */
+ xor a7, a2, a3
+
+ /* Check for NaN and infinity. */
+ ball a2, a6, .Ldiv_xnan_or_inf
+ ball a3, a6, .Ldiv_ynan_or_inf
+
+ /* Extract the exponents. */
+ extui a8, a2, 23, 8
+ extui a9, a3, 23, 8
+
+ beqz a9, .Ldiv_yexpzero
+.Ldiv_ynormalized:
+ beqz a8, .Ldiv_xexpzero
+.Ldiv_xnormalized:
+
+ /* Subtract the exponents. */
+ sub a8, a8, a9
+
+ /* Replace sign/exponent fields with explicit "1.0". */
+ movi a10, 0xffffff
+ or a2, a2, a6
+ and a2, a2, a10
+ or a3, a3, a6
+ and a3, a3, a10
+
+ /* The first digit of the mantissa division must be a one.
+ Shift x (and adjust the exponent) as needed to make this true. */
+ bltu a3, a2, 1f
+ slli a2, a2, 1
+ addi a8, a8, -1
+1:
+ /* Do the first subtraction and shift. */
+ sub a2, a2, a3
+ slli a2, a2, 1
+
+ /* Put the quotient into a10. */
+ movi a10, 1
+
+ /* Divide one bit at a time for 23 bits. */
+ movi a9, 23
+#if XCHAL_HAVE_LOOPS
+ loop a9, .Ldiv_loopend
+#endif
+.Ldiv_loop:
+ /* Shift the quotient << 1. */
+ slli a10, a10, 1
+
+ /* Is this digit a 0 or 1? */
+ bltu a2, a3, 1f
+
+ /* Output a 1 and subtract. */
+ addi a10, a10, 1
+ sub a2, a2, a3
+
+ /* Shift the dividend << 1. */
+1: slli a2, a2, 1
+
+#if !XCHAL_HAVE_LOOPS
+ addi a9, a9, -1
+ bnez a9, .Ldiv_loop
+#endif
+.Ldiv_loopend:
+
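+	/* (The loop above is restoring division, one quotient bit per
+	   iteration; roughly, in C:
+
+	       for (i = 0; i < 23; i++) {
+	           quot <<= 1;
+	           if (rem >= divisor) { quot |= 1; rem -= divisor; }
+	           rem <<= 1;
+	       }
+
+	   with quot seeded to 1 by the first subtract-and-shift above.) */
+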
+ /* Add the exponent bias (less one to account for the explicit "1.0"
+ of the mantissa that will be added to the exponent in the final
+ result). */
+ addi a8, a8, 0x7e
+
+ /* Check for over/underflow. The value in a8 is one less than the
+ final exponent, so values in the range 0..fd are OK here. */
+ movi a4, 0xfe
+ bgeu a8, a4, .Ldiv_overflow
+
+.Ldiv_round:
+ /* Round. The remainder (<< 1) is in a2. */
+ bltu a2, a3, .Ldiv_rounded
+ addi a10, a10, 1
+ beq a2, a3, .Ldiv_exactlyhalf
+
+.Ldiv_rounded:
+ /* Add the exponent to the mantissa. */
+ slli a8, a8, 23
+ add a2, a10, a8
+
+.Ldiv_addsign:
+ /* Add the sign bit. */
+ srli a7, a7, 31
+ slli a7, a7, 31
+ or a2, a2, a7
+ leaf_return
+
+.Ldiv_overflow:
+ bltz a8, .Ldiv_underflow
+ /* Return +/- Infinity. */
+ addi a8, a4, 1 /* 0xff */
+ slli a2, a8, 23
+ j .Ldiv_addsign
+
+.Ldiv_exactlyhalf:
+ /* Remainder is exactly half the divisor. Round even. */
+ srli a10, a10, 1
+ slli a10, a10, 1
+ j .Ldiv_rounded
+
+.Ldiv_underflow:
+ /* Create a subnormal value, where the exponent field contains zero,
+ but the effective exponent is 1. The value of a8 is one less than
+ the actual exponent, so just negate it to get the shift amount. */
+ neg a8, a8
+ ssr a8
+ bgeui a8, 32, .Ldiv_flush_to_zero
+
+ /* Shift a10 right. Any bits that are shifted out of a10 are
+ saved in a6 for rounding the result. */
+ sll a6, a10
+ srl a10, a10
+
+ /* Set the exponent to zero. */
+ movi a8, 0
+
+ /* Pack any nonzero remainder (in a2) into a6. */
+ beqz a2, 1f
+ movi a9, 1
+ or a6, a6, a9
+
+ /* Round a10 based on the bits shifted out into a6. */
+1: bgez a6, .Ldiv_rounded
+ addi a10, a10, 1
+ slli a6, a6, 1
+ bnez a6, .Ldiv_rounded
+ srli a10, a10, 1
+ slli a10, a10, 1
+ j .Ldiv_rounded
+
+.Ldiv_flush_to_zero:
+ /* Return zero with the appropriate sign bit. */
+ srli a2, a7, 31
+ slli a2, a2, 31
+ leaf_return
+
+#endif /* L_divsf3 */
+
+#ifdef L_cmpsf2
+
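+/* These routines follow the usual libgcc soft-float convention: the
+   result is an integer to be compared against zero with the same
+   relation (e.g., __gtsf2 (a, b) > 0 iff a > b), and a NaN operand
+   yields a value that makes the relation false.  __unordsf2 returns
+   nonzero iff either operand is a NaN. */
+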
+ /* Equal and Not Equal */
+
+ .align 4
+ .global __eqsf2
+ .global __nesf2
+ .set __nesf2, __eqsf2
+ .type __eqsf2, @function
+__eqsf2:
+ leaf_entry sp, 16
+ bne a2, a3, 4f
+
+ /* The values are equal but NaN != NaN. Check the exponent. */
+ movi a6, 0x7f800000
+ ball a2, a6, 3f
+
+ /* Equal. */
+ movi a2, 0
+ leaf_return
+
+ /* Not equal. */
+2: movi a2, 1
+ leaf_return
+
+	/* x == y with the exponent all ones; they are equal only if the
+	   mantissa is zero (Inf), since NaN != NaN. */
+3: slli a7, a2, 9
+ j 5f
+
+ /* Check if x and y are zero with different signs. */
+4: or a7, a2, a3
+ slli a7, a7, 1
+
+	/* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
+	   of x when exponent(x) == 0xff and x == y. */
+5: movi a2, 0
+ movi a3, 1
+ movnez a2, a3, a7
+ leaf_return
+
+
+ /* Greater Than */
+
+ .align 4
+ .global __gtsf2
+ .type __gtsf2, @function
+__gtsf2:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+ ball a2, a6, 2f
+1: bnall a3, a6, .Lle_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, a3, 9
+ beqz a7, .Lle_cmp
+ movi a2, 0
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, a2, 9
+ beqz a7, 1b
+ movi a2, 0
+ leaf_return
+
+
+ /* Less Than or Equal */
+
+ .align 4
+ .global __lesf2
+ .type __lesf2, @function
+__lesf2:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+ ball a2, a6, 2f
+1: bnall a3, a6, .Lle_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, a3, 9
+ beqz a7, .Lle_cmp
+ movi a2, 1
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, a2, 9
+ beqz a7, 1b
+ movi a2, 1
+ leaf_return
+
+.Lle_cmp:
+ /* Check if x and y have different signs. */
+ xor a7, a2, a3
+ bltz a7, .Lle_diff_signs
+
+ /* Check if x is negative. */
+ bltz a2, .Lle_xneg
+
+ /* Check if x <= y. */
+ bltu a3, a2, 5f
+4: movi a2, 0
+ leaf_return
+
+.Lle_xneg:
+ /* Check if y <= x. */
+ bgeu a2, a3, 4b
+5: movi a2, 1
+ leaf_return
+
+.Lle_diff_signs:
+ bltz a2, 4b
+
+ /* Check if both x and y are zero. */
+ or a7, a2, a3
+ slli a7, a7, 1
+ movi a2, 1
+ movi a3, 0
+ moveqz a2, a3, a7
+ leaf_return
+
+
+ /* Greater Than or Equal */
+
+ .align 4
+ .global __gesf2
+ .type __gesf2, @function
+__gesf2:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+ ball a2, a6, 2f
+1: bnall a3, a6, .Llt_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, a3, 9
+ beqz a7, .Llt_cmp
+ movi a2, -1
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, a2, 9
+ beqz a7, 1b
+ movi a2, -1
+ leaf_return
+
+
+ /* Less Than */
+
+ .align 4
+ .global __ltsf2
+ .type __ltsf2, @function
+__ltsf2:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+ ball a2, a6, 2f
+1: bnall a3, a6, .Llt_cmp
+
+ /* Check if y is a NaN. */
+ slli a7, a3, 9
+ beqz a7, .Llt_cmp
+ movi a2, 0
+ leaf_return
+
+ /* Check if x is a NaN. */
+2: slli a7, a2, 9
+ beqz a7, 1b
+ movi a2, 0
+ leaf_return
+
+.Llt_cmp:
+ /* Check if x and y have different signs. */
+ xor a7, a2, a3
+ bltz a7, .Llt_diff_signs
+
+ /* Check if x is negative. */
+ bltz a2, .Llt_xneg
+
+ /* Check if x < y. */
+ bgeu a2, a3, 5f
+4: movi a2, -1
+ leaf_return
+
+.Llt_xneg:
+ /* Check if y < x. */
+ bltu a3, a2, 4b
+5: movi a2, 0
+ leaf_return
+
+.Llt_diff_signs:
+ bgez a2, 5b
+
+	/* x is negative and y positive, so x < y unless both are +/-0. */
+ or a7, a2, a3
+ slli a7, a7, 1
+ movi a2, 0
+ movi a3, -1
+ movnez a2, a3, a7
+ leaf_return
+
+
+ /* Unordered */
+
+ .align 4
+ .global __unordsf2
+ .type __unordsf2, @function
+__unordsf2:
+ leaf_entry sp, 16
+ movi a6, 0x7f800000
+ ball a2, a6, 3f
+1: ball a3, a6, 4f
+2: movi a2, 0
+ leaf_return
+
+3: slli a7, a2, 9
+ beqz a7, 1b
+ movi a2, 1
+ leaf_return
+
+4: slli a7, a3, 9
+ beqz a7, 2b
+ movi a2, 1
+ leaf_return
+
+#endif /* L_cmpsf2 */
+
+#ifdef L_fixsfsi
+
+ .align 4
+ .global __fixsfsi
+ .type __fixsfsi, @function
+__fixsfsi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7f800000
+ ball a2, a6, .Lfixsfsi_nan_or_inf
+
+ /* Extract the exponent and check if 0 < (exp - 0x7e) < 32. */
+ extui a4, a2, 23, 8
+ addi a4, a4, -0x7e
+ bgei a4, 32, .Lfixsfsi_maxint
+ blti a4, 1, .Lfixsfsi_zero
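+
+	/* (a4 is now the number of integer bits in the result: biased
+	   exponent 0x7f encodes a value in [1.0, 2.0), i.e., one bit.) */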
+
+ /* Add explicit "1.0" and shift << 8. */
+ or a7, a2, a6
+ slli a5, a7, 8
+
+ /* Shift back to the right, based on the exponent. */
+ ssl a4 /* shift by 32 - a4 */
+ srl a5, a5
+
+ /* Negate the result if sign != 0. */
+ neg a2, a5
+ movgez a2, a5, a7
+ leaf_return
+
+.Lfixsfsi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, a2, 9
+ beqz a4, .Lfixsfsi_maxint
+
+ /* Translate NaN to +maxint. */
+ movi a2, 0
+
+.Lfixsfsi_maxint:
+ slli a4, a6, 8 /* 0x80000000 */
+ addi a5, a4, -1 /* 0x7fffffff */
+ movgez a4, a5, a2
+ mov a2, a4
+ leaf_return
+
+.Lfixsfsi_zero:
+ movi a2, 0
+ leaf_return
+
+#endif /* L_fixsfsi */
+
+#ifdef L_fixsfdi
+
+ .align 4
+ .global __fixsfdi
+ .type __fixsfdi, @function
+__fixsfdi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7f800000
+ ball a2, a6, .Lfixsfdi_nan_or_inf
+
+ /* Extract the exponent and check if 0 < (exp - 0x7e) < 64. */
+ extui a4, a2, 23, 8
+ addi a4, a4, -0x7e
+ bgei a4, 64, .Lfixsfdi_maxint
+ blti a4, 1, .Lfixsfdi_zero
+
+ /* Add explicit "1.0" and shift << 8. */
+ or a7, a2, a6
+ slli xh, a7, 8
+
+ /* Shift back to the right, based on the exponent. */
+ ssl a4 /* shift by 64 - a4 */
+ bgei a4, 32, .Lfixsfdi_smallshift
+ srl xl, xh
+ movi xh, 0
+
+.Lfixsfdi_shifted:
+ /* Negate the result if sign != 0. */
+ bgez a7, 1f
+ neg xl, xl
+ neg xh, xh
+ beqz xl, 1f
+ addi xh, xh, -1
+1: leaf_return
+
+.Lfixsfdi_smallshift:
+ movi xl, 0
+ sll xl, xh
+ srl xh, xh
+ j .Lfixsfdi_shifted
+
+.Lfixsfdi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, a2, 9
+ beqz a4, .Lfixsfdi_maxint
+
+ /* Translate NaN to +maxint. */
+ movi a2, 0
+
+.Lfixsfdi_maxint:
+ slli a7, a6, 8 /* 0x80000000 */
+ bgez a2, 1f
+ mov xh, a7
+ movi xl, 0
+ leaf_return
+
+1: addi xh, a7, -1 /* 0x7fffffff */
+ movi xl, -1
+ leaf_return
+
+.Lfixsfdi_zero:
+ movi xh, 0
+ movi xl, 0
+ leaf_return
+
+#endif /* L_fixsfdi */
+
+#ifdef L_fixunssfsi
+
+ .align 4
+ .global __fixunssfsi
+ .type __fixunssfsi, @function
+__fixunssfsi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7f800000
+ ball a2, a6, .Lfixunssfsi_nan_or_inf
+
+ /* Extract the exponent and check if 0 <= (exp - 0x7f) < 32. */
+ extui a4, a2, 23, 8
+ addi a4, a4, -0x7f
+ bgei a4, 32, .Lfixunssfsi_maxint
+ bltz a4, .Lfixunssfsi_zero
+
+ /* Add explicit "1.0" and shift << 8. */
+ or a7, a2, a6
+ slli a5, a7, 8
+
+ /* Shift back to the right, based on the exponent. */
+ addi a4, a4, 1
+ beqi a4, 32, .Lfixunssfsi_bigexp
+ ssl a4 /* shift by 32 - a4 */
+ srl a5, a5
+
+ /* Negate the result if sign != 0. */
+ neg a2, a5
+ movgez a2, a5, a7
+ leaf_return
+
+.Lfixunssfsi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, a2, 9
+ beqz a4, .Lfixunssfsi_maxint
+
+ /* Translate NaN to 0xffffffff. */
+ movi a2, -1
+ leaf_return
+
+.Lfixunssfsi_maxint:
+ slli a4, a6, 8 /* 0x80000000 */
+ movi a5, -1 /* 0xffffffff */
+ movgez a4, a5, a2
+ mov a2, a4
+ leaf_return
+
+.Lfixunssfsi_zero:
+ movi a2, 0
+ leaf_return
+
+.Lfixunssfsi_bigexp:
+ /* Handle unsigned maximum exponent case. */
+ bltz a2, 1f
+ mov a2, a5 /* no shift needed */
+ leaf_return
+
+ /* Return 0x80000000 if negative. */
+1: slli a2, a6, 8
+ leaf_return
+
+#endif /* L_fixunssfsi */
+
+#ifdef L_fixunssfdi
+
+ .align 4
+ .global __fixunssfdi
+ .type __fixunssfdi, @function
+__fixunssfdi:
+ leaf_entry sp, 16
+
+ /* Check for NaN and Infinity. */
+ movi a6, 0x7f800000
+ ball a2, a6, .Lfixunssfdi_nan_or_inf
+
+ /* Extract the exponent and check if 0 <= (exp - 0x7f) < 64. */
+ extui a4, a2, 23, 8
+ addi a4, a4, -0x7f
+ bgei a4, 64, .Lfixunssfdi_maxint
+ bltz a4, .Lfixunssfdi_zero
+
+ /* Add explicit "1.0" and shift << 8. */
+ or a7, a2, a6
+ slli xh, a7, 8
+
+ /* Shift back to the right, based on the exponent. */
+ addi a4, a4, 1
+ beqi a4, 64, .Lfixunssfdi_bigexp
+ ssl a4 /* shift by 64 - a4 */
+ bgei a4, 32, .Lfixunssfdi_smallshift
+ srl xl, xh
+ movi xh, 0
+
+.Lfixunssfdi_shifted:
+ /* Negate the result if sign != 0. */
+ bgez a7, 1f
+ neg xl, xl
+ neg xh, xh
+ beqz xl, 1f
+ addi xh, xh, -1
+1: leaf_return
+
+.Lfixunssfdi_smallshift:
+ movi xl, 0
+ src xl, xh, xl
+ srl xh, xh
+ j .Lfixunssfdi_shifted
+
+.Lfixunssfdi_nan_or_inf:
+ /* Handle Infinity and NaN. */
+ slli a4, a2, 9
+ beqz a4, .Lfixunssfdi_maxint
+
+ /* Translate NaN to 0xffffffff.... */
+1: movi xh, -1
+ movi xl, -1
+ leaf_return
+
+.Lfixunssfdi_maxint:
+ bgez a2, 1b
+2: slli xh, a6, 8 /* 0x80000000 */
+ movi xl, 0
+ leaf_return
+
+.Lfixunssfdi_zero:
+ movi xh, 0
+ movi xl, 0
+ leaf_return
+
+.Lfixunssfdi_bigexp:
+ /* Handle unsigned maximum exponent case. */
+ bltz a7, 2b
+ movi xl, 0
+ leaf_return /* no shift needed */
+
+#endif /* L_fixunssfdi */
+
+#ifdef L_floatsisf
+
+ .align 4
+ .global __floatunsisf
+ .type __floatunsisf, @function
+__floatunsisf:
+ leaf_entry sp, 16
+ beqz a2, .Lfloatsisf_return
+
+ /* Set the sign to zero and jump to the floatsisf code. */
+ movi a7, 0
+ j .Lfloatsisf_normalize
+
+ .align 4
+ .global __floatsisf
+ .type __floatsisf, @function
+__floatsisf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ beqz a2, .Lfloatsisf_return
+
+ /* Save the sign. */
+ extui a7, a2, 31, 1
+
+ /* Get the absolute value. */
+#if XCHAL_HAVE_ABS
+ abs a2, a2
+#else
+ neg a4, a2
+ movltz a2, a4, a2
+#endif
+
+.Lfloatsisf_normalize:
+ /* Normalize with the first 1 bit in the msb. */
+ do_nsau a4, a2, a5, a6
+ ssl a4
+ sll a5, a2
+
+ /* Shift the mantissa into position, with rounding bits in a6. */
+ srli a2, a5, 8
+ slli a6, a5, (32 - 8)
+
+ /* Set the exponent. */
+ movi a5, 0x9d /* 0x7e + 31 */
+ sub a5, a5, a4
+ slli a5, a5, 23
+ add a2, a2, a5
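+	/* (0x9d rather than 0x9e because the explicit "1.0" kept at bit 23
+	   of the mantissa adds one more to the exponent field here.) */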
+
+ /* Add the sign. */
+ slli a7, a7, 31
+ or a2, a2, a7
+
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a6, .Lfloatsisf_return
+ addi a2, a2, 1 /* Overflow to the exponent is OK. */
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a6, a6, 1
+ beqz a6, .Lfloatsisf_exactlyhalf
+
+.Lfloatsisf_return:
+ leaf_return
+
+.Lfloatsisf_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ leaf_return
+
+#endif /* L_floatsisf */
+
+#ifdef L_floatdisf
+
+ .align 4
+ .global __floatundisf
+ .type __floatundisf, @function
+__floatundisf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ or a4, xh, xl
+ beqz a4, 2f
+
+ /* Set the sign to zero and jump to the floatdisf code. */
+ movi a7, 0
+ j .Lfloatdisf_normalize
+
+ .align 4
+ .global __floatdisf
+ .type __floatdisf, @function
+__floatdisf:
+ leaf_entry sp, 16
+
+ /* Check for zero. */
+ or a4, xh, xl
+ beqz a4, 2f
+
+ /* Save the sign. */
+ extui a7, xh, 31, 1
+
+ /* Get the absolute value. */
+ bgez xh, .Lfloatdisf_normalize
+ neg xl, xl
+ neg xh, xh
+ beqz xl, .Lfloatdisf_normalize
+ addi xh, xh, -1
+
+.Lfloatdisf_normalize:
+ /* Normalize with the first 1 bit in the msb of xh. */
+ beqz xh, .Lfloatdisf_bigshift
+ do_nsau a4, xh, a5, a6
+ ssl a4
+ src xh, xh, xl
+ sll xl, xl
+
+.Lfloatdisf_shifted:
+ /* Shift the mantissa into position, with rounding bits in a6. */
+ ssai 8
+ sll a5, xl
+ src a6, xh, xl
+ srl xh, xh
+	beqz	a5, 1f
+	movi	a5, 1		/* fold the lost low bits into a6 as a */
+	or	a6, a6, a5	/* "sticky" bit so rounding still sees them */
+1:
+ /* Set the exponent. */
+ movi a5, 0xbd /* 0x7e + 63 */
+ sub a5, a5, a4
+ slli a5, a5, 23
+ add a2, xh, a5
+
+ /* Add the sign. */
+ slli a7, a7, 31
+ or a2, a2, a7
+
+ /* Round up if the leftover fraction is >= 1/2. */
+ bgez a6, 2f
+ addi a2, a2, 1 /* Overflow to the exponent is OK. */
+
+ /* Check if the leftover fraction is exactly 1/2. */
+ slli a6, a6, 1
+ beqz a6, .Lfloatdisf_exactlyhalf
+2: leaf_return
+
+.Lfloatdisf_bigshift:
+ /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
+ do_nsau a4, xl, a5, a6
+ ssl a4
+ sll xh, xl
+ movi xl, 0
+ addi a4, a4, 32
+ j .Lfloatdisf_shifted
+
+.Lfloatdisf_exactlyhalf:
+ /* Round down to the nearest even value. */
+ srli a2, a2, 1
+ slli a2, a2, 1
+ leaf_return
+
+#endif /* L_floatdisf */
diff --git a/libgcc/config/xtensa/lib1funcs.S b/libgcc/config/xtensa/lib1funcs.S
new file mode 100644
index 00000000000..071b9171177
--- /dev/null
+++ b/libgcc/config/xtensa/lib1funcs.S
@@ -0,0 +1,845 @@
+/* Assembly functions for the Xtensa version of libgcc1.
+ Copyright (C) 2001, 2002, 2003, 2005, 2006, 2007, 2009
+ Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "xtensa-config.h"
+
+/* Define macros for the ABS and ADDX* instructions to handle cases
+ where they are not included in the Xtensa processor configuration. */
+
+ .macro do_abs dst, src, tmp
+#if XCHAL_HAVE_ABS
+ abs \dst, \src
+#else
+ neg \tmp, \src
+ movgez \tmp, \src, \src
+ mov \dst, \tmp
+#endif
+ .endm
+
+ .macro do_addx2 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx2 \dst, \as, \at
+#else
+ slli \tmp, \as, 1
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+ .macro do_addx4 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx4 \dst, \as, \at
+#else
+ slli \tmp, \as, 2
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+ .macro do_addx8 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+ addx8 \dst, \as, \at
+#else
+ slli \tmp, \as, 3
+ add \dst, \tmp, \at
+#endif
+ .endm
+
+/* Define macros for leaf function entry and return, supporting either the
+ standard register windowed ABI or the non-windowed call0 ABI. These
+ macros do not allocate any extra stack space, so they only work for
+ leaf functions that do not need to spill anything to the stack. */
+
+ .macro leaf_entry reg, size
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ entry \reg, \size
+#else
+ /* do nothing */
+#endif
+ .endm
+
+ .macro leaf_return
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ retw
+#else
+ ret
+#endif
+ .endm
+
+
+#ifdef L_mulsi3
+ .align 4
+ .global __mulsi3
+ .type __mulsi3, @function
+__mulsi3:
+ leaf_entry sp, 16
+
+#if XCHAL_HAVE_MUL32
+ mull a2, a2, a3
+
+#elif XCHAL_HAVE_MUL16
+ or a4, a2, a3
+ srai a4, a4, 16
+ bnez a4, .LMUL16
+ mul16u a2, a2, a3
+ leaf_return
+.LMUL16:
+ srai a4, a2, 16
+ srai a5, a3, 16
+ mul16u a7, a4, a3
+ mul16u a6, a5, a2
+ mul16u a4, a2, a3
+ add a7, a7, a6
+ slli a7, a7, 16
+ add a2, a7, a4
+
+#elif XCHAL_HAVE_MAC16
+ mul.aa.hl a2, a3
+ mula.aa.lh a2, a3
+ rsr a5, ACCLO
+ umul.aa.ll a2, a3
+ rsr a4, ACCLO
+ slli a5, a5, 16
+ add a2, a4, a5
+
+#else /* !MUL32 && !MUL16 && !MAC16 */
+
+ /* Multiply one bit at a time, but unroll the loop 4x to better
+ exploit the addx instructions and avoid overhead.
+ Peel the first iteration to save a cycle on init. */
+
+ /* Avoid negative numbers. */
+ xor a5, a2, a3 /* Top bit is 1 if one input is negative. */
+ do_abs a3, a3, a6
+ do_abs a2, a2, a6
+
+ /* Swap so the second argument is smaller. */
+ sub a7, a2, a3
+ mov a4, a3
+ movgez a4, a2, a7 /* a4 = max (a2, a3) */
+ movltz a3, a2, a7 /* a3 = min (a2, a3) */
+
+ movi a2, 0
+ extui a6, a3, 0, 1
+ movnez a2, a4, a6
+
+ do_addx2 a7, a4, a2, a7
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ do_addx4 a7, a4, a2, a7
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ do_addx8 a7, a4, a2, a7
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+ bgeui a3, 16, .Lmult_main_loop
+ neg a3, a2
+ movltz a2, a3, a5
+ leaf_return
+
+ .align 4
+.Lmult_main_loop:
+ srli a3, a3, 4
+ slli a4, a4, 4
+
+ add a7, a4, a2
+ extui a6, a3, 0, 1
+ movnez a2, a7, a6
+
+ do_addx2 a7, a4, a2, a7
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ do_addx4 a7, a4, a2, a7
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ do_addx8 a7, a4, a2, a7
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+ bgeui a3, 16, .Lmult_main_loop
+
+ neg a3, a2
+ movltz a2, a3, a5
+
+#endif /* !MUL32 && !MUL16 && !MAC16 */
+
+ leaf_return
+ .size __mulsi3, . - __mulsi3
+
+#endif /* L_mulsi3 */
+
+
+#ifdef L_umulsidi3
+
+#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+#define XCHAL_NO_MUL 1
+#endif
+
+ .align 4
+ .global __umulsidi3
+ .type __umulsidi3, @function
+__umulsidi3:
+#if __XTENSA_CALL0_ABI__
+ leaf_entry sp, 32
+ addi sp, sp, -32
+ s32i a12, sp, 16
+ s32i a13, sp, 20
+ s32i a14, sp, 24
+ s32i a15, sp, 28
+#elif XCHAL_NO_MUL
+ /* This is not really a leaf function; allocate enough stack space
+ to allow CALL12s to a helper function. */
+ leaf_entry sp, 48
+#else
+ leaf_entry sp, 16
+#endif
+
+#ifdef __XTENSA_EB__
+#define wh a2
+#define wl a3
+#else
+#define wh a3
+#define wl a2
+#endif /* __XTENSA_EB__ */
+
+ /* This code is taken from the mulsf3 routine in ieee754-sf.S.
+ See more comments there. */
+
+#if XCHAL_HAVE_MUL32_HIGH
+ mull a6, a2, a3
+ muluh wh, a2, a3
+ mov wl, a6
+
+#else /* ! MUL32_HIGH */
+
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ /* a0 and a8 will be clobbered by calling the multiply function
+ but a8 is not used here and need not be saved. */
+ s32i a0, sp, 0
+#endif
+
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
+
+#define a2h a4
+#define a3h a5
+
+ /* Get the high halves of the inputs into registers. */
+ srli a2h, a2, 16
+ srli a3h, a3, 16
+
+#define a2l a2
+#define a3l a3
+
+#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
+ /* Clear the high halves of the inputs. This does not matter
+ for MUL16 because the high bits are ignored. */
+ extui a2, a2, 0, 16
+ extui a3, a3, 0, 16
+#endif
+#endif /* MUL16 || MUL32 */
+
+
+#if XCHAL_HAVE_MUL16
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mul16u dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MUL32
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mull dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MAC16
+
+/* The preprocessor insists on inserting a space when concatenating after
+ a period in the definition of do_mul below. These macros are a workaround
+ using underscores instead of periods when doing the concatenation. */
+#define umul_aa_ll umul.aa.ll
+#define umul_aa_lh umul.aa.lh
+#define umul_aa_hl umul.aa.hl
+#define umul_aa_hh umul.aa.hh
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ umul_aa_ ## xhalf ## yhalf xreg, yreg; \
+ rsr dst, ACCLO
+
+#else /* no multiply hardware */
+
+#define set_arg_l(dst, src) \
+ extui dst, src, 0, 16
+#define set_arg_h(dst, src) \
+ srli dst, src, 16
+
+#if __XTENSA_CALL0_ABI__
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a13, xreg); \
+ set_arg_ ## yhalf (a14, yreg); \
+ call0 .Lmul_mulsi3; \
+ mov dst, a12
+#else
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a14, xreg); \
+ set_arg_ ## yhalf (a15, yreg); \
+ call12 .Lmul_mulsi3; \
+ mov dst, a14
+#endif /* __XTENSA_CALL0_ABI__ */
+
+#endif /* no multiply hardware */
+
+ /* Add pp1 and pp2 into a6 with carry-out in a9. */
+ do_mul(a6, a2, l, a3, h) /* pp 1 */
+ do_mul(a11, a2, h, a3, l) /* pp 2 */
+ movi a9, 0
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Shift the high half of a9/a6 into position in a9. Note that
+ this value can be safely incremented without any carry-outs. */
+ ssai 16
+ src a9, a9, a6
+
+ /* Compute the low word into a6. */
+ do_mul(a11, a2, l, a3, l) /* pp 0 */
+ sll a6, a6
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Compute the high word into wh. */
+ do_mul(wh, a2, h, a3, h) /* pp 3 */
+ add wh, wh, a9
+ mov wl, a6
+
+#endif /* !MUL32_HIGH */
+
+#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
+ /* Restore the original return address. */
+ l32i a0, sp, 0
+#endif
+#if __XTENSA_CALL0_ABI__
+ l32i a12, sp, 16
+ l32i a13, sp, 20
+ l32i a14, sp, 24
+ l32i a15, sp, 28
+ addi sp, sp, 32
+#endif
+ leaf_return
+
+#if XCHAL_NO_MUL
+
+ /* For Xtensa processors with no multiply hardware, this simplified
+ version of _mulsi3 is used for multiplying 16-bit chunks of
+ the floating-point mantissas. When using CALL0, this function
+ uses a custom ABI: the inputs are passed in a13 and a14, the
+ result is returned in a12, and a8 and a15 are clobbered. */
+ .align 4
+.Lmul_mulsi3:
+ leaf_entry sp, 16
+ .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
+ movi \dst, 0
+1: add \tmp1, \src2, \dst
+ extui \tmp2, \src1, 0, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx2 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 1, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx4 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 2, 1
+ movnez \dst, \tmp1, \tmp2
+
+ do_addx8 \tmp1, \src2, \dst, \tmp1
+ extui \tmp2, \src1, 3, 1
+ movnez \dst, \tmp1, \tmp2
+
+ srli \src1, \src1, 4
+ slli \src2, \src2, 4
+ bnez \src1, 1b
+ .endm
+#if __XTENSA_CALL0_ABI__
+ mul_mulsi3_body a12, a13, a14, a15, a8
+#else
+ /* The result will be written into a2, so save that argument in a4. */
+ mov a4, a2
+ mul_mulsi3_body a2, a4, a3, a5, a6
+#endif
+ leaf_return
+#endif /* XCHAL_NO_MUL */
+
+ .size __umulsidi3, . - __umulsidi3
+
+#endif /* L_umulsidi3 */
+
+
+/* Define a macro for the NSAU (unsigned normalize shift amount)
+ instruction, which computes the number of leading zero bits,
+ to handle cases where it is not included in the Xtensa processor
+ configuration. */
+
+ .macro do_nsau cnt, val, tmp, a
+#if XCHAL_HAVE_NSA
+ nsau \cnt, \val
+#else
+ mov \a, \val
+ movi \cnt, 0
+ extui \tmp, \a, 16, 16
+ bnez \tmp, 0f
+ movi \cnt, 16
+ slli \a, \a, 16
+0:
+ extui \tmp, \a, 24, 8
+ bnez \tmp, 1f
+ addi \cnt, \cnt, 8
+ slli \a, \a, 8
+1:
+ movi \tmp, __nsau_data
+ extui \a, \a, 24, 8
+ add \tmp, \tmp, \a
+ l8ui \tmp, \tmp, 0
+ add \cnt, \cnt, \tmp
+#endif /* !XCHAL_HAVE_NSA */
+ .endm
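+
+/* Example: "do_nsau a2, a2, a3, a4" leaves in a2 the number of leading
+   zero bits of the original a2; a zero input yields 32, matching the
+   NSAU instruction's result. */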
+
+#ifdef L_clz
+ .section .rodata
+ .align 4
+ .global __nsau_data
+ .type __nsau_data, @object
+__nsau_data:
+#if !XCHAL_HAVE_NSA
+ .byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4
+ .byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+#endif /* !XCHAL_HAVE_NSA */
+ .size __nsau_data, . - __nsau_data
+ .hidden __nsau_data
+#endif /* L_clz */
+
+
+#ifdef L_clzsi2
+ .align 4
+ .global __clzsi2
+ .type __clzsi2, @function
+__clzsi2:
+ leaf_entry sp, 16
+ do_nsau a2, a2, a3, a4
+ leaf_return
+ .size __clzsi2, . - __clzsi2
+
+#endif /* L_clzsi2 */
+
+
+#ifdef L_ctzsi2
+ .align 4
+ .global __ctzsi2
+ .type __ctzsi2, @function
+__ctzsi2:
+ leaf_entry sp, 16
+ neg a3, a2
+ and a3, a3, a2
+ do_nsau a2, a3, a4, a5
+ neg a2, a2
+ addi a2, a2, 31
+ leaf_return
+ .size __ctzsi2, . - __ctzsi2
+
+#endif /* L_ctzsi2 */
+
+
+#ifdef L_ffssi2
+ .align 4
+ .global __ffssi2
+ .type __ffssi2, @function
+__ffssi2:
+ leaf_entry sp, 16
+ neg a3, a2
+ and a3, a3, a2
+ do_nsau a2, a3, a4, a5
+ neg a2, a2
+ addi a2, a2, 32
+ leaf_return
+ .size __ffssi2, . - __ffssi2
+
+#endif /* L_ffssi2 */
+
+
+#ifdef L_udivsi3
+ .align 4
+ .global __udivsi3
+ .type __udivsi3, @function
+__udivsi3:
+ leaf_entry sp, 16
+#if XCHAL_HAVE_DIV32
+ quou a2, a2, a3
+#else
+ bltui a3, 2, .Lle_one /* check if the divisor <= 1 */
+
+ mov a6, a2 /* keep dividend in a6 */
+ do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */
+ do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
+ ssl a4
+ sll a3, a3 /* divisor <<= count */
+ movi a2, 0 /* quotient = 0 */
+
+ /* test-subtract-and-shift loop; one quotient bit on each iteration */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
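+	/* (Sketch of the loop plus the final correction in C:
+
+	       while (count--) {
+	           if (dividend >= divisor) { dividend -= divisor; quot++; }
+	           quot <<= 1;
+	           divisor >>= 1;
+	       }
+	       if (dividend >= divisor) quot++;
+
+	   The quotient bit is added before the shift, matching the
+	   addi-then-slli order above.) */
+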
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 /* increment quotient if dividend >= divisor */
+.Lreturn:
+ leaf_return
+
+.Lle_one:
+	beqz	a3, .Lerror	/* divisor == 0: raise an error */
+	leaf_return		/* divisor == 1: return the dividend */
+
+.Lspecial:
+ /* return dividend >= divisor */
+ bltu a6, a3, .Lreturn0
+ movi a2, 1
+ leaf_return
+
+.Lerror:
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ leaf_return
+ .size __udivsi3, . - __udivsi3
+
+#endif /* L_udivsi3 */
+
+
+#ifdef L_divsi3
+ .align 4
+ .global __divsi3
+ .type __divsi3, @function
+__divsi3:
+ leaf_entry sp, 16
+#if XCHAL_HAVE_DIV32
+ quos a2, a2, a3
+#else
+ xor a7, a2, a3 /* sign = dividend ^ divisor */
+ do_abs a6, a2, a4 /* udividend = abs (dividend) */
+ do_abs a3, a3, a4 /* udivisor = abs (divisor) */
+ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
+ do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */
+ do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
+ ssl a4
+ sll a3, a3 /* udivisor <<= count */
+ movi a2, 0 /* quotient = 0 */
+
+ /* test-subtract-and-shift loop; one quotient bit on each iteration */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 /* increment if udividend >= udivisor */
+.Lreturn:
+ neg a5, a2
+ movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */
+ leaf_return
+
+.Lle_one:
+ beqz a3, .Lerror
+ neg a2, a6 /* if udivisor == 1, then return... */
+ movgez a2, a6, a7 /* (sign < 0) ? -udividend : udividend */
+ leaf_return
+
+.Lspecial:
+ bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */
+ movi a2, 1
+ movi a4, -1
+ movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */
+ leaf_return
+
+.Lerror:
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ leaf_return
+ .size __divsi3, . - __divsi3
+
+#endif /* L_divsi3 */
+
+
+#ifdef L_umodsi3
+ .align 4
+ .global __umodsi3
+ .type __umodsi3, @function
+__umodsi3:
+ leaf_entry sp, 16
+#if XCHAL_HAVE_DIV32
+ remu a2, a2, a3
+#else
+ bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */
+
+ do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */
+ do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
+ ssl a4
+ sll a3, a3 /* divisor <<= count */
+
+ /* test-subtract-and-shift loop */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+.Lspecial:
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 /* subtract once more if dividend >= divisor */
+.Lreturn:
+ leaf_return
+
+.Lle_one:
+	bnez	a3, .Lreturn0	/* divisor == 1: remainder is 0 */
+
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ leaf_return
+ .size __umodsi3, . - __umodsi3
+
+#endif /* L_umodsi3 */
+
+
+#ifdef L_modsi3
+ .align 4
+ .global __modsi3
+ .type __modsi3, @function
+__modsi3:
+ leaf_entry sp, 16
+#if XCHAL_HAVE_DIV32
+ rems a2, a2, a3
+#else
+ mov a7, a2 /* save original (signed) dividend */
+ do_abs a2, a2, a4 /* udividend = abs (dividend) */
+ do_abs a3, a3, a4 /* udivisor = abs (divisor) */
+ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
+ do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */
+ do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
+ ssl a4
+ sll a3, a3 /* udivisor <<= count */
+
+ /* test-subtract-and-shift loop */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lloopend
+#endif /* XCHAL_HAVE_LOOPS */
+.Lloop:
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+#if !XCHAL_HAVE_LOOPS
+ addi a4, a4, -1
+ bnez a4, .Lloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lloopend:
+
+.Lspecial:
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 /* subtract again if udividend >= udivisor */
+.Lreturn:
+ bgez a7, .Lpositive
+ neg a2, a2 /* if (dividend < 0), return -udividend */
+.Lpositive:
+ leaf_return
+
+.Lle_one:
+	bnez	a3, .Lreturn0	/* udivisor == 1: remainder is 0 */
+
+ /* Divide by zero: Use an illegal instruction to force an exception.
+ The subsequent "DIV0" string can be recognized by the exception
+ handler to identify the real cause of the exception. */
+ ill
+ .ascii "DIV0"
+
+.Lreturn0:
+ movi a2, 0
+#endif /* XCHAL_HAVE_DIV32 */
+ leaf_return
+ .size __modsi3, . - __modsi3
+
+#endif /* L_modsi3 */
+
+
+#ifdef __XTENSA_EB__
+#define uh a2
+#define ul a3
+#else
+#define uh a3
+#define ul a2
+#endif /* __XTENSA_EB__ */
+
+
+#ifdef L_ashldi3
+ .align 4
+ .global __ashldi3
+ .type __ashldi3, @function
+__ashldi3:
+ leaf_entry sp, 16
+ ssl a4
+ bgei a4, 32, .Llow_only
+ src uh, uh, ul
+ sll ul, ul
+ leaf_return
+
+.Llow_only:
+ sll uh, ul
+ movi ul, 0
+ leaf_return
+ .size __ashldi3, . - __ashldi3
+
+#endif /* L_ashldi3 */
+
+
+#ifdef L_ashrdi3
+ .align 4
+ .global __ashrdi3
+ .type __ashrdi3, @function
+__ashrdi3:
+ leaf_entry sp, 16
+ ssr a4
+ bgei a4, 32, .Lhigh_only
+ src ul, uh, ul
+ sra uh, uh
+ leaf_return
+
+.Lhigh_only:
+ sra ul, uh
+ srai uh, uh, 31
+ leaf_return
+ .size __ashrdi3, . - __ashrdi3
+
+#endif /* L_ashrdi3 */
+
+
+#ifdef L_lshrdi3
+ .align 4
+ .global __lshrdi3
+ .type __lshrdi3, @function
+__lshrdi3:
+ leaf_entry sp, 16
+ ssr a4
+ bgei a4, 32, .Lhigh_only1
+ src ul, uh, ul
+ srl uh, uh
+ leaf_return
+
+.Lhigh_only1:
+ srl ul, uh
+ movi uh, 0
+ leaf_return
+ .size __lshrdi3, . - __lshrdi3
+
+#endif /* L_lshrdi3 */
+
+
+#include "ieee754-df.S"
+#include "ieee754-sf.S"
diff --git a/libgcc/config/xtensa/lib2funcs.S b/libgcc/config/xtensa/lib2funcs.S
new file mode 100644
index 00000000000..65134e24ccf
--- /dev/null
+++ b/libgcc/config/xtensa/lib2funcs.S
@@ -0,0 +1,186 @@
+/* Assembly functions for libgcc2.
+ Copyright (C) 2001, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "xtensa-config.h"
+
+/* __xtensa_libgcc_window_spill: This function flushes out all but the
+ current register window. This is used to set up the stack so that
+ arbitrary frames can be accessed. */
+
+ .align 4
+ .global __xtensa_libgcc_window_spill
+ .type __xtensa_libgcc_window_spill,@function
+__xtensa_libgcc_window_spill:
+ entry sp, 32
+ movi a2, 0
+ syscall
+ retw
+ .size __xtensa_libgcc_window_spill, .-__xtensa_libgcc_window_spill
+
+
+/* __xtensa_nonlocal_goto: This code does all the hard work of a
+ nonlocal goto on Xtensa. It is here in the library to avoid the
+ code size bloat of generating it in-line. There are two
+ arguments:
+
+ a2 = frame pointer for the procedure containing the label
+ a3 = goto handler address
+
+ This function never returns to its caller but instead goes directly
+ to the address of the specified goto handler. */
+
+ .align 4
+ .global __xtensa_nonlocal_goto
+ .type __xtensa_nonlocal_goto,@function
+__xtensa_nonlocal_goto:
+ entry sp, 32
+
+ /* Flush registers. */
+ mov a5, a2
+ movi a2, 0
+ syscall
+ mov a2, a5
+
+ /* Because the save area for a0-a3 is stored one frame below
+ the one identified by a2, the only way to restore those
+ registers is to unwind the stack. If alloca() were never
+ called, we could just unwind until finding the sp value
+ matching a2. However, a2 is a frame pointer, not a stack
+ pointer, and may not be encountered during the unwinding.
+ The solution is to unwind until going _past_ the value
+ given by a2. This involves keeping three stack pointer
+ values during the unwinding:
+
+ next = sp of frame N-1
+ cur = sp of frame N
+ prev = sp of frame N+1
+
+ When next > a2, the desired save area is stored relative
+ to prev. At this point, cur will be the same as a2
+ except in the alloca() case.
+
+ Besides finding the values to be restored to a0-a3, we also
+ need to find the current window size for the target
+ function. This can be extracted from the high bits of the
+ return address, initially in a0. As the unwinding
+ proceeds, the window size is taken from the value of a0
+ saved _two_ frames below the current frame. */
+
+ addi a5, sp, -16 /* a5 = prev - save area */
+ l32i a6, a5, 4
+ addi a6, a6, -16 /* a6 = cur - save area */
+ mov a8, a0 /* a8 = return address (for window size) */
+ j .Lfirstframe
+
+.Lnextframe:
+ l32i a8, a5, 0 /* next return address (for window size) */
+ mov a5, a6 /* advance prev */
+ addi a6, a7, -16 /* advance cur */
+.Lfirstframe:
+ l32i a7, a6, 4 /* a7 = next */
+ bgeu a2, a7, .Lnextframe
+
+ /* At this point, prev (a5) points to the save area with the saved
+ values of a0-a3. Copy those values into the save area at the
+ current sp so they will be reloaded when the return from this
+ function underflows. We don't have to worry about exceptions
+ while updating the current save area, because the windows have
+ already been flushed. */
+
+ addi a4, sp, -16 /* a4 = save area of this function */
+ l32i a6, a5, 0
+ l32i a7, a5, 4
+ s32i a6, a4, 0
+ s32i a7, a4, 4
+ l32i a6, a5, 8
+ l32i a7, a5, 12
+ s32i a6, a4, 8
+ s32i a7, a4, 12
+
+ /* Set return address to goto handler. Use the window size bits
+ from the return address two frames below the target. */
+ extui a8, a8, 30, 2 /* get window size from return addr. */
+ slli a3, a3, 2 /* get goto handler addr. << 2 */
+ ssai 2
+ src a0, a8, a3 /* combine them with a funnel shift */
+
+ retw
+ .size __xtensa_nonlocal_goto, .-__xtensa_nonlocal_goto
+
+
+/* __xtensa_sync_caches: This function is called after writing a trampoline
+ on the stack to force all the data writes to memory and invalidate the
+ instruction cache. a2 is the address of the new trampoline.
+
+ After the trampoline data is written out, it must be flushed out of
+ the data cache into memory. We use DHWB in case we have a writeback
+ cache. At least one DHWB instruction is needed for each data cache
+ line which may be touched by the trampoline. An ISYNC instruction
+ must follow the DHWBs.
+
+ We have to flush the i-cache to make sure that the new values get used.
+ At least one IHI instruction is needed for each i-cache line which may
+ be touched by the trampoline. An ISYNC instruction is also needed to
+ make sure that the modified instructions are loaded into the instruction
+ fetch buffer. */
+
+/* Use the maximum trampoline size. Flushing a bit extra is OK. */
+#define TRAMPOLINE_SIZE 60
+
+ .text
+ .align 4
+ .global __xtensa_sync_caches
+ .type __xtensa_sync_caches,@function
+__xtensa_sync_caches:
+ entry sp, 32
+#if XCHAL_DCACHE_SIZE > 0
+ /* Flush the trampoline from the data cache. */
+ extui a4, a2, 0, XCHAL_DCACHE_LINEWIDTH
+ addi a4, a4, TRAMPOLINE_SIZE
+ addi a4, a4, (1 << XCHAL_DCACHE_LINEWIDTH) - 1
+ srli a4, a4, XCHAL_DCACHE_LINEWIDTH
+ mov a3, a2
+.Ldcache_loop:
+ dhwb a3, 0
+ addi a3, a3, (1 << XCHAL_DCACHE_LINEWIDTH)
+ addi a4, a4, -1
+ bnez a4, .Ldcache_loop
+ isync
+#endif
+#if XCHAL_ICACHE_SIZE > 0
+ /* Invalidate the corresponding lines in the instruction cache. */
+ extui a4, a2, 0, XCHAL_ICACHE_LINEWIDTH
+ addi a4, a4, TRAMPOLINE_SIZE
+ addi a4, a4, (1 << XCHAL_ICACHE_LINEWIDTH) - 1
+ srli a4, a4, XCHAL_ICACHE_LINEWIDTH
+.Licache_loop:
+ ihi a2, 0
+ addi a2, a2, (1 << XCHAL_ICACHE_LINEWIDTH)
+ addi a4, a4, -1
+ bnez a4, .Licache_loop
+#endif
+ isync
+ retw
+ .size __xtensa_sync_caches, .-__xtensa_sync_caches
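[Editorial illustration, not part of the patch.] The line-count arithmetic in both loops above reduces to a ceiling division over the cache line size. A standalone sketch (the address and line width in main are made up; in the real code they come from a2 and the XCHAL_* configuration macros):

  #include <assert.h>

  #define TRAMPOLINE_SIZE 60   /* maximum trampoline size, as above */

  /* Number of cache lines touched by TRAMPOLINE_SIZE bytes starting at
     addr, for a line size of (1 << linewidth) bytes: the extui isolates
     the offset within the first line, and the srli divides, rounding up.  */
  static unsigned
  lines_to_flush (unsigned addr, unsigned linewidth)
  {
    unsigned offset = addr & ((1u << linewidth) - 1);           /* extui */
    return (offset + TRAMPOLINE_SIZE + (1u << linewidth) - 1)
           >> linewidth;                                        /* srli  */
  }

  int
  main (void)
  {
    /* 16-byte lines (linewidth 4), trampoline starting 12 bytes into a
       line: ceil ((12 + 60) / 16) == 5 DHWB/IHI operations.  */
    assert (lines_to_flush (0x1000c, 4) == 5);
    return 0;
  }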
diff --git a/libgcc/config/xtensa/libgcc-glibc.ver b/libgcc/config/xtensa/libgcc-glibc.ver
new file mode 100644
index 00000000000..43e7d4fc7c7
--- /dev/null
+++ b/libgcc/config/xtensa/libgcc-glibc.ver
@@ -0,0 +1,3 @@
+GCC_4.3.0 {
+ __umulsidi3
+}
diff --git a/libgcc/config/xtensa/t-elf b/libgcc/config/xtensa/t-elf
new file mode 100644
index 00000000000..59d51210b95
--- /dev/null
+++ b/libgcc/config/xtensa/t-elf
@@ -0,0 +1,5 @@
+# Build CRT files and libgcc with the "longcalls" option
+CRTSTUFF_T_CFLAGS += -mlongcalls
+CRTSTUFF_T_CFLAGS_S += -mlongcalls
+
+HOST_LIBGCC2_CFLAGS += -mlongcalls
diff --git a/libgcc/config/xtensa/t-linux b/libgcc/config/xtensa/t-linux
new file mode 100644
index 00000000000..6f4ae893486
--- /dev/null
+++ b/libgcc/config/xtensa/t-linux
@@ -0,0 +1 @@
+SHLIB_MAPFILES += $(srcdir)/config/xtensa/libgcc-glibc.ver
diff --git a/libgcc/config/xtensa/t-xtensa b/libgcc/config/xtensa/t-xtensa
index 7d9e9db0487..27399e67fa0 100644
--- a/libgcc/config/xtensa/t-xtensa
+++ b/libgcc/config/xtensa/t-xtensa
@@ -1,2 +1,16 @@
+LIB1ASMSRC = xtensa/lib1funcs.S
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3 _udivsi3 _umodsi3 \
+ _umulsidi3 _clz _clzsi2 _ctzsi2 _ffssi2 \
+ _ashldi3 _ashrdi3 _lshrdi3 \
+ _negsf2 _addsubsf3 _mulsf3 _divsf3 _cmpsf2 _fixsfsi _fixsfdi \
+ _fixunssfsi _fixunssfdi _floatsisf _floatunsisf \
+ _floatdisf _floatundisf \
+ _negdf2 _addsubdf3 _muldf3 _divdf3 _cmpdf2 _fixdfsi _fixdfdi \
+ _fixunsdfsi _fixunsdfdi _floatsidf _floatunsidf \
+ _floatdidf _floatundidf \
+ _truncdfsf2 _extendsfdf2
+
+LIB2ADD = $(srcdir)/config/xtensa/lib2funcs.S
+
LIB2ADDEH = $(srcdir)/config/xtensa/unwind-dw2-xtensa.c \
$(srcdir)/unwind-dw2-fde.c $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
diff --git a/libgcc/config/xtensa/unwind-dw2-xtensa.c b/libgcc/config/xtensa/unwind-dw2-xtensa.c
index 54daf7637ce..2ded579929f 100644
--- a/libgcc/config/xtensa/unwind-dw2-xtensa.c
+++ b/libgcc/config/xtensa/unwind-dw2-xtensa.c
@@ -28,6 +28,7 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#ifdef __USING_SJLJ_EXCEPTIONS__
diff --git a/libgcc/configure b/libgcc/configure
index 80bb61c80af..e1c58f57937 100644
--- a/libgcc/configure
+++ b/libgcc/configure
@@ -593,6 +593,8 @@ LIBOBJS
asm_hidden_op
extra_parts
cpu_type
+tm_defines
+tm_file
tmake_file
sfp_machine_header
set_use_emutls
@@ -627,6 +629,13 @@ build_subdir
build_libsubdir
target_noncanonical
host_noncanonical
+AWK
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+MAINT
+slibdir
+PICFLAG
host_os
host_vendor
host_cpu
@@ -635,12 +644,6 @@ build_os
build_vendor
build_cpu
build
-AWK
-INSTALL_DATA
-INSTALL_SCRIPT
-INSTALL_PROGRAM
-MAINT
-slibdir
enable_shared
libgcc_topdir
target_alias
@@ -694,6 +697,7 @@ enable_maintainer_mode
with_build_libsubdir
enable_decimal_float
with_system_libunwind
+enable_sjlj_exceptions
enable_tls
'
ac_precious_vars='build_alias
@@ -1328,6 +1332,8 @@ Optional Features:
enable decimal float extension to C. Selecting 'bid'
                          or 'dpd' chooses which decimal floating-point format
to use
+ --enable-sjlj-exceptions
+ force use of builtin_setjmp for exceptions
--enable-tls Use thread-local storage [default=yes]
Optional Packages:
@@ -2194,6 +2200,158 @@ fi
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if test "${ac_cv_build+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if test "${ac_cv_host+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+
+
+
+
+case "${host}" in
+ # PIC is the default on some targets or must not be used.
+ *-*-darwin*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ PICFLAG=-fno-common
+ ;;
+ alpha*-dec-osf5*)
+ # PIC is the default.
+ ;;
+ hppa*64*-*-hpux*)
+ # PIC is the default for 64-bit PA HP-UX.
+ ;;
+ i[34567]86-*-cygwin* | i[34567]86-*-mingw* | x86_64-*-mingw*)
+ ;;
+ i[34567]86-*-interix3*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+ i[34567]86-*-nto-qnx*)
+    # QNX uses GNU C++, but the -shared option must be passed too;
+    # otherwise it will core dump.
+ PICFLAG='-fPIC -shared'
+ ;;
+ i[34567]86-pc-msdosdjgpp*)
+ # DJGPP does not support shared libraries at all.
+ ;;
+ ia64*-*-hpux*)
+ # On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ PICFLAG=-fPIC
+ ;;
+ mips-sgi-irix6*)
+ # PIC is the default.
+ ;;
+ rs6000-ibm-aix* | powerpc-ibm-aix*)
+ # All AIX code is PIC.
+ ;;
+
+ # Some targets support both -fPIC and -fpic, but prefer the latter.
+ # FIXME: Why?
+ i[34567]86-*-* | x86_64-*-*)
+ PICFLAG=-fpic
+ ;;
+ m68k-*-*)
+ PICFLAG=-fpic
+ ;;
+ s390*-*-*)
+ PICFLAG=-fpic
+ ;;
+ # FIXME: Override -fPIC default in libgcc only?
+ sh-*-linux* | sh[2346lbe]*-*-linux*)
+ PICFLAG=-fpic
+ ;;
+ # FIXME: Simplify to sh*-*-netbsd*?
+ sh-*-netbsdelf* | shl*-*-netbsdelf* | sh5-*-netbsd* | sh5l*-*-netbsd* | \
+ sh64-*-netbsd* | sh64l*-*-netbsd*)
+ PICFLAG=-fpic
+ ;;
+ # Default to -fPIC unless specified otherwise.
+ *)
+ PICFLAG=-fPIC
+ ;;
+esac
+
+# If the user explicitly uses -fpic/-fPIC, keep that.
+case "${CFLAGS}" in
+ *-fpic*)
+ PICFLAG=-fpic
+ ;;
+ *-fPIC*)
+ PICFLAG=-fPIC
+ ;;
+esac
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --enable-version-specific-runtime-libs" >&5
$as_echo_n "checking for --enable-version-specific-runtime-libs... " >&6; }
# Check whether --enable-version-specific-runtime-libs was given.
@@ -2382,76 +2540,6 @@ case ${AWK} in
"") as_fn_error "can't build without awk, bailing out" "$LINENO" 5 ;;
esac
-# Make sure we can run config.sub.
-$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
- as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
-$as_echo_n "checking build system type... " >&6; }
-if test "${ac_cv_build+set}" = set; then :
- $as_echo_n "(cached) " >&6
-else
- ac_build_alias=$build_alias
-test "x$ac_build_alias" = x &&
- ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
-test "x$ac_build_alias" = x &&
- as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5
-ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
-$as_echo "$ac_cv_build" >&6; }
-case $ac_cv_build in
-*-*-*) ;;
-*) as_fn_error "invalid value of canonical build" "$LINENO" 5;;
-esac
-build=$ac_cv_build
-ac_save_IFS=$IFS; IFS='-'
-set x $ac_cv_build
-shift
-build_cpu=$1
-build_vendor=$2
-shift; shift
-# Remember, the first character of IFS is used to create $*,
-# except with old shells:
-build_os=$*
-IFS=$ac_save_IFS
-case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
-$as_echo_n "checking host system type... " >&6; }
-if test "${ac_cv_host+set}" = set; then :
- $as_echo_n "(cached) " >&6
-else
- if test "x$host_alias" = x; then
- ac_cv_host=$ac_cv_build
-else
- ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
-$as_echo "$ac_cv_host" >&6; }
-case $ac_cv_host in
-*-*-*) ;;
-*) as_fn_error "invalid value of canonical host" "$LINENO" 5;;
-esac
-host=$ac_cv_host
-ac_save_IFS=$IFS; IFS='-'
-set x $ac_cv_host
-shift
-host_cpu=$1
-host_vendor=$2
-shift; shift
-# Remember, the first character of IFS is used to create $*,
-# except with old shells:
-host_os=$*
-IFS=$ac_save_IFS
-case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
-
case ${build_alias} in
"") build_noncanonical=${build} ;;
@@ -4408,6 +4496,72 @@ $as_echo "#define HAVE_GETIPINFO 1" >>confdefs.h
fi
+# The sjlj test is almost duplicated here and in libgo/configure.ac (for C),
+# libstdc++-v3/acinclude.m4 and libjava/configure.ac (for C++), and
+# libobjc/configure.ac (for Objective-C).
+# FIXME: This should be centralized in config/sjlj.m4.
+# Check whether --enable-sjlj-exceptions was given.
+if test "${enable_sjlj_exceptions+set}" = set; then :
+ enableval=$enable_sjlj_exceptions; case "$enableval" in
+ yes|no|auto) ;;
+ *) as_fn_error "unknown argument to --enable-sjlj-exceptions" "$LINENO" 5 ;;
+ esac
+else
+ enable_sjlj_exceptions=auto
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use setjmp/longjmp exceptions" >&5
+$as_echo_n "checking whether to use setjmp/longjmp exceptions... " >&6; }
+if test "${libgcc_cv_lib_sjlj_exceptions+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+void bar ();
+void clean (int *);
+void foo ()
+{
+ int i __attribute__ ((cleanup (clean)));
+ bar();
+}
+
+_ACEOF
+CFLAGS_hold=$CFLAGS
+CFLAGS="--save-temps -fexceptions"
+libgcc_cv_lib_sjlj_exceptions=unknown
+if ac_fn_c_try_compile; then :
+ if grep _Unwind_SjLj_Resume conftest.s >/dev/null 2>&1; then
+ libgcc_cv_lib_sjlj_exceptions=yes
+ elif grep _Unwind_Resume conftest.s >/dev/null 2>&1; then
+ libgcc_cv_lib_sjlj_exceptions=no
+ fi
+fi
+CFLAGS=$CFLAGS_hold
+rm -f conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libgcc_cv_lib_sjlj_exceptions" >&5
+$as_echo "$libgcc_cv_lib_sjlj_exceptions" >&6; }
+
+if test "$enable_sjlj_exceptions" = "auto"; then
+ enable_sjlj_exceptions=$libgcc_cv_lib_sjlj_exceptions
+fi
+
+case $enable_sjlj_exceptions in
+yes)
+
+$as_echo "#define LIBGCC_SJLJ_EXCEPTIONS 1" >>confdefs.h
+
+ ;;
+no)
+ ;;
+*)
+ as_fn_error "unable to detect exception model" "$LINENO" 5
+ ;;
+esac
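[Editorial illustration, not part of the patch.] The cached result above can be reproduced by hand: compile the probe below with `cc --save-temps -fexceptions -c` and grep the generated .s file. This is the same translation unit as the conftest source, shown in isolation; the function names match that source:

  /* The cleanup attribute forces an exception-handling region around the
     call to bar, so the assembly references _Unwind_SjLj_Resume under the
     setjmp/longjmp model and _Unwind_Resume under the DWARF-2 model --
     exactly the symbols the greps above key on.  */
  void bar (void);
  void clean (int *p);

  void
  foo (void)
  {
    int i __attribute__ ((cleanup (clean)));
    bar ();
  }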
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
if test "${acl_cv_prog_gnu_ld+set}" = set; then :
@@ -4455,20 +4609,16 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libgcc_cv_cfi" >&5
$as_echo "$libgcc_cv_cfi" >&6; }
-# Check 32bit or 64bit for x86.
-case ${host} in
-i?86*-*-* | x86_64*-*-*)
- cat > conftest.c <<EOF
-#ifdef __x86_64__
+# Check whether the host is 32-bit or 64-bit.
+cat > conftest.c <<EOF
+#if defined(__x86_64__) || (!defined(__i386__) && defined(__LP64__))
host_address=64
#else
host_address=32
#endif
EOF
- eval `${CC-cc} -E conftest.c | grep host_address=`
- rm -f conftest.c
- ;;
-esac
+eval `${CC-cc} -E conftest.c | grep host_address=`
+rm -f conftest.c
# Collect host-machine-specific information.
. ${srcdir}/config.host
@@ -4632,6 +4782,30 @@ done
tmake_file="${tmake_file_}"
+# Likewise export definitions for libgcc_tm.h
+tm_file_=
+for f in ${tm_file}
+do
+ tm_file_="${tm_file_} \$(srcdir)/config/$f"
+done
+tm_file="${tm_file_}"
+
+
+
+# Map from thread model to thread header.
+case $target_thread_file in
+ aix) thread_header=config/rs6000/gthr-aix.h ;;
+ dce) thread_header=config/pa/gthr-dce.h ;;
+ lynx) thread_header=config/gthr-lynx.h ;;
+ mipssde) thread_header=config/mips/gthr-mipssde.h ;;
+ posix) thread_header=gthr-posix.h ;;
+ rtems) thread_header=config/gthr-rtems.h ;;
+ single) thread_header=gthr-single.h ;;
+ tpf) thread_header=config/s390/gthr-tpf.h ;;
+ vxworks) thread_header=config/gthr-vxworks.h ;;
+ win32) thread_header=config/i386/gthr-win32.h ;;
+esac
+
# Substitute configuration variables
@@ -4644,6 +4818,8 @@ ac_config_links="$ac_config_links md-unwind-support.h:config/$md_unwind_header"
ac_config_links="$ac_config_links sfp-machine.h:config/$sfp_machine_header"
+ac_config_links="$ac_config_links gthr-default.h:$thread_header"
+
# We need multilib support.
ac_config_files="$ac_config_files Makefile"
@@ -5373,6 +5549,7 @@ do
"unwind.h") CONFIG_LINKS="$CONFIG_LINKS unwind.h:$unwind_header" ;;
"md-unwind-support.h") CONFIG_LINKS="$CONFIG_LINKS md-unwind-support.h:config/$md_unwind_header" ;;
"sfp-machine.h") CONFIG_LINKS="$CONFIG_LINKS sfp-machine.h:config/$sfp_machine_header" ;;
+ "gthr-default.h") CONFIG_LINKS="$CONFIG_LINKS gthr-default.h:$thread_header" ;;
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"default") CONFIG_COMMANDS="$CONFIG_COMMANDS default" ;;
diff --git a/libgcc/configure.ac b/libgcc/configure.ac
index c1a3dce0f5b..748189393e6 100644
--- a/libgcc/configure.ac
+++ b/libgcc/configure.ac
@@ -6,6 +6,7 @@ sinclude(../config/acx.m4)
sinclude(../config/no-executables.m4)
sinclude(../config/lib-ld.m4)
sinclude(../config/override.m4)
+sinclude(../config/picflag.m4)
sinclude(../config/dfp.m4)
sinclude(../config/unwind_ipinfo.m4)
@@ -58,6 +59,9 @@ AC_ARG_ENABLE(shared,
], [enable_shared=yes])
AC_SUBST(enable_shared)
+GCC_PICFLAG
+AC_SUBST(PICFLAG)
+
AC_MSG_CHECKING([for --enable-version-specific-runtime-libs])
AC_ARG_ENABLE(version-specific-runtime-libs,
[ --enable-version-specific-runtime-libs Specify that runtime libraries should be installed in a compiler-specific directory ],
@@ -180,6 +184,60 @@ AC_SUBST(fixed_point)
# config.gcc also contains tests of with_system_libunwind.
GCC_CHECK_UNWIND_GETIPINFO
+# The sjlj test is almost duplicated here and in libgo/configure.ac (for C),
+# libstdc++-v3/acinclude.m4 and libjava/configure.ac (for C++), and
+# libobjc/configure.ac (for Objective-C).
+# FIXME: This should be centralized in config/sjlj.m4.
+AC_ARG_ENABLE(sjlj-exceptions,
+ AC_HELP_STRING([--enable-sjlj-exceptions],
+ [force use of builtin_setjmp for exceptions]),
+ [case "$enableval" in
+ yes|no|auto) ;;
+ *) AC_MSG_ERROR([unknown argument to --enable-sjlj-exceptions]) ;;
+ esac],
+ [enable_sjlj_exceptions=auto])
+
+AC_CACHE_CHECK([whether to use setjmp/longjmp exceptions],
+[libgcc_cv_lib_sjlj_exceptions],
+[AC_LANG_CONFTEST(
+ [AC_LANG_SOURCE([
+void bar ();
+void clean (int *);
+void foo ()
+{
+ int i __attribute__ ((cleanup (clean)));
+ bar();
+}
+])])
+CFLAGS_hold=$CFLAGS
+CFLAGS="--save-temps -fexceptions"
+libgcc_cv_lib_sjlj_exceptions=unknown
+AS_IF([ac_fn_c_try_compile],
+ [if grep _Unwind_SjLj_Resume conftest.s >/dev/null 2>&1; then
+ libgcc_cv_lib_sjlj_exceptions=yes
+ elif grep _Unwind_Resume conftest.s >/dev/null 2>&1; then
+ libgcc_cv_lib_sjlj_exceptions=no
+ fi])
+CFLAGS=$CFLAGS_hold
+rm -f conftest*
+])
+
+if test "$enable_sjlj_exceptions" = "auto"; then
+ enable_sjlj_exceptions=$libgcc_cv_lib_sjlj_exceptions
+fi
+
+case $enable_sjlj_exceptions in
+yes)
+ AC_DEFINE(LIBGCC_SJLJ_EXCEPTIONS, 1,
+ [Define if the C compiler is configured for setjmp/longjmp exceptions.])
+ ;;
+no)
+ ;;
+*)
+ AC_MSG_ERROR([unable to detect exception model])
+ ;;
+esac
+
AC_LIB_PROG_LD_GNU
AC_MSG_CHECKING([for thread model used by GCC])
@@ -197,20 +255,16 @@ AC_CACHE_CHECK([whether assembler supports CFI directives], [libgcc_cv_cfi],
[libgcc_cv_cfi=yes],
[libgcc_cv_cfi=no])])
-# Check 32bit or 64bit for x86.
-case ${host} in
-i?86*-*-* | x86_64*-*-*)
- cat > conftest.c <<EOF
-#ifdef __x86_64__
+# Check whether the host is 32-bit or 64-bit.
+cat > conftest.c <<EOF
+#if defined(__x86_64__) || (!defined(__i386__) && defined(__LP64__))
host_address=64
#else
host_address=32
#endif
EOF
- eval `${CC-cc} -E conftest.c | grep host_address=`
- rm -f conftest.c
- ;;
-esac
+eval `${CC-cc} -E conftest.c | grep host_address=`
+rm -f conftest.c
# Collect host-machine-specific information.
. ${srcdir}/config.host
@@ -296,6 +350,30 @@ done
tmake_file="${tmake_file_}"
AC_SUBST(tmake_file)
+# Likewise export definitions for libgcc_tm.h
+tm_file_=
+for f in ${tm_file}
+do
+ tm_file_="${tm_file_} \$(srcdir)/config/$f"
+done
+tm_file="${tm_file_}"
+AC_SUBST(tm_file)
+AC_SUBST(tm_defines)
+
+# Map from thread model to thread header.
+case $target_thread_file in
+ aix) thread_header=config/rs6000/gthr-aix.h ;;
+ dce) thread_header=config/pa/gthr-dce.h ;;
+ lynx) thread_header=config/gthr-lynx.h ;;
+ mipssde) thread_header=config/mips/gthr-mipssde.h ;;
+ posix) thread_header=gthr-posix.h ;;
+ rtems) thread_header=config/gthr-rtems.h ;;
+ single) thread_header=gthr-single.h ;;
+ tpf) thread_header=config/s390/gthr-tpf.h ;;
+ vxworks) thread_header=config/gthr-vxworks.h ;;
+ win32) thread_header=config/i386/gthr-win32.h ;;
+esac
+
# Substitute configuration variables
AC_SUBST(cpu_type)
AC_SUBST(extra_parts)
@@ -304,6 +382,7 @@ AC_CONFIG_LINKS([enable-execute-stack.c:$enable_execute_stack])
AC_CONFIG_LINKS([unwind.h:$unwind_header])
AC_CONFIG_LINKS([md-unwind-support.h:config/$md_unwind_header])
AC_CONFIG_LINKS([sfp-machine.h:config/$sfp_machine_header])
+AC_CONFIG_LINKS([gthr-default.h:$thread_header])
# We need multilib support.
AC_CONFIG_FILES([Makefile])
diff --git a/libgcc/crtstuff.c b/libgcc/crtstuff.c
new file mode 100644
index 00000000000..66b2cdf2446
--- /dev/null
+++ b/libgcc/crtstuff.c
@@ -0,0 +1,651 @@
+/* Specialized bits of code needed to support construction and
+ destruction of file-scope objects in C++ code.
+ Copyright (C) 1991, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+ 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file is a bit like libgcc2.c in that it is compiled
+ multiple times and yields multiple .o files.
+
+ This file is useful on target machines where the object file format
+ supports multiple "user-defined" sections (e.g. COFF, ELF, ROSE). On
+ such systems, this file allows us to avoid running collect (or any
+ other such slow and painful kludge). Additionally, if the target
+ system supports a .init section, this file allows us to support the
+ linking of C++ code with a non-C++ main program.
+
+ Note that if INIT_SECTION_ASM_OP is defined in the tm.h file, then
+ this file *will* make use of the .init section. If that symbol is
+ not defined however, then the .init section will not be used.
+
+   Currently, only ELF and COFF are supported.  It is likely, however, that
+   ROSE could also be supported, if someone were willing to do the work to
+ make whatever (small?) adaptations are needed. (Some work may be
+ needed on the ROSE assembler and linker also.)
+
+ This file must be compiled with gcc. */
+
+/* Target machine header files require this define. */
+#define IN_LIBGCC2
+
+/* FIXME: Including auto-host is incorrect, but until we have
+ identified the set of defines that need to go into auto-target.h,
+ this will have to do. */
+#include "auto-host.h"
+#undef pid_t
+#undef rlim_t
+#undef ssize_t
+#undef vfork
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "unwind-dw2-fde.h"
+
+#ifndef FORCE_CODE_SECTION_ALIGN
+# define FORCE_CODE_SECTION_ALIGN
+#endif
+
+#ifndef CRT_CALL_STATIC_FUNCTION
+# define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+static void __attribute__((__used__)) \
+call_ ## FUNC (void) \
+{ \
+ asm (SECTION_OP); \
+ FUNC (); \
+ FORCE_CODE_SECTION_ALIGN \
+ asm (TEXT_SECTION_ASM_OP); \
+}
+#endif
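[Editorial illustration, not part of the patch.] For a hypothetical function and placeholder section directives (real targets supply INIT_SECTION_ASM_OP and TEXT_SECTION_ASM_OP), the default definition above expands roughly to the following; a sketch, not an exact preprocessor output:

  static void my_init_func (void) { /* ... */ }

  /* CRT_CALL_STATIC_FUNCTION ("\t.section\t.init", my_init_func)
     becomes approximately: */
  static void __attribute__((__used__))
  call_my_init_func (void)
  {
    asm ("\t.section\t.init");   /* emit the call in the .init section */
    my_init_func ();
    /* FORCE_CODE_SECTION_ALIGN is empty by default.  */
    asm ("\t.text");             /* switch back before the epilogue */
  }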
+
+#if defined(OBJECT_FORMAT_ELF) \
+ && !defined(OBJECT_FORMAT_FLAT) \
+ && defined(HAVE_LD_EH_FRAME_HDR) \
+ && !defined(inhibit_libc) && !defined(CRTSTUFFT_O) \
+ && defined(__FreeBSD__) && __FreeBSD__ >= 7
+#include <link.h>
+# define USE_PT_GNU_EH_FRAME
+#endif
+
+#if defined(OBJECT_FORMAT_ELF) \
+ && !defined(OBJECT_FORMAT_FLAT) \
+ && defined(HAVE_LD_EH_FRAME_HDR) && defined(TARGET_DL_ITERATE_PHDR) \
+ && !defined(inhibit_libc) && !defined(CRTSTUFFT_O) \
+ && defined(__sun__) && defined(__svr4__)
+#include <link.h>
+# define USE_PT_GNU_EH_FRAME
+#endif
+
+#if defined(OBJECT_FORMAT_ELF) \
+ && !defined(OBJECT_FORMAT_FLAT) \
+ && defined(HAVE_LD_EH_FRAME_HDR) \
+ && !defined(inhibit_libc) && !defined(CRTSTUFFT_O) \
+ && defined(__GLIBC__) && __GLIBC__ >= 2
+#include <link.h>
+/* uClibc pretends to be glibc 2.2 and DT_CONFIG is defined in its link.h,
+   but it doesn't currently use the PT_GNU_EH_FRAME ELF segment. */
+# if !defined(__UCLIBC__) \
+ && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
+ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
+# define USE_PT_GNU_EH_FRAME
+# endif
+#endif
+#if defined(EH_FRAME_SECTION_NAME) && !defined(USE_PT_GNU_EH_FRAME)
+# define USE_EH_FRAME_REGISTRY
+#endif
+#if defined(EH_FRAME_SECTION_NAME) && EH_TABLES_CAN_BE_READ_ONLY
+# define EH_FRAME_SECTION_CONST const
+#else
+# define EH_FRAME_SECTION_CONST
+#endif
+
+#if !defined(DTOR_LIST_END) && defined(OBJECT_FORMAT_ELF) \
+ && defined(HAVE_GAS_HIDDEN) && !defined(FINI_ARRAY_SECTION_ASM_OP)
+# define HIDDEN_DTOR_LIST_END
+#endif
+
+/* We do not want to add the weak attribute to the declarations of these
+ routines in unwind-dw2-fde.h because that will cause the definition of
+ these symbols to be weak as well.
+
+   This exposes a core issue: how to create weak references versus weak
+   definitions.  Either the definition of TARGET_WEAK_ATTRIBUTE has to be
+   conditional in the shared header files, or a second declaration is
+   needed when we want a function's references, but not its definition,
+   to be weak.
+
+ Making TARGET_WEAK_ATTRIBUTE conditional seems like a good solution until
+ one thinks about scaling to larger problems -- i.e., the condition under
+ which TARGET_WEAK_ATTRIBUTE is active will eventually get far too
+ complicated.
+
+ So, we take an approach similar to #pragma weak -- we have a second
+ declaration for functions that we want to have weak references.
+
+ Neither way is particularly good. */
+
+/* References to __register_frame_info and __deregister_frame_info should
+ be weak in this file if at all possible. */
+extern void __register_frame_info (const void *, struct object *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void __register_frame_info_bases (const void *, struct object *,
+ void *, void *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void *__deregister_frame_info (const void *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void *__deregister_frame_info_bases (const void *)
+ TARGET_ATTRIBUTE_WEAK;
+extern void __do_global_ctors_1 (void);
+
+/* Likewise for _Jv_RegisterClasses. */
+extern void _Jv_RegisterClasses (void *) TARGET_ATTRIBUTE_WEAK;
+
+#ifdef OBJECT_FORMAT_ELF
+
+/* Declare a pointer to void function type. */
+typedef void (*func_ptr) (void);
+#define STATIC static
+
+#else /* OBJECT_FORMAT_ELF */
+
+#include "gbl-ctors.h"
+
+#define STATIC
+
+#endif /* OBJECT_FORMAT_ELF */
+
+#ifdef CRT_BEGIN
+
+/* NOTE: In order to be able to support SVR4 shared libraries, we arrange
+ to have one set of symbols { __CTOR_LIST__, __DTOR_LIST__, __CTOR_END__,
+ __DTOR_END__ } per root executable and also one set of these symbols
+ per shared library. So in any given whole process image, we may have
+ multiple definitions of each of these symbols. In order to prevent
+ these definitions from conflicting with one another, and in order to
+ ensure that the proper lists are used for the initialization/finalization
+ of each individual shared library (respectively), we give these symbols
+ only internal (i.e. `static') linkage, and we also make it a point to
+ refer to only the __CTOR_END__ symbol in crtend.o and the __DTOR_LIST__
+ symbol in crtbegin.o, where they are defined. */
+
+/* No need for .ctors/.dtors section if linker can place them in
+ .init_array/.fini_array section. */
+#ifndef USE_INITFINI_ARRAY
+/* The -1 is a flag to __do_global_[cd]tors indicating that this table
+ does not start with a count of elements. */
+#ifdef CTOR_LIST_BEGIN
+CTOR_LIST_BEGIN;
+#elif defined(CTORS_SECTION_ASM_OP)
+/* Hack: force cc1 to switch to .data section early, so that assembling
+ __CTOR_LIST__ does not undo our behind-the-back change to .ctors. */
+static func_ptr force_to_data[1] __attribute__ ((__used__)) = { };
+asm (CTORS_SECTION_ASM_OP);
+STATIC func_ptr __CTOR_LIST__[1]
+ __attribute__ ((__used__, aligned(sizeof(func_ptr))))
+ = { (func_ptr) (-1) };
+#else
+STATIC func_ptr __CTOR_LIST__[1]
+ __attribute__ ((__used__, section(".ctors"), aligned(sizeof(func_ptr))))
+ = { (func_ptr) (-1) };
+#endif /* __CTOR_LIST__ alternatives */
+
+#ifdef DTOR_LIST_BEGIN
+DTOR_LIST_BEGIN;
+#elif defined(DTORS_SECTION_ASM_OP)
+asm (DTORS_SECTION_ASM_OP);
+STATIC func_ptr __DTOR_LIST__[1]
+ __attribute__ ((aligned(sizeof(func_ptr))))
+ = { (func_ptr) (-1) };
+#else
+STATIC func_ptr __DTOR_LIST__[1]
+ __attribute__((section(".dtors"), aligned(sizeof(func_ptr))))
+ = { (func_ptr) (-1) };
+#endif /* __DTOR_LIST__ alternatives */
+#endif /* USE_INITFINI_ARRAY */
+
+#ifdef USE_EH_FRAME_REGISTRY
+/* Stick a label at the beginning of the frame unwind info so we can register
+ and deregister it with the exception handling library code. */
+STATIC EH_FRAME_SECTION_CONST char __EH_FRAME_BEGIN__[]
+ __attribute__((section(EH_FRAME_SECTION_NAME), aligned(4)))
+ = { };
+#endif /* USE_EH_FRAME_REGISTRY */
+
+#ifdef JCR_SECTION_NAME
+/* Stick a label at the beginning of the java class registration info
+ so we can register them properly. */
+STATIC void *__JCR_LIST__[]
+ __attribute__ ((used, section(JCR_SECTION_NAME), aligned(sizeof(void*))))
+ = { };
+#endif /* JCR_SECTION_NAME */
+
+#if defined(INIT_SECTION_ASM_OP) || defined(INIT_ARRAY_SECTION_ASM_OP)
+
+#ifdef OBJECT_FORMAT_ELF
+
+/* Declare the __dso_handle variable. It should have a unique value
+ in every shared-object; in a main program its value is zero. The
+ object should in any case be protected. This means the instance
+ in one DSO or the main program is not used in another object. The
+ dynamic linker takes care of this. */
+
+#ifdef TARGET_LIBGCC_SDATA_SECTION
+extern void *__dso_handle __attribute__ ((__section__ (TARGET_LIBGCC_SDATA_SECTION)));
+#endif
+#ifdef HAVE_GAS_HIDDEN
+extern void *__dso_handle __attribute__ ((__visibility__ ("hidden")));
+#endif
+#ifdef CRTSTUFFS_O
+void *__dso_handle = &__dso_handle;
+#else
+void *__dso_handle = 0;
+#endif
+
+/* The __cxa_finalize function may not be available so we use only a
+ weak declaration. */
+extern void __cxa_finalize (void *) TARGET_ATTRIBUTE_WEAK;
+
+/* Run all the global destructors on exit from the program. */
+
+/* Some systems place the number of pointers in the first word of the
+ table. On SVR4 however, that word is -1. In all cases, the table is
+ null-terminated. On SVR4, we start from the beginning of the list and
+ invoke each per-compilation-unit destructor routine in order
+ until we find that null.
+
+ Note that this function MUST be static. There will be one of these
+ functions in each root executable and one in each shared library, but
+ although they all have the same code, each one is unique in that it
+ refers to one particular associated `__DTOR_LIST__' which belongs to the
+ same particular root executable or shared library file.
+
+ On some systems, this routine is run more than once from the .fini,
+ when exit is called recursively, so we arrange to remember where in
+ the list we left off processing, and we resume at that point,
+ should we be re-invoked. */
+
+static void __attribute__((used))
+__do_global_dtors_aux (void)
+{
+ static _Bool completed;
+
+ if (__builtin_expect (completed, 0))
+ return;
+
+#ifdef CRTSTUFFS_O
+ if (__cxa_finalize)
+ __cxa_finalize (__dso_handle);
+#endif
+
+#ifdef FINI_ARRAY_SECTION_ASM_OP
+ /* If we are using .fini_array then destructors will be run via that
+ mechanism. */
+#elif defined(HIDDEN_DTOR_LIST_END)
+ {
+ /* Safer version that makes sure only .dtors function pointers are
+ called even if the static variable is maliciously changed. */
+ extern func_ptr __DTOR_END__[] __attribute__((visibility ("hidden")));
+ static size_t dtor_idx;
+ const size_t max_idx = __DTOR_END__ - __DTOR_LIST__ - 1;
+ func_ptr f;
+
+ while (dtor_idx < max_idx)
+ {
+ f = __DTOR_LIST__[++dtor_idx];
+ f ();
+ }
+ }
+#else /* !defined (FINI_ARRAY_SECTION_ASM_OP) */
+ {
+ static func_ptr *p = __DTOR_LIST__ + 1;
+ func_ptr f;
+
+ while ((f = *p))
+ {
+ p++;
+ f ();
+ }
+ }
+#endif /* !defined(FINI_ARRAY_SECTION_ASM_OP) */
+
+#ifdef USE_EH_FRAME_REGISTRY
+#ifdef CRT_GET_RFIB_DATA
+ /* If we used the new __register_frame_info_bases interface,
+ make sure that we deregister from the same place. */
+ if (__deregister_frame_info_bases)
+ __deregister_frame_info_bases (__EH_FRAME_BEGIN__);
+#else
+ if (__deregister_frame_info)
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+#endif
+#endif
+
+ completed = 1;
+}
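[Editorial illustration, not part of the patch.] A standalone model of the resumable walk in the HIDDEN_DTOR_LIST_END branch above; the table and its end marker are made up here (in crtstuff they are laid out by the linker). The static index is what lets a recursive exit() continue where it left off instead of re-running earlier destructors:

  #include <stddef.h>
  #include <stdio.h>

  typedef void (*func_ptr) (void);

  static void d1 (void) { puts ("dtor 1"); }
  static void d2 (void) { puts ("dtor 2"); }

  /* -1 header, then the registered destructors; dtor_end plays the role
     of __DTOR_END__, one past the last entry.  */
  static func_ptr dtor_list[] = { (func_ptr) -1, d1, d2 };
  static func_ptr *dtor_end = dtor_list + 3;

  static void
  run_dtors (void)
  {
    static size_t dtor_idx;
    const size_t max_idx = dtor_end - dtor_list - 1;

    while (dtor_idx < max_idx)
      dtor_list[++dtor_idx] ();   /* index survives reentrant calls */
  }

  int
  main (void)
  {
    run_dtors ();
    run_dtors ();   /* second call is a no-op: nothing runs twice */
    return 0;
  }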
+
+/* Stick a call to __do_global_dtors_aux into the .fini section. */
+#ifdef FINI_SECTION_ASM_OP
+CRT_CALL_STATIC_FUNCTION (FINI_SECTION_ASM_OP, __do_global_dtors_aux)
+#elif defined (FINI_ARRAY_SECTION_ASM_OP)
+static func_ptr __do_global_dtors_aux_fini_array_entry[]
+ __attribute__ ((__used__, section(".fini_array")))
+ = { __do_global_dtors_aux };
+#else /* !FINI_SECTION_ASM_OP && !FINI_ARRAY_SECTION_ASM_OP */
+static void __attribute__((used))
+__do_global_dtors_aux_1 (void)
+{
+ atexit (__do_global_dtors_aux);
+}
+CRT_CALL_STATIC_FUNCTION (INIT_SECTION_ASM_OP, __do_global_dtors_aux_1)
+#endif
+
+#if defined(USE_EH_FRAME_REGISTRY) || defined(JCR_SECTION_NAME)
+/* Stick a call to __register_frame_info into the .init section. For some
+ reason calls with no arguments work more reliably in .init, so stick the
+ call in another function. */
+
+static void __attribute__((used))
+frame_dummy (void)
+{
+#ifdef USE_EH_FRAME_REGISTRY
+ static struct object object;
+#ifdef CRT_GET_RFIB_DATA
+ void *tbase, *dbase;
+ tbase = 0;
+ CRT_GET_RFIB_DATA (dbase);
+ if (__register_frame_info_bases)
+ __register_frame_info_bases (__EH_FRAME_BEGIN__, &object, tbase, dbase);
+#else
+ if (__register_frame_info)
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+#endif /* CRT_GET_RFIB_DATA */
+#endif /* USE_EH_FRAME_REGISTRY */
+#ifdef JCR_SECTION_NAME
+ if (__JCR_LIST__[0])
+ {
+ void (*register_classes) (void *) = _Jv_RegisterClasses;
+ __asm ("" : "+r" (register_classes));
+ if (register_classes)
+ register_classes (__JCR_LIST__);
+ }
+#endif /* JCR_SECTION_NAME */
+}
+
+#ifdef INIT_SECTION_ASM_OP
+CRT_CALL_STATIC_FUNCTION (INIT_SECTION_ASM_OP, frame_dummy)
+#else /* defined(INIT_SECTION_ASM_OP) */
+static func_ptr __frame_dummy_init_array_entry[]
+ __attribute__ ((__used__, section(".init_array")))
+ = { frame_dummy };
+#endif /* !defined(INIT_SECTION_ASM_OP) */
+#endif /* USE_EH_FRAME_REGISTRY || JCR_SECTION_NAME */
+
+#else /* OBJECT_FORMAT_ELF */
+
+/* The function __do_global_ctors_aux is compiled twice (once in crtbegin.o
+ and once in crtend.o). It must be declared static to avoid a link
+ error. Here, we define __do_global_ctors as an externally callable
+ function. It is externally callable so that __main can invoke it when
+ INVOKE__main is defined. This has the additional effect of forcing cc1
+ to switch to the .text section. */
+
+static void __do_global_ctors_aux (void);
+void
+__do_global_ctors (void)
+{
+#ifdef INVOKE__main
+ /* If __main won't actually call __do_global_ctors then it doesn't matter
+ what's inside the function. The inside of __do_global_ctors_aux is
+ called automatically in that case. And the Alliant fx2800 linker
+ crashes on this reference. So prevent the crash. */
+ __do_global_ctors_aux ();
+#endif
+}
+
+asm (INIT_SECTION_ASM_OP); /* cc1 doesn't know that we are switching! */
+
+/* A routine to invoke all of the global constructors upon entry to the
+ program. We put this into the .init section (for systems that have
+ such a thing) so that we can properly perform the construction of
+ file-scope static-storage C++ objects within shared libraries. */
+
+static void __attribute__((used))
+__do_global_ctors_aux (void) /* prologue goes in .init section */
+{
+ FORCE_CODE_SECTION_ALIGN /* explicit align before switch to .text */
+ asm (TEXT_SECTION_ASM_OP); /* don't put epilogue and body in .init */
+ DO_GLOBAL_CTORS_BODY;
+ atexit (__do_global_dtors);
+}
+
+#endif /* OBJECT_FORMAT_ELF */
+
+#elif defined(HAS_INIT_SECTION) /* ! INIT_SECTION_ASM_OP */
+
+extern void __do_global_dtors (void);
+
+/* This case is used by the Irix 6 port, which supports named sections but
+ not an SVR4-style .fini section. __do_global_dtors can be non-static
+ in this case because we protect it with -hidden_symbol. */
+
+void
+__do_global_dtors (void)
+{
+ func_ptr *p, f;
+ for (p = __DTOR_LIST__ + 1; (f = *p); p++)
+ f ();
+
+#ifdef USE_EH_FRAME_REGISTRY
+ if (__deregister_frame_info)
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+#endif
+}
+
+#if defined(USE_EH_FRAME_REGISTRY) || defined(JCR_SECTION_NAME)
+/* A helper function for __do_global_ctors, which is in crtend.o. Here
+ in crtbegin.o, we can reference a couple of symbols not visible there.
+ Plus, since we're before libgcc.a, we have no problems referencing
+ functions from there. */
+void
+__do_global_ctors_1(void)
+{
+#ifdef USE_EH_FRAME_REGISTRY
+ static struct object object;
+ if (__register_frame_info)
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+#endif
+#ifdef JCR_SECTION_NAME
+ if (__JCR_LIST__[0])
+ {
+ void (*register_classes) (void *) = _Jv_RegisterClasses;
+ __asm ("" : "+r" (register_classes));
+ if (register_classes)
+ register_classes (__JCR_LIST__);
+ }
+#endif
+}
+#endif /* USE_EH_FRAME_REGISTRY || JCR_SECTION_NAME */
+
+#else /* ! INIT_SECTION_ASM_OP && ! HAS_INIT_SECTION */
+#error "What are you doing with crtstuff.c, then?"
+#endif
+
+#elif defined(CRT_END) /* ! CRT_BEGIN */
+
+/* No need for .ctors/.dtors section if linker can place them in
+ .init_array/.fini_array section. */
+#ifndef USE_INITFINI_ARRAY
+/* Put a word containing zero at the end of each of our two lists of function
+ addresses. Note that the words defined here go into the .ctors and .dtors
+ sections of the crtend.o file, and since that file is always linked in
+ last, these words naturally end up at the very ends of the two lists
+ contained in these two sections. */
+
+#ifdef CTOR_LIST_END
+CTOR_LIST_END;
+#elif defined(CTORS_SECTION_ASM_OP)
+/* Hack: force cc1 to switch to .data section early, so that assembling
+ __CTOR_LIST__ does not undo our behind-the-back change to .ctors. */
+static func_ptr force_to_data[1] __attribute__ ((__used__)) = { };
+asm (CTORS_SECTION_ASM_OP);
+STATIC func_ptr __CTOR_END__[1]
+ __attribute__((aligned(sizeof(func_ptr))))
+ = { (func_ptr) 0 };
+#else
+STATIC func_ptr __CTOR_END__[1]
+ __attribute__((section(".ctors"), aligned(sizeof(func_ptr))))
+ = { (func_ptr) 0 };
+#endif
+
+#ifdef DTOR_LIST_END
+DTOR_LIST_END;
+#elif defined(HIDDEN_DTOR_LIST_END)
+#ifdef DTORS_SECTION_ASM_OP
+asm (DTORS_SECTION_ASM_OP);
+#endif
+func_ptr __DTOR_END__[1]
+ __attribute__ ((used,
+#ifndef DTORS_SECTION_ASM_OP
+ section(".dtors"),
+#endif
+ aligned(sizeof(func_ptr)), visibility ("hidden")))
+ = { (func_ptr) 0 };
+#elif defined(DTORS_SECTION_ASM_OP)
+asm (DTORS_SECTION_ASM_OP);
+STATIC func_ptr __DTOR_END__[1]
+ __attribute__ ((used, aligned(sizeof(func_ptr))))
+ = { (func_ptr) 0 };
+#else
+STATIC func_ptr __DTOR_END__[1]
+ __attribute__((used, section(".dtors"), aligned(sizeof(func_ptr))))
+ = { (func_ptr) 0 };
+#endif
+#endif /* USE_INITFINI_ARRAY */
+
+#ifdef EH_FRAME_SECTION_NAME
+/* Terminate the frame unwind info section with a 4-byte zero as a sentinel;
+   this would be the 'length' field in a real FDE. */
+# if __INT_MAX__ == 2147483647
+typedef int int32;
+# elif __LONG_MAX__ == 2147483647
+typedef long int32;
+# elif __SHRT_MAX__ == 2147483647
+typedef short int32;
+# else
+# error "Missing a 4 byte integer"
+# endif
+STATIC EH_FRAME_SECTION_CONST int32 __FRAME_END__[]
+ __attribute__ ((used, section(EH_FRAME_SECTION_NAME),
+ aligned(sizeof(int32))))
+ = { 0 };
+#endif /* EH_FRAME_SECTION_NAME */
+
+#ifdef JCR_SECTION_NAME
+/* Null terminate the .jcr section array. */
+STATIC void *__JCR_END__[1]
+ __attribute__ ((used, section(JCR_SECTION_NAME),
+ aligned(sizeof(void *))))
+ = { 0 };
+#endif /* JCR_SECTION_NAME */
+
+#ifdef INIT_ARRAY_SECTION_ASM_OP
+
+/* If we are using .init_array, there is nothing to do. */
+
+#elif defined(INIT_SECTION_ASM_OP)
+
+#ifdef OBJECT_FORMAT_ELF
+static void __attribute__((used))
+__do_global_ctors_aux (void)
+{
+ func_ptr *p;
+ for (p = __CTOR_END__ - 1; *p != (func_ptr) -1; p--)
+ (*p) ();
+}
+
+/* Stick a call to __do_global_ctors_aux into the .init section. */
+CRT_CALL_STATIC_FUNCTION (INIT_SECTION_ASM_OP, __do_global_ctors_aux)
+#else /* OBJECT_FORMAT_ELF */
+
+/* Stick the real initialization code, followed by a normal sort of
+ function epilogue at the very end of the .init section for this
+ entire root executable file or for this entire shared library file.
+
+ Note that we use some tricks here to get *just* the body and just
+ a function epilogue (but no function prologue) into the .init
+ section of the crtend.o file. Specifically, we switch to the .text
+ section, start to define a function, and then we switch to the .init
+ section just before the body code.
+
+ Earlier on, we put the corresponding function prologue into the .init
+ section of the crtbegin.o file (which will be linked in first).
+
+ Note that we want to invoke all constructors for C++ file-scope static-
+ storage objects AFTER any other possible initialization actions which
+   may be performed by the .init section contributions made by other
+   libraries, etc.  That's because those other initializations may
+ include setup operations for very primitive things (e.g. initializing
+ the state of the floating-point coprocessor, etc.) which should be done
+ before we start to execute any of the user's code. */
+
+static void
+__do_global_ctors_aux (void) /* prologue goes in .text section */
+{
+ asm (INIT_SECTION_ASM_OP);
+ DO_GLOBAL_CTORS_BODY;
+ atexit (__do_global_dtors);
+} /* epilogue and body go in .init section */
+
+FORCE_CODE_SECTION_ALIGN
+asm (TEXT_SECTION_ASM_OP);
+
+#endif /* OBJECT_FORMAT_ELF */
+
+#elif defined(HAS_INIT_SECTION) /* ! INIT_SECTION_ASM_OP */
+
+extern void __do_global_ctors (void);
+
+/* This case is used by the Irix 6 port, which supports named sections but
+ not an SVR4-style .init section. __do_global_ctors can be non-static
+ in this case because we protect it with -hidden_symbol. */
+void
+__do_global_ctors (void)
+{
+ func_ptr *p;
+#if defined(USE_EH_FRAME_REGISTRY) || defined(JCR_SECTION_NAME)
+ __do_global_ctors_1();
+#endif
+ for (p = __CTOR_END__ - 1; *p != (func_ptr) -1; p--)
+ (*p) ();
+}
+
+#else /* ! INIT_SECTION_ASM_OP && ! HAS_INIT_SECTION */
+#error "What are you doing with crtstuff.c, then?"
+#endif
+
+#else /* ! CRT_BEGIN && ! CRT_END */
+#error "One of CRT_BEGIN or CRT_END must be defined."
+#endif
diff --git a/libgcc/dfp-bit.h b/libgcc/dfp-bit.h
index 45b79086bc9..c97869baca1 100644
--- a/libgcc/dfp-bit.h
+++ b/libgcc/dfp-bit.h
@@ -1,5 +1,5 @@
/* Header file for dfp-bit.c.
- Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
@@ -33,6 +33,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tconfig.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
diff --git a/libgcc/divmod.c b/libgcc/divmod.c
new file mode 100644
index 00000000000..c227b99ccd2
--- /dev/null
+++ b/libgcc/divmod.c
@@ -0,0 +1,73 @@
+/* Copyright (C) 2000 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+long udivmodsi4 ();
+
+long
+__divsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+long
+__modsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
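[Editorial illustration, not part of the patch.] The net effect of the sign handling above matches C's truncating division: the quotient is negative when exactly one operand is, and the remainder takes the sign of the dividend. A self-contained check using a local stand-in for udivmodsi4 (the real helper is supplied elsewhere in libgcc):

  #include <assert.h>

  /* Stand-in with the contract assumed above: unsigned divide, returning
     the remainder when modwanted is nonzero.  */
  static unsigned long
  udivmodsi4_model (unsigned long a, unsigned long b, int modwanted)
  {
    return modwanted ? a % b : a / b;
  }

  int
  main (void)
  {
    /* __divsi3 (-7, 2): a's negation flips neg once -> -(7/2) == -3.  */
    assert (-(long) udivmodsi4_model (7, 2, 0) == -3);
    /* __modsi3 (-7, 2): only a < 0 sets neg -> -(7%2) == -1.  */
    assert (-(long) udivmodsi4_model (7, 2, 1) == -1);
    /* __modsi3 (7, -2): b's sign is discarded -> 7%2 == 1.  */
    assert ((long) udivmodsi4_model (7, 2, 1) == 1);
    return 0;
  }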
diff --git a/libgcc/emutls.c b/libgcc/emutls.c
index b7ee3bdfa7c..22ea4403edb 100644
--- a/libgcc/emutls.c
+++ b/libgcc/emutls.c
@@ -1,5 +1,5 @@
/* TLS emulation.
- Copyright (C) 2006, 2008, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2008, 2009, 2011 Free Software Foundation, Inc.
Contributed by Jakub Jelinek <jakub@redhat.com>.
This file is part of GCC.
@@ -27,6 +27,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "gthr.h"
typedef unsigned int word __attribute__((mode(word)));
diff --git a/libgcc/fixed-bit.c b/libgcc/fixed-bit.c
index d434d131eec..84e58155ccd 100644
--- a/libgcc/fixed-bit.c
+++ b/libgcc/fixed-bit.c
@@ -1,5 +1,5 @@
/* This is a software fixed-point library.
- Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2009, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -46,6 +46,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#ifndef MIN_UNITS_PER_WORD
#define MIN_UNITS_PER_WORD UNITS_PER_WORD
diff --git a/libgcc/floatunsidf.c b/libgcc/floatunsidf.c
new file mode 100644
index 00000000000..ff28112502b
--- /dev/null
+++ b/libgcc/floatunsidf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+DFtype
+__floatunsidf (USItype u)
+{
+ SItype s = (SItype) u;
+ DFtype r = (DFtype) s;
+ if (s < 0)
+ r += (DFtype)2.0 * (DFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
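[Editorial illustration, not part of the patch.] The correction term works because casting through SItype subtracts exactly 2^32 when the top bit of u is set, and 2.0 * 2^31 adds it back. A worked instance, assuming a 32-bit two's-complement int as libgcc does; the driver itself is hypothetical:

  #include <assert.h>

  int
  main (void)
  {
    unsigned u = 0x80000003u;           /* top bit set */
    double r = (double) (int) u;        /* == u - 4294967296.0 */
    if ((int) u < 0)
      r += 2.0 * (double) (1u << 31);   /* += 4294967296.0 */
    assert (r == 2147483651.0);         /* == (double) u, exactly */
    return 0;
  }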
diff --git a/libgcc/floatunsisf.c b/libgcc/floatunsisf.c
new file mode 100644
index 00000000000..11d4aa78cbe
--- /dev/null
+++ b/libgcc/floatunsisf.c
@@ -0,0 +1,18 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float SFtype __attribute__ ((mode (SF)));
+
+SFtype
+__floatunsisf (USItype u)
+{
+ SItype s = (SItype) u;
+ if (s < 0)
+ {
+ /* As in expand_float, compute (u & 1) | (u >> 1) to ensure
+ correct rounding if a nonzero bit is shifted out. */
+ return (SFtype) 2.0 * (SFtype) (SItype) ((u & 1) | (u >> 1));
+ }
+ else
+ return (SFtype) s;
+}
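[Editorial illustration, not part of the patch.] The (u & 1) | (u >> 1) trick keeps the shifted-out low bit "sticky", so halving, converting, and doubling rounds the same way a direct conversion would. A quick standalone check; the value in main is illustrative:

  #include <assert.h>

  int
  main (void)
  {
    unsigned u = 0xFFFFFFFFu;   /* 2^32 - 1: not exactly representable */
    float r = 2.0f * (float) (int) ((u & 1) | (u >> 1));
    assert (r == 4294967296.0f);   /* nearest float, matching a direct
                                      round-to-nearest conversion */
    return 0;
  }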
diff --git a/libgcc/floatunsitf.c b/libgcc/floatunsitf.c
new file mode 100644
index 00000000000..955d67666c5
--- /dev/null
+++ b/libgcc/floatunsitf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float TFtype __attribute__ ((mode (TF)));
+
+TFtype
+__floatunsitf (USItype u)
+{
+ SItype s = (SItype) u;
+ TFtype r = (TFtype) s;
+ if (s < 0)
+ r += (TFtype)2.0 * (TFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
diff --git a/libgcc/floatunsixf.c b/libgcc/floatunsixf.c
new file mode 100644
index 00000000000..52511688dad
--- /dev/null
+++ b/libgcc/floatunsixf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float XFtype __attribute__ ((mode (XF)));
+
+XFtype
+__floatunsixf (USItype u)
+{
+ SItype s = (SItype) u;
+ XFtype r = (XFtype) s;
+ if (s < 0)
+ r += (XFtype)2.0 * (XFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
diff --git a/libgcc/fp-bit.c b/libgcc/fp-bit.c
index de9b3ada5ec..7509f76f71e 100644
--- a/libgcc/fp-bit.c
+++ b/libgcc/fp-bit.c
@@ -37,6 +37,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tconfig.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "fp-bit.h"
/* The following macros can be defined to change the behavior of this file:
diff --git a/libgcc/gbl-ctors.h b/libgcc/gbl-ctors.h
new file mode 100644
index 00000000000..ac4faae1292
--- /dev/null
+++ b/libgcc/gbl-ctors.h
@@ -0,0 +1,87 @@
+/* Definitions relating to the special __do_global_init function used
+ for getting g++ file-scope static objects constructed. This file
+ will get included either by libgcc2.c (for systems that don't support
+ a .init section) or by crtstuff.c (for those that do).
+ Copyright (C) 1991, 1995, 1996, 1998, 1999, 2000, 2003, 2009
+ Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@segfault.us.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file contains definitions and declarations of things
+ relating to the normal start-up-time invocation of C++
+ file-scope static object constructors. These declarations
+ and definitions are used by *both* libgcc2.c and by crtstuff.c.
+
+ Note that this file should only be compiled with GCC.
+*/
+
+#ifndef GCC_GBL_CTORS_H
+#define GCC_GBL_CTORS_H
+
+/* Declare a pointer to void function type. */
+
+typedef void (*func_ptr) (void);
+
+/* Declare the set of symbols used as begin and end markers for the lists
+ of global object constructors and global object destructors. */
+
+extern func_ptr __CTOR_LIST__[];
+extern func_ptr __DTOR_LIST__[];
+
+/* Declare the routine which needs to get invoked at program start time. */
+
+extern void __do_global_ctors (void);
+
+/* Declare the routine which needs to get invoked at program exit time. */
+
+extern void __do_global_dtors (void);
+
+/* Define a macro with the code which needs to be executed at program
+ start-up time. This macro is used in two places in crtstuff.c (for
+ systems which support a .init section) and in one place in libgcc2.c
+ (for those system which do *not* support a .init section). For all
+ three places where this code might appear, it must be identical, so
+ we define it once here as a macro to avoid various instances getting
+ out-of-sync with one another. */
+
+/* Some systems place the number of pointers
+ in the first word of the table.
+ On other systems, that word is -1.
+ In all cases, the table is null-terminated.
+ If the length is not recorded, count up to the null. */
+
+/* Some systems use a different strategy for finding the ctors.
+ For example, svr3. */
+#ifndef DO_GLOBAL_CTORS_BODY
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ __SIZE_TYPE__ nptrs = (__SIZE_TYPE__) __CTOR_LIST__[0]; \
+ unsigned i; \
+ if (nptrs == (__SIZE_TYPE__)-1) \
+ for (nptrs = 0; __CTOR_LIST__[nptrs + 1] != 0; nptrs++); \
+ for (i = nptrs; i >= 1; i--) \
+ __CTOR_LIST__[i] (); \
+} while (0)
+#endif
+
+#endif /* GCC_GBL_CTORS_H */
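[Editorial illustration, not part of the patch.] A standalone model of the list protocol DO_GLOBAL_CTORS_BODY implements; the table contents are made up here (real lists are assembled by the linker from the crt objects), and __SIZE_TYPE__ is GCC's predefined macro, as used above. Note the reverse iteration: constructors later in the list run first:

  #include <stdio.h>

  typedef void (*func_ptr) (void);

  static void c1 (void) { puts ("ctor 1"); }
  static void c2 (void) { puts ("ctor 2"); }

  /* A -1 header means the count is unknown, so the walk counts up to the
     terminating null first, exactly as DO_GLOBAL_CTORS_BODY does.  */
  static func_ptr ctor_list[] = { (func_ptr) -1, c1, c2, 0 };

  int
  main (void)
  {
    __SIZE_TYPE__ nptrs = (__SIZE_TYPE__) ctor_list[0];
    unsigned i;

    if (nptrs == (__SIZE_TYPE__) -1)
      for (nptrs = 0; ctor_list[nptrs + 1] != 0; nptrs++);
    for (i = nptrs; i >= 1; i--)   /* prints "ctor 2", then "ctor 1" */
      ctor_list[i] ();
    return 0;
  }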
diff --git a/libgcc/generic-morestack-thread.c b/libgcc/generic-morestack-thread.c
index 794fdc3dc54..bbe6dd12b5a 100644
--- a/libgcc/generic-morestack-thread.c
+++ b/libgcc/generic-morestack-thread.c
@@ -27,6 +27,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
/* If inhibit_libc is defined, we can not compile this file. The
effect is that people will not be able to use -fsplit-stack. That
diff --git a/libgcc/generic-morestack.c b/libgcc/generic-morestack.c
index 0b660cedd5f..07bc2a66073 100644
--- a/libgcc/generic-morestack.c
+++ b/libgcc/generic-morestack.c
@@ -27,6 +27,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
/* If inhibit_libc is defined, we can not compile this file. The
effect is that people will not be able to use -fsplit-stack. That
diff --git a/libgcc/gthr-posix.h b/libgcc/gthr-posix.h
new file mode 100644
index 00000000000..46054f6a7c2
--- /dev/null
+++ b/libgcc/gthr-posix.h
@@ -0,0 +1,879 @@
+/* Threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+ 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_POSIX_H
+#define GCC_GTHR_POSIX_H
+
+/* POSIX threads specific definitions.
+   Easy, since the interface is just a one-to-one mapping. */
+
+#define __GTHREADS 1
+#define __GTHREADS_CXX0X 1
+
+/* Some implementations of <pthread.h> require this to be defined. */
+#if !defined(_REENTRANT) && defined(__osf__)
+#define _REENTRANT 1
+#endif
+
+#include <pthread.h>
+
+#if ((defined(_LIBOBJC) || defined(_LIBOBJC_WEAK)) \
+ || !defined(_GTHREAD_USE_MUTEX_TIMEDLOCK))
+# include <unistd.h>
+# if defined(_POSIX_TIMEOUTS) && _POSIX_TIMEOUTS >= 0
+# define _GTHREAD_USE_MUTEX_TIMEDLOCK 1
+# else
+# define _GTHREAD_USE_MUTEX_TIMEDLOCK 0
+# endif
+#endif
+
+typedef pthread_t __gthread_t;
+typedef pthread_key_t __gthread_key_t;
+typedef pthread_once_t __gthread_once_t;
+typedef pthread_mutex_t __gthread_mutex_t;
+typedef pthread_mutex_t __gthread_recursive_mutex_t;
+typedef pthread_cond_t __gthread_cond_t;
+typedef struct timespec __gthread_time_t;
+
+/* POSIX-like condition variables are supported.  Please look at comments
+ in gthr.h for details. */
+#define __GTHREAD_HAS_COND 1
+
+#define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+#define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT
+#if defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
+#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER
+#elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
+#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+#else
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+#endif
+#define __GTHREAD_COND_INIT PTHREAD_COND_INITIALIZER
+#define __GTHREAD_TIME_INIT {0,0}
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+# ifndef __gthrw_pragma
+# define __gthrw_pragma(pragma)
+# endif
+# define __gthrw2(name,name2,type) \
+ static __typeof(type) name __attribute__ ((__weakref__(#name2))); \
+ __gthrw_pragma(weak type)
+# define __gthrw_(name) __gthrw_ ## name
+#else
+# define __gthrw2(name,name2,type)
+# define __gthrw_(name) name
+#endif
+
+/* Typically, __gthrw_foo is a weak reference to symbol foo. */
+#define __gthrw(name) __gthrw2(__gthrw_ ## name,name,name)
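[Editorial illustration, not part of the patch.] The __gthrw2 expansion relies on GCC's weakref attribute. A minimal self-contained example of the pattern; foo is deliberately never defined, since whether it resolves is decided at link time, which is the point of the scheme:

  #include <stdio.h>

  void foo (void);   /* declared but intentionally not defined */

  /* What __gthrw (foo) would produce: a static alias that weakly
     references the real symbol.  */
  static __typeof (foo) wref_foo __attribute__ ((__weakref__ ("foo")));

  int
  main (void)
  {
    if (&wref_foo)            /* null when foo is absent from the link */
      wref_foo ();
    else
      puts ("foo not linked in");
    return 0;
  }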
+
+/* On Tru64, /usr/include/pthread.h uses #pragma extern_prefix "__" to
+ map a subset of the POSIX pthread API to mangled versions of their
+ names. */
+#if defined(__osf__) && defined(_PTHREAD_USE_MANGLED_NAMES_)
+#define __gthrw3(name) __gthrw2(__gthrw_ ## name, __ ## name, name)
+__gthrw3(pthread_once)
+__gthrw3(pthread_getspecific)
+__gthrw3(pthread_setspecific)
+
+__gthrw3(pthread_create)
+__gthrw3(pthread_join)
+__gthrw3(pthread_detach)
+__gthrw3(pthread_equal)
+__gthrw3(pthread_self)
+__gthrw3(pthread_cancel)
+__gthrw3(sched_yield)
+
+__gthrw3(pthread_mutex_lock)
+__gthrw3(pthread_mutex_trylock)
+#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+__gthrw3(pthread_mutex_timedlock)
+#endif
+__gthrw3(pthread_mutex_unlock)
+__gthrw3(pthread_mutex_init)
+__gthrw3(pthread_mutex_destroy)
+
+__gthrw3(pthread_cond_broadcast)
+__gthrw3(pthread_cond_signal)
+__gthrw3(pthread_cond_wait)
+__gthrw3(pthread_cond_timedwait)
+__gthrw3(pthread_cond_destroy)
+#else
+__gthrw(pthread_once)
+__gthrw(pthread_getspecific)
+__gthrw(pthread_setspecific)
+
+__gthrw(pthread_create)
+__gthrw(pthread_join)
+__gthrw(pthread_equal)
+__gthrw(pthread_self)
+__gthrw(pthread_detach)
+#ifndef __BIONIC__
+__gthrw(pthread_cancel)
+#endif
+__gthrw(sched_yield)
+
+__gthrw(pthread_mutex_lock)
+__gthrw(pthread_mutex_trylock)
+#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+__gthrw(pthread_mutex_timedlock)
+#endif
+__gthrw(pthread_mutex_unlock)
+__gthrw(pthread_mutex_init)
+__gthrw(pthread_mutex_destroy)
+
+__gthrw(pthread_cond_broadcast)
+__gthrw(pthread_cond_signal)
+__gthrw(pthread_cond_wait)
+__gthrw(pthread_cond_timedwait)
+__gthrw(pthread_cond_destroy)
+#endif
+
+__gthrw(pthread_key_create)
+__gthrw(pthread_key_delete)
+__gthrw(pthread_mutexattr_init)
+__gthrw(pthread_mutexattr_settype)
+__gthrw(pthread_mutexattr_destroy)
+
+
+#if defined(_LIBOBJC) || defined(_LIBOBJC_WEAK)
+/* Objective-C. */
+#if defined(__osf__) && defined(_PTHREAD_USE_MANGLED_NAMES_)
+__gthrw3(pthread_cond_init)
+__gthrw3(pthread_exit)
+#else
+__gthrw(pthread_cond_init)
+__gthrw(pthread_exit)
+#endif /* __osf__ && _PTHREAD_USE_MANGLED_NAMES_ */
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+__gthrw(sched_get_priority_max)
+__gthrw(sched_get_priority_min)
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+#endif /* _POSIX_PRIORITY_SCHEDULING */
+__gthrw(pthread_attr_destroy)
+__gthrw(pthread_attr_init)
+__gthrw(pthread_attr_setdetachstate)
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+__gthrw(pthread_getschedparam)
+__gthrw(pthread_setschedparam)
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+#endif /* _LIBOBJC || _LIBOBJC_WEAK */
+
+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
+
+/* On Solaris 2.6 up to 9, the libc exposes a POSIX threads interface even if
+   -pthreads is not specified.  The functions are dummies and most return an
+   error value.  However, pthread_once returns 0 without invoking the routine
+   it is passed, so we cannot pretend that the interface is active if -pthreads
+   is not specified.  On Solaris 2.5.1, the interface is not exposed at all, so
+   we need to play the usual game with weak symbols.  On Solaris 10 and up, a
+   working interface is always exposed.  On FreeBSD 6 and later, libc also
+   exposes a dummy POSIX threads interface, similar to what Solaris 2.6 up
+   to 9 does.  FreeBSD >= 700014 even provides a pthread_cancel stub in libc,
+   which means the alternate __gthread_active_p below cannot be used there.  */
+
+#if defined(__FreeBSD__) || (defined(__sun) && defined(__svr4__))
+
+static volatile int __gthread_active = -1;
+
+static void
+__gthread_trigger (void)
+{
+ __gthread_active = 1;
+}
+
+static inline int
+__gthread_active_p (void)
+{
+ static pthread_mutex_t __gthread_active_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static pthread_once_t __gthread_active_once = PTHREAD_ONCE_INIT;
+
+ /* Avoid reading __gthread_active twice on the main code path. */
+ int __gthread_active_latest_value = __gthread_active;
+
+  /* This test is not protected to avoid taking a lock on the main code
+     path, so every update of __gthread_active in a threaded program must
+     be atomic with regard to the result of the test.  */
+ if (__builtin_expect (__gthread_active_latest_value < 0, 0))
+ {
+ if (__gthrw_(pthread_once))
+ {
+ /* If this really is a threaded program, then we must ensure that
+ __gthread_active has been set to 1 before exiting this block. */
+ __gthrw_(pthread_mutex_lock) (&__gthread_active_mutex);
+ __gthrw_(pthread_once) (&__gthread_active_once, __gthread_trigger);
+ __gthrw_(pthread_mutex_unlock) (&__gthread_active_mutex);
+ }
+
+ /* Make sure we'll never enter this block again. */
+ if (__gthread_active < 0)
+ __gthread_active = 0;
+
+ __gthread_active_latest_value = __gthread_active;
+ }
+
+ return __gthread_active_latest_value != 0;
+}
+
+#else /* neither FreeBSD nor Solaris */
+
+static inline int
+__gthread_active_p (void)
+{
+/* Android's C library does not provide pthread_cancel, so check for
+   `pthread_create' instead.  */
+#ifndef __BIONIC__
+ static void *const __gthread_active_ptr
+ = __extension__ (void *) &__gthrw_(pthread_cancel);
+#else
+ static void *const __gthread_active_ptr
+ = __extension__ (void *) &__gthrw_(pthread_create);
+#endif
+ return __gthread_active_ptr != 0;
+}
+
+#endif /* FreeBSD or Solaris */
+
+#else /* not SUPPORTS_WEAK */
+
+/* Similar to Solaris, HP-UX 11 for PA-RISC provides stubs for pthread
+ calls in shared flavors of the HP-UX C library. Most of the stubs
+ have no functionality. The details are described in the "libc cumulative
+ patch" for each subversion of HP-UX 11. There are two special interfaces
+ provided for checking whether an application is linked to a shared pthread
+ library or not. However, these interfaces aren't available in early
+ libpthread libraries. We also need a test that works for archive
+ libraries. We can't use pthread_once as some libc versions call the
+ init function. We also can't use pthread_create or pthread_attr_init
+ as these create a thread and thereby prevent changing the default stack
+ size. The function pthread_default_stacksize_np is available in both
+ the archive and shared versions of libpthread. It can be used to
+ determine the default pthread stack size. There is a stub in some
+ shared libc versions which returns a zero size if pthreads are not
+ active. We provide an equivalent stub to handle cases where libc
+ doesn't provide one. */
+
+#if defined(__hppa__) && defined(__hpux__)
+
+static volatile int __gthread_active = -1;
+
+static inline int
+__gthread_active_p (void)
+{
+ /* Avoid reading __gthread_active twice on the main code path. */
+ int __gthread_active_latest_value = __gthread_active;
+ size_t __s;
+
+ if (__builtin_expect (__gthread_active_latest_value < 0, 0))
+ {
+ pthread_default_stacksize_np (0, &__s);
+ __gthread_active = __s ? 1 : 0;
+ __gthread_active_latest_value = __gthread_active;
+ }
+
+ return __gthread_active_latest_value != 0;
+}
+
+#else /* not hppa-hpux */
+
+static inline int
+__gthread_active_p (void)
+{
+ return 1;
+}
+
+#endif /* hppa-hpux */
+
+#endif /* SUPPORTS_WEAK */
+
+#ifdef _LIBOBJC
+
+/* This is the config.h file in libobjc/ */
+#include <config.h>
+
+#ifdef HAVE_SCHED_H
+# include <sched.h>
+#endif
+
+/* Key structure for maintaining thread specific storage */
+static pthread_key_t _objc_thread_storage;
+static pthread_attr_t _objc_thread_attribs;
+
+/* Thread local storage for a single thread */
+static void *thread_local_storage = NULL;
+
+/* Backend initialization functions */
+
+/* Initialize the threads subsystem. */
+static inline int
+__gthread_objc_init_thread_system (void)
+{
+ if (__gthread_active_p ())
+ {
+ /* Initialize the thread storage key. */
+ if (__gthrw_(pthread_key_create) (&_objc_thread_storage, NULL) == 0)
+ {
+	  /* The normal default detach state for threads is
+	   * PTHREAD_CREATE_JOINABLE, which keeps a thread's resources
+	   * around until it is joined; we create detached threads instead.  */
+ if (__gthrw_(pthread_attr_init) (&_objc_thread_attribs) == 0
+ && __gthrw_(pthread_attr_setdetachstate) (&_objc_thread_attribs,
+ PTHREAD_CREATE_DETACHED) == 0)
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+/* Close the threads subsystem. */
+static inline int
+__gthread_objc_close_thread_system (void)
+{
+ if (__gthread_active_p ()
+ && __gthrw_(pthread_key_delete) (_objc_thread_storage) == 0
+ && __gthrw_(pthread_attr_destroy) (&_objc_thread_attribs) == 0)
+ return 0;
+
+ return -1;
+}
+
+/* Backend thread functions */
+
+/* Create a new thread of execution. */
+static inline objc_thread_t
+__gthread_objc_thread_detach (void (*func)(void *), void *arg)
+{
+ objc_thread_t thread_id;
+ pthread_t new_thread_handle;
+
+ if (!__gthread_active_p ())
+ return NULL;
+
+ if (!(__gthrw_(pthread_create) (&new_thread_handle, &_objc_thread_attribs,
+ (void *) func, arg)))
+ thread_id = (objc_thread_t) new_thread_handle;
+ else
+ thread_id = NULL;
+
+ return thread_id;
+}
+
+/* Set the current thread's priority. */
+static inline int
+__gthread_objc_thread_set_priority (int priority)
+{
+ if (!__gthread_active_p ())
+ return -1;
+ else
+ {
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+ pthread_t thread_id = __gthrw_(pthread_self) ();
+ int policy;
+ struct sched_param params;
+ int priority_min, priority_max;
+
+ if (__gthrw_(pthread_getschedparam) (thread_id, &policy, &params) == 0)
+ {
+ if ((priority_max = __gthrw_(sched_get_priority_max) (policy)) == -1)
+ return -1;
+
+ if ((priority_min = __gthrw_(sched_get_priority_min) (policy)) == -1)
+ return -1;
+
+ if (priority > priority_max)
+ priority = priority_max;
+ else if (priority < priority_min)
+ priority = priority_min;
+ params.sched_priority = priority;
+
+	  /*
+	   * The Solaris 7 and several other man pages incorrectly state
+	   * that this should be a pointer to the policy, but pthread.h is
+	   * universally at odds with this.
+	   */
+ if (__gthrw_(pthread_setschedparam) (thread_id, policy, &params) == 0)
+ return 0;
+ }
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+#endif /* _POSIX_PRIORITY_SCHEDULING */
+ return -1;
+ }
+}
+
+/* Return the current thread's priority. */
+static inline int
+__gthread_objc_thread_get_priority (void)
+{
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#ifdef _POSIX_THREAD_PRIORITY_SCHEDULING
+ if (__gthread_active_p ())
+ {
+ int policy;
+ struct sched_param params;
+
+ if (__gthrw_(pthread_getschedparam) (__gthrw_(pthread_self) (), &policy, &params) == 0)
+ return params.sched_priority;
+ else
+ return -1;
+ }
+ else
+#endif /* _POSIX_THREAD_PRIORITY_SCHEDULING */
+#endif /* _POSIX_PRIORITY_SCHEDULING */
+ return OBJC_THREAD_INTERACTIVE_PRIORITY;
+}
+
+/* Yield our process time to another thread. */
+static inline void
+__gthread_objc_thread_yield (void)
+{
+ if (__gthread_active_p ())
+ __gthrw_(sched_yield) ();
+}
+
+/* Terminate the current thread. */
+static inline int
+__gthread_objc_thread_exit (void)
+{
+ if (__gthread_active_p ())
+ /* exit the thread */
+ __gthrw_(pthread_exit) (&__objc_thread_exit_status);
+
+ /* Failed if we reached here */
+ return -1;
+}
+
+/* Returns an integer value which uniquely describes a thread. */
+static inline objc_thread_t
+__gthread_objc_thread_id (void)
+{
+ if (__gthread_active_p ())
+ return (objc_thread_t) __gthrw_(pthread_self) ();
+ else
+ return (objc_thread_t) 1;
+}
+
+/* Sets the thread's local storage pointer. */
+static inline int
+__gthread_objc_thread_set_data (void *value)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_setspecific) (_objc_thread_storage, value);
+ else
+ {
+ thread_local_storage = value;
+ return 0;
+ }
+}
+
+/* Returns the thread's local storage pointer. */
+static inline void *
+__gthread_objc_thread_get_data (void)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_getspecific) (_objc_thread_storage);
+ else
+ return thread_local_storage;
+}
+
+/* Backend mutex functions */
+
+/* Allocate a mutex. */
+static inline int
+__gthread_objc_mutex_allocate (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ {
+ mutex->backend = objc_malloc (sizeof (pthread_mutex_t));
+
+ if (__gthrw_(pthread_mutex_init) ((pthread_mutex_t *) mutex->backend, NULL))
+ {
+ objc_free (mutex->backend);
+ mutex->backend = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Deallocate a mutex. */
+static inline int
+__gthread_objc_mutex_deallocate (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ {
+ int count;
+
+      /*
+       * POSIX threads specifically requires that the mutex be unlocked
+       * for __gthrw_(pthread_mutex_destroy) to work.
+       */
+
+ do
+ {
+ count = __gthrw_(pthread_mutex_unlock) ((pthread_mutex_t *) mutex->backend);
+ if (count < 0)
+ return -1;
+ }
+ while (count);
+
+ if (__gthrw_(pthread_mutex_destroy) ((pthread_mutex_t *) mutex->backend))
+ return -1;
+
+ objc_free (mutex->backend);
+ mutex->backend = NULL;
+ }
+ return 0;
+}
+
+/* Grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_lock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ()
+ && __gthrw_(pthread_mutex_lock) ((pthread_mutex_t *) mutex->backend) != 0)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Try to grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_trylock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ()
+ && __gthrw_(pthread_mutex_trylock) ((pthread_mutex_t *) mutex->backend) != 0)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Unlock the mutex */
+static inline int
+__gthread_objc_mutex_unlock (objc_mutex_t mutex)
+{
+ if (__gthread_active_p ()
+ && __gthrw_(pthread_mutex_unlock) ((pthread_mutex_t *) mutex->backend) != 0)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Backend condition mutex functions */
+
+/* Allocate a condition. */
+static inline int
+__gthread_objc_condition_allocate (objc_condition_t condition)
+{
+ if (__gthread_active_p ())
+ {
+ condition->backend = objc_malloc (sizeof (pthread_cond_t));
+
+ if (__gthrw_(pthread_cond_init) ((pthread_cond_t *) condition->backend, NULL))
+ {
+ objc_free (condition->backend);
+ condition->backend = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Deallocate a condition. */
+static inline int
+__gthread_objc_condition_deallocate (objc_condition_t condition)
+{
+ if (__gthread_active_p ())
+ {
+ if (__gthrw_(pthread_cond_destroy) ((pthread_cond_t *) condition->backend))
+ return -1;
+
+ objc_free (condition->backend);
+ condition->backend = NULL;
+ }
+ return 0;
+}
+
+/* Wait on the condition */
+static inline int
+__gthread_objc_condition_wait (objc_condition_t condition, objc_mutex_t mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_cond_wait) ((pthread_cond_t *) condition->backend,
+ (pthread_mutex_t *) mutex->backend);
+ else
+ return 0;
+}
+
+/* Wake up all threads waiting on this condition. */
+static inline int
+__gthread_objc_condition_broadcast (objc_condition_t condition)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_cond_broadcast) ((pthread_cond_t *) condition->backend);
+ else
+ return 0;
+}
+
+/* Wake up one thread waiting on this condition. */
+static inline int
+__gthread_objc_condition_signal (objc_condition_t condition)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_cond_signal) ((pthread_cond_t *) condition->backend);
+ else
+ return 0;
+}
+
+#else /* _LIBOBJC */
+
+static inline int
+__gthread_create (__gthread_t *__threadid, void *(*__func) (void*),
+ void *__args)
+{
+ return __gthrw_(pthread_create) (__threadid, NULL, __func, __args);
+}
+
+static inline int
+__gthread_join (__gthread_t __threadid, void **__value_ptr)
+{
+ return __gthrw_(pthread_join) (__threadid, __value_ptr);
+}
+
+static inline int
+__gthread_detach (__gthread_t __threadid)
+{
+ return __gthrw_(pthread_detach) (__threadid);
+}
+
+static inline int
+__gthread_equal (__gthread_t __t1, __gthread_t __t2)
+{
+ return __gthrw_(pthread_equal) (__t1, __t2);
+}
+
+static inline __gthread_t
+__gthread_self (void)
+{
+ return __gthrw_(pthread_self) ();
+}
+
+static inline int
+__gthread_yield (void)
+{
+ return __gthrw_(sched_yield) ();
+}
+
+static inline int
+__gthread_once (__gthread_once_t *__once, void (*__func) (void))
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_once) (__once, __func);
+ else
+ return -1;
+}
+
+static inline int
+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
+{
+ return __gthrw_(pthread_key_create) (__key, __dtor);
+}
+
+static inline int
+__gthread_key_delete (__gthread_key_t __key)
+{
+ return __gthrw_(pthread_key_delete) (__key);
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key)
+{
+ return __gthrw_(pthread_getspecific) (__key);
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
+{
+ return __gthrw_(pthread_setspecific) (__key, __ptr);
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_destroy) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_lock) (__mutex);
+ else
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_trylock) (__mutex);
+ else
+ return 0;
+}
+
+#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+static inline int
+__gthread_mutex_timedlock (__gthread_mutex_t *__mutex,
+ const __gthread_time_t *__abs_timeout)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_timedlock) (__mutex, __abs_timeout);
+ else
+ return 0;
+}
+#endif
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_mutex_unlock) (__mutex);
+ else
+ return 0;
+}
+
+#ifndef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+static inline int
+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
+{
+ if (__gthread_active_p ())
+ {
+ pthread_mutexattr_t __attr;
+ int __r;
+
+ __r = __gthrw_(pthread_mutexattr_init) (&__attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_settype) (&__attr,
+ PTHREAD_MUTEX_RECURSIVE);
+ if (!__r)
+ __r = __gthrw_(pthread_mutex_init) (__mutex, &__attr);
+ if (!__r)
+ __r = __gthrw_(pthread_mutexattr_destroy) (&__attr);
+ return __r;
+ }
+ return 0;
+}
+#endif
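+
+/* Usage sketch (hypothetical caller): when no static initializer is
+   available, a client of this header initializes a recursive mutex via
+
+     static __gthread_recursive_mutex_t __m;
+     ...
+     __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION (&__m);
+
+   which resolves to the function above.  */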
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_lock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_trylock (__mutex);
+}
+
+#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+static inline int
+__gthread_recursive_mutex_timedlock (__gthread_recursive_mutex_t *__mutex,
+ const __gthread_time_t *__abs_timeout)
+{
+ return __gthread_mutex_timedlock (__mutex, __abs_timeout);
+}
+#endif
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_unlock (__mutex);
+}
+
+static inline int
+__gthread_cond_broadcast (__gthread_cond_t *__cond)
+{
+ return __gthrw_(pthread_cond_broadcast) (__cond);
+}
+
+static inline int
+__gthread_cond_signal (__gthread_cond_t *__cond)
+{
+ return __gthrw_(pthread_cond_signal) (__cond);
+}
+
+static inline int
+__gthread_cond_wait (__gthread_cond_t *__cond, __gthread_mutex_t *__mutex)
+{
+ return __gthrw_(pthread_cond_wait) (__cond, __mutex);
+}
+
+static inline int
+__gthread_cond_timedwait (__gthread_cond_t *__cond, __gthread_mutex_t *__mutex,
+ const __gthread_time_t *__abs_timeout)
+{
+ return __gthrw_(pthread_cond_timedwait) (__cond, __mutex, __abs_timeout);
+}
+
+static inline int
+__gthread_cond_wait_recursive (__gthread_cond_t *__cond,
+ __gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_cond_wait (__cond, __mutex);
+}
+
+static inline int
+__gthread_cond_timedwait_recursive (__gthread_cond_t *__cond,
+ __gthread_recursive_mutex_t *__mutex,
+ const __gthread_time_t *__abs_timeout)
+{
+ return __gthread_cond_timedwait (__cond, __mutex, __abs_timeout);
+}
+
+static inline int
+__gthread_cond_destroy (__gthread_cond_t* __cond)
+{
+ return __gthrw_(pthread_cond_destroy) (__cond);
+}
+
+#endif /* _LIBOBJC */
+
+#endif /* ! GCC_GTHR_POSIX_H */
diff --git a/libgcc/gthr-single.h b/libgcc/gthr-single.h
new file mode 100644
index 00000000000..357528ad1f1
--- /dev/null
+++ b/libgcc/gthr-single.h
@@ -0,0 +1,292 @@
+/* Threads compatibility routines for libgcc2 and libobjc. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1999, 2000, 2004, 2008, 2009
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_SINGLE_H
+#define GCC_GTHR_SINGLE_H
+
+/* Just provide compatibility for mutex handling. */
+
+typedef int __gthread_key_t;
+typedef int __gthread_once_t;
+typedef int __gthread_mutex_t;
+typedef int __gthread_recursive_mutex_t;
+
+#define __GTHREAD_ONCE_INIT 0
+#define __GTHREAD_MUTEX_INIT 0
+#define __GTHREAD_RECURSIVE_MUTEX_INIT 0
+
+#define UNUSED __attribute__((unused))
+
+#ifdef _LIBOBJC
+
+/* Thread local storage for a single thread */
+static void *thread_local_storage = NULL;
+
+/* Backend initialization functions */
+
+/* Initialize the threads subsystem. */
+static inline int
+__gthread_objc_init_thread_system (void)
+{
+ /* No thread support available */
+ return -1;
+}
+
+/* Close the threads subsystem. */
+static inline int
+__gthread_objc_close_thread_system (void)
+{
+ /* No thread support available */
+ return -1;
+}
+
+/* Backend thread functions */
+
+/* Create a new thread of execution. */
+static inline objc_thread_t
+__gthread_objc_thread_detach (void (* func)(void *), void * arg UNUSED)
+{
+ /* No thread support available */
+ return NULL;
+}
+
+/* Set the current thread's priority. */
+static inline int
+__gthread_objc_thread_set_priority (int priority UNUSED)
+{
+ /* No thread support available */
+ return -1;
+}
+
+/* Return the current thread's priority. */
+static inline int
+__gthread_objc_thread_get_priority (void)
+{
+ return OBJC_THREAD_INTERACTIVE_PRIORITY;
+}
+
+/* Yield our process time to another thread. */
+static inline void
+__gthread_objc_thread_yield (void)
+{
+ return;
+}
+
+/* Terminate the current thread. */
+static inline int
+__gthread_objc_thread_exit (void)
+{
+ /* No thread support available */
+  /* Should we really exit the program?  */
+ /* exit (&__objc_thread_exit_status); */
+ return -1;
+}
+
+/* Returns an integer value which uniquely describes a thread. */
+static inline objc_thread_t
+__gthread_objc_thread_id (void)
+{
+ /* No thread support, use 1. */
+ return (objc_thread_t) 1;
+}
+
+/* Sets the thread's local storage pointer. */
+static inline int
+__gthread_objc_thread_set_data (void *value)
+{
+ thread_local_storage = value;
+ return 0;
+}
+
+/* Returns the thread's local storage pointer. */
+static inline void *
+__gthread_objc_thread_get_data (void)
+{
+ return thread_local_storage;
+}
+
+/* Backend mutex functions */
+
+/* Allocate a mutex. */
+static inline int
+__gthread_objc_mutex_allocate (objc_mutex_t mutex UNUSED)
+{
+ return 0;
+}
+
+/* Deallocate a mutex. */
+static inline int
+__gthread_objc_mutex_deallocate (objc_mutex_t mutex UNUSED)
+{
+ return 0;
+}
+
+/* Grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_lock (objc_mutex_t mutex UNUSED)
+{
+ /* There can only be one thread, so we always get the lock */
+ return 0;
+}
+
+/* Try to grab a lock on a mutex. */
+static inline int
+__gthread_objc_mutex_trylock (objc_mutex_t mutex UNUSED)
+{
+ /* There can only be one thread, so we always get the lock */
+ return 0;
+}
+
+/* Unlock the mutex */
+static inline int
+__gthread_objc_mutex_unlock (objc_mutex_t mutex UNUSED)
+{
+ return 0;
+}
+
+/* Backend condition mutex functions */
+
+/* Allocate a condition. */
+static inline int
+__gthread_objc_condition_allocate (objc_condition_t condition UNUSED)
+{
+ return 0;
+}
+
+/* Deallocate a condition. */
+static inline int
+__gthread_objc_condition_deallocate (objc_condition_t condition UNUSED)
+{
+ return 0;
+}
+
+/* Wait on the condition */
+static inline int
+__gthread_objc_condition_wait (objc_condition_t condition UNUSED,
+ objc_mutex_t mutex UNUSED)
+{
+ return 0;
+}
+
+/* Wake up all threads waiting on this condition. */
+static inline int
+__gthread_objc_condition_broadcast (objc_condition_t condition UNUSED)
+{
+ return 0;
+}
+
+/* Wake up one thread waiting on this condition. */
+static inline int
+__gthread_objc_condition_signal (objc_condition_t condition UNUSED)
+{
+ return 0;
+}
+
+#else /* _LIBOBJC */
+
+static inline int
+__gthread_active_p (void)
+{
+ return 0;
+}
+
+static inline int
+__gthread_once (__gthread_once_t *__once UNUSED, void (*__func) (void) UNUSED)
+{
+ return 0;
+}
+
+static inline int UNUSED
+__gthread_key_create (__gthread_key_t *__key UNUSED, void (*__func) (void *) UNUSED)
+{
+ return 0;
+}
+
+static int UNUSED
+__gthread_key_delete (__gthread_key_t __key UNUSED)
+{
+ return 0;
+}
+
+static inline void *
+__gthread_getspecific (__gthread_key_t __key UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_setspecific (__gthread_key_t __key UNUSED, const void *__v UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_destroy (__gthread_mutex_t *__mutex UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_lock (__gthread_mutex_t *__mutex UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_trylock (__gthread_mutex_t *__mutex UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_mutex_unlock (__gthread_mutex_t *__mutex UNUSED)
+{
+ return 0;
+}
+
+static inline int
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_lock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_trylock (__mutex);
+}
+
+static inline int
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
+{
+ return __gthread_mutex_unlock (__mutex);
+}
+
+#endif /* _LIBOBJC */
+
+#undef UNUSED
+
+#endif /* ! GCC_GTHR_SINGLE_H */
diff --git a/libgcc/gthr.h b/libgcc/gthr.h
new file mode 100644
index 00000000000..607c26eb563
--- /dev/null
+++ b/libgcc/gthr.h
@@ -0,0 +1,148 @@
+/* Threads compatibility routines for libgcc2. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1997, 1998, 2004, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GTHR_H
+#define GCC_GTHR_H
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+/* If this file is compiled with threads support, it must
+       #define __GTHREADS 1
+   to indicate that threads support is present.  It must also define
+   the function
+     int __gthread_active_p ()
+   that returns 1 if the thread system is active, 0 if not.
+
+ The threads interface must define the following types:
+ __gthread_key_t
+ __gthread_once_t
+ __gthread_mutex_t
+ __gthread_recursive_mutex_t
+
+ The threads interface must define the following macros:
+
+ __GTHREAD_ONCE_INIT
+ to initialize __gthread_once_t
+ __GTHREAD_MUTEX_INIT
+ to initialize __gthread_mutex_t to get a fast
+ non-recursive mutex.
+ __GTHREAD_MUTEX_INIT_FUNCTION
+ some systems can't initialize a mutex without a
+ function call. On such systems, define this to a
+ function which looks like this:
+ void __GTHREAD_MUTEX_INIT_FUNCTION (__gthread_mutex_t *)
+			Don't define __GTHREAD_MUTEX_INIT in this case.
+ __GTHREAD_RECURSIVE_MUTEX_INIT
+ __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION
+ as above, but for a recursive mutex.
+
+ The threads interface must define the following static functions:
+
+ int __gthread_once (__gthread_once_t *once, void (*func) ())
+
+ int __gthread_key_create (__gthread_key_t *keyp, void (*dtor) (void *))
+ int __gthread_key_delete (__gthread_key_t key)
+
+ void *__gthread_getspecific (__gthread_key_t key)
+ int __gthread_setspecific (__gthread_key_t key, const void *ptr)
+
+ int __gthread_mutex_destroy (__gthread_mutex_t *mutex);
+
+ int __gthread_mutex_lock (__gthread_mutex_t *mutex);
+ int __gthread_mutex_trylock (__gthread_mutex_t *mutex);
+ int __gthread_mutex_unlock (__gthread_mutex_t *mutex);
+
+ int __gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex);
+ int __gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex);
+ int __gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex);
+
+ The following are supported in POSIX threads only. They are required to
+ fix a deadlock in static initialization inside libsupc++. The header file
+ gthr-posix.h defines a symbol __GTHREAD_HAS_COND to signify that these extra
+ features are supported.
+
+ Types:
+ __gthread_cond_t
+
+ Macros:
+ __GTHREAD_COND_INIT
+ __GTHREAD_COND_INIT_FUNCTION
+
+ Interface:
+ int __gthread_cond_broadcast (__gthread_cond_t *cond);
+ int __gthread_cond_wait (__gthread_cond_t *cond, __gthread_mutex_t *mutex);
+ int __gthread_cond_wait_recursive (__gthread_cond_t *cond,
+ __gthread_recursive_mutex_t *mutex);
+
+ All functions returning int should return zero on success or the error
+ number. If the operation is not supported, -1 is returned.
+
+ If the following are also defined, you should
+ #define __GTHREADS_CXX0X 1
+   to enable the C++0x thread library.
+
+ Types:
+ __gthread_t
+ __gthread_time_t
+
+ Interface:
+ int __gthread_create (__gthread_t *thread, void *(*func) (void*),
+ void *args);
+ int __gthread_join (__gthread_t thread, void **value_ptr);
+ int __gthread_detach (__gthread_t thread);
+ int __gthread_equal (__gthread_t t1, __gthread_t t2);
+ __gthread_t __gthread_self (void);
+ int __gthread_yield (void);
+
+ int __gthread_mutex_timedlock (__gthread_mutex_t *m,
+ const __gthread_time_t *abs_timeout);
+ int __gthread_recursive_mutex_timedlock (__gthread_recursive_mutex_t *m,
+ const __gthread_time_t *abs_time);
+
+ int __gthread_cond_signal (__gthread_cond_t *cond);
+ int __gthread_cond_timedwait (__gthread_cond_t *cond,
+ __gthread_mutex_t *mutex,
+ const __gthread_time_t *abs_timeout);
+ int __gthread_cond_timedwait_recursive (__gthread_cond_t *cond,
+ __gthread_recursive_mutex_t *mutex,
+					   const __gthread_time_t *abs_time);
+
+*/
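+
+/* A minimal client sketch (hypothetical): the canonical once-guarded
+   initialization built from this interface is
+
+     static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
+     static void __init (void) { ... }
+     ...
+     __gthread_once (&__once, __init);
+
+   where a -1 return means the operation is unsupported.  */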
+
+#if SUPPORTS_WEAK
+#ifndef GTHREAD_USE_WEAK
+#define GTHREAD_USE_WEAK 1
+#endif
+#endif
+#include "gthr-default.h"
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility pop
+#endif
+
+#endif /* ! GCC_GTHR_H */
diff --git a/libgcc/libgcc2.c b/libgcc/libgcc2.c
new file mode 100644
index 00000000000..b672bcbdd41
--- /dev/null
+++ b/libgcc/libgcc2.c
@@ -0,0 +1,2253 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+
+#ifdef HAVE_GAS_HIDDEN
+#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
+#else
+#define ATTRIBUTE_HIDDEN
+#endif
+
+/* Work out the largest "word" size that we can deal with on this target. */
+#if MIN_UNITS_PER_WORD > 4
+# define LIBGCC2_MAX_UNITS_PER_WORD 8
+#elif (MIN_UNITS_PER_WORD > 2 \
+ || (MIN_UNITS_PER_WORD > 1 && __SIZEOF_LONG_LONG__ > 4))
+# define LIBGCC2_MAX_UNITS_PER_WORD 4
+#else
+# define LIBGCC2_MAX_UNITS_PER_WORD MIN_UNITS_PER_WORD
+#endif
+
+/* Work out what word size we are using for this compilation.
+ The value can be set on the command line. */
+#ifndef LIBGCC2_UNITS_PER_WORD
+#define LIBGCC2_UNITS_PER_WORD LIBGCC2_MAX_UNITS_PER_WORD
+#endif
+
+#if LIBGCC2_UNITS_PER_WORD <= LIBGCC2_MAX_UNITS_PER_WORD
+
+#include "libgcc2.h"
+
+#ifdef DECLARE_LIBRARY_RENAMES
+ DECLARE_LIBRARY_RENAMES
+#endif
+
+#if defined (L_negdi2)
+DWtype
+__negdi2 (DWtype u)
+{
+ const DWunion uu = {.ll = u};
+ const DWunion w = { {.low = -uu.s.low,
+ .high = -uu.s.high - ((UWtype) -uu.s.low > 0) } };
+
+ return w.ll;
+}
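+
+/* The borrow term (-uu.s.low > 0) propagates the carry of the two's
+   complement negation: negating the low word borrows from the high word
+   exactly when the low word is nonzero.  E.g. __negdi2 (1) with 32-bit
+   words gives low = 0xffffffff, high = -0 - 1 = -1, i.e. DWtype -1.  */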
+#endif
+
+#ifdef L_addvsi3
+Wtype
+__addvSI3 (Wtype a, Wtype b)
+{
+ const Wtype w = (UWtype) a + (UWtype) b;
+
+ if (b >= 0 ? w < a : w > a)
+ abort ();
+
+ return w;
+}
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+SItype
+__addvsi3 (SItype a, SItype b)
+{
+ const SItype w = (USItype) a + (USItype) b;
+
+ if (b >= 0 ? w < a : w > a)
+ abort ();
+
+ return w;
+}
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+#endif
+
+#ifdef L_addvdi3
+DWtype
+__addvDI3 (DWtype a, DWtype b)
+{
+ const DWtype w = (UDWtype) a + (UDWtype) b;
+
+ if (b >= 0 ? w < a : w > a)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_subvsi3
+Wtype
+__subvSI3 (Wtype a, Wtype b)
+{
+ const Wtype w = (UWtype) a - (UWtype) b;
+
+ if (b >= 0 ? w > a : w < a)
+ abort ();
+
+ return w;
+}
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+SItype
+__subvsi3 (SItype a, SItype b)
+{
+ const SItype w = (USItype) a - (USItype) b;
+
+ if (b >= 0 ? w > a : w < a)
+ abort ();
+
+ return w;
+}
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+#endif
+
+#ifdef L_subvdi3
+DWtype
+__subvDI3 (DWtype a, DWtype b)
+{
+ const DWtype w = (UDWtype) a - (UDWtype) b;
+
+ if (b >= 0 ? w > a : w < a)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_mulvsi3
+Wtype
+__mulvSI3 (Wtype a, Wtype b)
+{
+ const DWtype w = (DWtype) a * (DWtype) b;
+
+ if ((Wtype) (w >> W_TYPE_SIZE) != (Wtype) w >> (W_TYPE_SIZE - 1))
+ abort ();
+
+ return w;
+}
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+#undef WORD_SIZE
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+SItype
+__mulvsi3 (SItype a, SItype b)
+{
+ const DItype w = (DItype) a * (DItype) b;
+
+ if ((SItype) (w >> WORD_SIZE) != (SItype) w >> (WORD_SIZE-1))
+ abort ();
+
+ return w;
+}
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+#endif
+
+#ifdef L_negvsi2
+Wtype
+__negvSI2 (Wtype a)
+{
+ const Wtype w = -(UWtype) a;
+
+ if (a >= 0 ? w > 0 : w < 0)
+ abort ();
+
+ return w;
+}
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+SItype
+__negvsi2 (SItype a)
+{
+ const SItype w = -(USItype) a;
+
+ if (a >= 0 ? w > 0 : w < 0)
+ abort ();
+
+ return w;
+}
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+#endif
+
+#ifdef L_negvdi2
+DWtype
+__negvDI2 (DWtype a)
+{
+ const DWtype w = -(UDWtype) a;
+
+ if (a >= 0 ? w > 0 : w < 0)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_absvsi2
+Wtype
+__absvSI2 (Wtype a)
+{
+ Wtype w = a;
+
+ if (a < 0)
+#ifdef L_negvsi2
+ w = __negvSI2 (a);
+#else
+ w = -(UWtype) a;
+
+ if (w < 0)
+ abort ();
+#endif
+
+ return w;
+}
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+SItype
+__absvsi2 (SItype a)
+{
+ SItype w = a;
+
+ if (a < 0)
+#ifdef L_negvsi2
+ w = __negvsi2 (a);
+#else
+ w = -(USItype) a;
+
+ if (w < 0)
+ abort ();
+#endif
+
+ return w;
+}
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+#endif
+
+#ifdef L_absvdi2
+DWtype
+__absvDI2 (DWtype a)
+{
+ DWtype w = a;
+
+ if (a < 0)
+#ifdef L_negvdi2
+ w = __negvDI2 (a);
+#else
+ w = -(UDWtype) a;
+
+ if (w < 0)
+ abort ();
+#endif
+
+ return w;
+}
+#endif
+
+#ifdef L_mulvdi3
+DWtype
+__mulvDI3 (DWtype u, DWtype v)
+{
+  /* The unchecked multiplication needs three Wtype x Wtype multiplications,
+     but the checked multiplication needs only two.  */
+ const DWunion uu = {.ll = u};
+ const DWunion vv = {.ll = v};
+
+ if (__builtin_expect (uu.s.high == uu.s.low >> (W_TYPE_SIZE - 1), 1))
+ {
+ /* u fits in a single Wtype. */
+ if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
+ {
+ /* v fits in a single Wtype as well. */
+ /* A single multiplication. No overflow risk. */
+ return (DWtype) uu.s.low * (DWtype) vv.s.low;
+ }
+ else
+ {
+ /* Two multiplications. */
+ DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low};
+ DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.high};
+
+ if (vv.s.high < 0)
+ w1.s.high -= uu.s.low;
+ if (uu.s.low < 0)
+ w1.ll -= vv.ll;
+ w1.ll += (UWtype) w0.s.high;
+ if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
+ {
+ w0.s.high = w1.s.low;
+ return w0.ll;
+ }
+ }
+ }
+ else
+ {
+ if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
+ {
+ /* v fits into a single Wtype. */
+ /* Two multiplications. */
+ DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low};
+ DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.high
+ * (UDWtype) (UWtype) vv.s.low};
+
+ if (uu.s.high < 0)
+ w1.s.high -= vv.s.low;
+ if (vv.s.low < 0)
+ w1.ll -= uu.ll;
+ w1.ll += (UWtype) w0.s.high;
+ if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
+ {
+ w0.s.high = w1.s.low;
+ return w0.ll;
+ }
+ }
+ else
+ {
+ /* A few sign checks and a single multiplication. */
+ if (uu.s.high >= 0)
+ {
+ if (vv.s.high >= 0)
+ {
+ if (uu.s.high == 0 && vv.s.high == 0)
+ {
+ const DWtype w = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low;
+ if (__builtin_expect (w >= 0, 1))
+ return w;
+ }
+ }
+ else
+ {
+ if (uu.s.high == 0 && vv.s.high == (Wtype) -1)
+ {
+ DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low};
+
+ ww.s.high -= uu.s.low;
+ if (__builtin_expect (ww.s.high < 0, 1))
+ return ww.ll;
+ }
+ }
+ }
+ else
+ {
+ if (vv.s.high >= 0)
+ {
+ if (uu.s.high == (Wtype) -1 && vv.s.high == 0)
+ {
+ DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low};
+
+ ww.s.high -= vv.s.low;
+ if (__builtin_expect (ww.s.high < 0, 1))
+ return ww.ll;
+ }
+ }
+ else
+ {
+	      if (uu.s.high == (Wtype) -1 && vv.s.high == (Wtype) -1)
+ {
+ DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
+ * (UDWtype) (UWtype) vv.s.low};
+
+ ww.s.high -= uu.s.low;
+ ww.s.high -= vv.s.low;
+ if (__builtin_expect (ww.s.high >= 0, 1))
+ return ww.ll;
+ }
+ }
+ }
+ }
+ }
+
+ /* Overflow. */
+ abort ();
+}
+#endif
+
+
+/* Unless shift functions are defined with full ANSI prototypes,
+ parameter b will be promoted to int if shift_count_type is smaller than an int. */
+#ifdef L_lshrdi3
+DWtype
+__lshrdi3 (DWtype u, shift_count_type b)
+{
+ if (b == 0)
+ return u;
+
+ const DWunion uu = {.ll = u};
+ const shift_count_type bm = W_TYPE_SIZE - b;
+ DWunion w;
+
+ if (bm <= 0)
+ {
+ w.s.high = 0;
+ w.s.low = (UWtype) uu.s.high >> -bm;
+ }
+ else
+ {
+ const UWtype carries = (UWtype) uu.s.high << bm;
+
+ w.s.high = (UWtype) uu.s.high >> b;
+ w.s.low = ((UWtype) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
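+
+/* E.g. with 32-bit words, __lshrdi3 (u, 40) takes the bm <= 0 branch:
+   the result is just the high word shifted right by 40 - 32 = 8, with a
+   zero high word.  */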
+#endif
+
+#ifdef L_ashldi3
+DWtype
+__ashldi3 (DWtype u, shift_count_type b)
+{
+ if (b == 0)
+ return u;
+
+ const DWunion uu = {.ll = u};
+ const shift_count_type bm = W_TYPE_SIZE - b;
+ DWunion w;
+
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (UWtype) uu.s.low << -bm;
+ }
+ else
+ {
+ const UWtype carries = (UWtype) uu.s.low >> bm;
+
+ w.s.low = (UWtype) uu.s.low << b;
+ w.s.high = ((UWtype) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashrdi3
+DWtype
+__ashrdi3 (DWtype u, shift_count_type b)
+{
+ if (b == 0)
+ return u;
+
+ const DWunion uu = {.ll = u};
+ const shift_count_type bm = W_TYPE_SIZE - b;
+ DWunion w;
+
+ if (bm <= 0)
+ {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> (W_TYPE_SIZE - 1);
+ w.s.low = uu.s.high >> -bm;
+ }
+ else
+ {
+ const UWtype carries = (UWtype) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((UWtype) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_bswapsi2
+SItype
+__bswapsi2 (SItype u)
+{
+ return ((((u) & 0xff000000) >> 24)
+ | (((u) & 0x00ff0000) >> 8)
+ | (((u) & 0x0000ff00) << 8)
+ | (((u) & 0x000000ff) << 24));
+}
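+
+/* Worked example: __bswapsi2 (0x12345678) yields 0x78563412; each byte
+   is moved to the mirrored position.  */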
+#endif
+#ifdef L_bswapdi2
+DItype
+__bswapdi2 (DItype u)
+{
+ return ((((u) & 0xff00000000000000ull) >> 56)
+ | (((u) & 0x00ff000000000000ull) >> 40)
+ | (((u) & 0x0000ff0000000000ull) >> 24)
+ | (((u) & 0x000000ff00000000ull) >> 8)
+ | (((u) & 0x00000000ff000000ull) << 8)
+ | (((u) & 0x0000000000ff0000ull) << 24)
+ | (((u) & 0x000000000000ff00ull) << 40)
+ | (((u) & 0x00000000000000ffull) << 56));
+}
+#endif
+#ifdef L_ffssi2
+#undef int
+int
+__ffsSI2 (UWtype u)
+{
+ UWtype count;
+
+ if (u == 0)
+ return 0;
+
+ count_trailing_zeros (count, u);
+ return count + 1;
+}
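+
+/* E.g. __ffsSI2 (0x8) returns 4 (three trailing zeros plus one),
+   matching the ffs(3) convention that bits are numbered from 1 and
+   ffs(0) is 0.  */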
+#endif
+
+#ifdef L_ffsdi2
+#undef int
+int
+__ffsDI2 (DWtype u)
+{
+ const DWunion uu = {.ll = u};
+ UWtype word, count, add;
+
+ if (uu.s.low != 0)
+ word = uu.s.low, add = 0;
+ else if (uu.s.high != 0)
+ word = uu.s.high, add = W_TYPE_SIZE;
+ else
+ return 0;
+
+ count_trailing_zeros (count, word);
+ return count + add + 1;
+}
+#endif
+
+#ifdef L_muldi3
+DWtype
+__muldi3 (DWtype u, DWtype v)
+{
+ const DWunion uu = {.ll = u};
+ const DWunion vv = {.ll = v};
+ DWunion w = {.ll = __umulsidi3 (uu.s.low, vv.s.low)};
+
+ w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+ + (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+ return w.ll;
+}
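+
+/* This is the schoolbook identity
+     (u1*B + u0)(v1*B + v0) = u0*v0 + B*(u0*v1 + u1*v0) + B^2*(u1*v1)
+   with B = 2^W_TYPE_SIZE; the B^2 term and any carries out of the high
+   word vanish because the result is truncated to two words.  */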
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#if defined (sdiv_qrnnd)
+#define L_udiv_w_sdiv
+#endif
+#endif
+
+#ifdef L_udiv_w_sdiv
+#if defined (sdiv_qrnnd)
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline __attribute__ ((__always_inline__))
+#endif
+UWtype
+__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
+{
+ UWtype q, r;
+ UWtype c0, c1, b1;
+
+ if ((Wtype) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
+ {
+ /* Dividend, divisor, and quotient are nonnegative. */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d. */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
+ /* Divide (c1*2^32 + c0) by d. */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient. */
+ q += (UWtype) 1 << (W_TYPE_SIZE - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
+#else
+/* If sdiv_qrnnd doesn't exist, define a dummy __udiv_w_sdiv.  */
+UWtype
+__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
+ UWtype a1 __attribute__ ((__unused__)),
+ UWtype a0 __attribute__ ((__unused__)),
+ UWtype d __attribute__ ((__unused__)))
+{
+ return 0;
+}
+#endif
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#define L_udivmoddi4
+#endif
+
+#ifdef L_clz
+const UQItype __clz_tab[256] =
+{
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+};
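+
+/* __clz_tab[b] is the bit-length of the byte b (index of its highest
+   set bit, plus one), with __clz_tab[0] == 0; the generic
+   count_leading_zeros fallback in longlong.h indexes it a byte at a
+   time.  */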
+#endif
+
+#ifdef L_clzsi2
+#undef int
+int
+__clzSI2 (UWtype x)
+{
+ Wtype ret;
+
+ count_leading_zeros (ret, x);
+
+ return ret;
+}
+#endif
+
+#ifdef L_clzdi2
+#undef int
+int
+__clzDI2 (UDWtype x)
+{
+ const DWunion uu = {.ll = x};
+ UWtype word;
+ Wtype ret, add;
+
+ if (uu.s.high)
+ word = uu.s.high, add = 0;
+ else
+ word = uu.s.low, add = W_TYPE_SIZE;
+
+ count_leading_zeros (ret, word);
+ return ret + add;
+}
+#endif
+
+#ifdef L_ctzsi2
+#undef int
+int
+__ctzSI2 (UWtype x)
+{
+ Wtype ret;
+
+ count_trailing_zeros (ret, x);
+
+ return ret;
+}
+#endif
+
+#ifdef L_ctzdi2
+#undef int
+int
+__ctzDI2 (UDWtype x)
+{
+ const DWunion uu = {.ll = x};
+ UWtype word;
+ Wtype ret, add;
+
+ if (uu.s.low)
+ word = uu.s.low, add = 0;
+ else
+ word = uu.s.high, add = W_TYPE_SIZE;
+
+ count_trailing_zeros (ret, word);
+ return ret + add;
+}
+#endif
+
+#ifdef L_clrsbsi2
+#undef int
+int
+__clrsbSI2 (Wtype x)
+{
+ Wtype ret;
+
+ if (x < 0)
+ x = ~x;
+ if (x == 0)
+ return W_TYPE_SIZE - 1;
+ count_leading_zeros (ret, x);
+ return ret - 1;
+}
+#endif
+
+#ifdef L_clrsbdi2
+#undef int
+int
+__clrsbDI2 (DWtype x)
+{
+ const DWunion uu = {.ll = x};
+ UWtype word;
+ Wtype ret, add;
+
+ if (uu.s.high == 0)
+ word = uu.s.low, add = W_TYPE_SIZE;
+ else if (uu.s.high == -1)
+ word = ~uu.s.low, add = W_TYPE_SIZE;
+ else if (uu.s.high >= 0)
+ word = uu.s.high, add = 0;
+ else
+ word = ~uu.s.high, add = 0;
+
+ if (word == 0)
+ ret = W_TYPE_SIZE;
+ else
+ count_leading_zeros (ret, word);
+
+ return ret + add - 1;
+}
+#endif
+
+#ifdef L_popcount_tab
+const UQItype __popcount_tab[256] =
+{
+ 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
+ 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
+ 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
+ 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
+ 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
+ 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
+ 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
+ 3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8
+};
+#endif
+
+#ifdef L_popcountsi2
+#undef int
+int
+__popcountSI2 (UWtype x)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < W_TYPE_SIZE; i += 8)
+ ret += __popcount_tab[(x >> i) & 0xff];
+
+ return ret;
+}
+#endif
+
+#ifdef L_popcountdi2
+#undef int
+int
+__popcountDI2 (UDWtype x)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < 2*W_TYPE_SIZE; i += 8)
+ ret += __popcount_tab[(x >> i) & 0xff];
+
+ return ret;
+}
+#endif
+
+#ifdef L_paritysi2
+#undef int
+int
+__paritySI2 (UWtype x)
+{
+#if W_TYPE_SIZE > 64
+# error "fill out the table"
+#endif
+#if W_TYPE_SIZE > 32
+ x ^= x >> 32;
+#endif
+#if W_TYPE_SIZE > 16
+ x ^= x >> 16;
+#endif
+ x ^= x >> 8;
+ x ^= x >> 4;
+ x &= 0xf;
+ return (0x6996 >> x) & 1;
+}
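+
+/* 0x6996 acts as a 16-entry lookup table: bit i of 0x6996
+   (binary 0110 1001 1001 0110) is the parity of i.  After x is folded
+   down to 4 bits, indexing the constant yields the parity of the whole
+   word; e.g. x = 7 (0b0111) selects bit 7, which is 1: odd parity.  */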
+#endif
+
+#ifdef L_paritydi2
+#undef int
+int
+__parityDI2 (UDWtype x)
+{
+ const DWunion uu = {.ll = x};
+ UWtype nx = uu.s.low ^ uu.s.high;
+
+#if W_TYPE_SIZE > 64
+# error "fill out the table"
+#endif
+#if W_TYPE_SIZE > 32
+ nx ^= nx >> 32;
+#endif
+#if W_TYPE_SIZE > 16
+ nx ^= nx >> 16;
+#endif
+ nx ^= nx >> 8;
+ nx ^= nx >> 4;
+ nx &= 0xf;
+ return (0x6996 >> nx) & 1;
+}
+#endif
+
+#ifdef L_udivmoddi4
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline __attribute__ ((__always_inline__))
+#endif
+UDWtype
+__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
+{
+ const DWunion nn = {.ll = n};
+ const DWunion dd = {.ll = d};
+ DWunion rr;
+ UWtype d0, d1, n0, n1, n2;
+ UWtype q0, q1;
+ UWtype b, bm;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+	     (Shift counts of W_TYPE_SIZE are undefined.)  */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+	  /* The condition on the next line takes advantage of the fact
+	     that n1 >= d1 (true due to program flow).  */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ UWtype m1, m0;
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ const DWunion ww = {{.low = q0, .high = q1}};
+ return ww.ll;
+}
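+
+/* Usage sketch: the wrappers below call this as, e.g.,
+
+     UDWtype r;
+     UDWtype q = __udivmoddi4 (n, d, &r);
+
+   passing a null rp when only the quotient is wanted.  */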
+#endif
+
+#ifdef L_divdi3
+DWtype
+__divdi3 (DWtype u, DWtype v)
+{
+ Wtype c = 0;
+ DWunion uu = {.ll = u};
+ DWunion vv = {.ll = v};
+ DWtype w;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = -uu.ll;
+ if (vv.s.high < 0)
+ c = ~c,
+ vv.ll = -vv.ll;
+
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
+ if (c)
+ w = -w;
+
+ return w;
+}
+#endif
+
+#ifdef L_moddi3
+DWtype
+__moddi3 (DWtype u, DWtype v)
+{
+ Wtype c = 0;
+ DWunion uu = {.ll = u};
+ DWunion vv = {.ll = v};
+ DWtype w;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = -uu.ll;
+ if (vv.s.high < 0)
+ vv.ll = -vv.ll;
+
+ (void) __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&w);
+ if (c)
+ w = -w;
+
+ return w;
+}
+#endif
+
+#ifdef L_umoddi3
+UDWtype
+__umoddi3 (UDWtype u, UDWtype v)
+{
+ UDWtype w;
+
+ (void) __udivmoddi4 (u, v, &w);
+
+ return w;
+}
+#endif
+
+#ifdef L_udivdi3
+UDWtype
+__udivdi3 (UDWtype n, UDWtype d)
+{
+ return __udivmoddi4 (n, d, (UDWtype *) 0);
+}
+#endif
+
+#ifdef L_cmpdi2
+cmp_return_type
+__cmpdi2 (DWtype a, DWtype b)
+{
+ const DWunion au = {.ll = a};
+ const DWunion bu = {.ll = b};
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+ if ((UWtype) au.s.low < (UWtype) bu.s.low)
+ return 0;
+ else if ((UWtype) au.s.low > (UWtype) bu.s.low)
+ return 2;
+ return 1;
+}
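+
+/* The encoding is 0 for a < b, 1 for a == b, 2 for a > b; e.g.
+   __cmpdi2 (-1, 0) returns 0.  */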
+#endif
+
+#ifdef L_ucmpdi2
+cmp_return_type
+__ucmpdi2 (DWtype a, DWtype b)
+{
+ const DWunion au = {.ll = a};
+ const DWunion bu = {.ll = b};
+
+ if ((UWtype) au.s.high < (UWtype) bu.s.high)
+ return 0;
+ else if ((UWtype) au.s.high > (UWtype) bu.s.high)
+ return 2;
+ if ((UWtype) au.s.low < (UWtype) bu.s.low)
+ return 0;
+ else if ((UWtype) au.s.low > (UWtype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#if defined(L_fixunstfdi) && LIBGCC2_HAS_TF_MODE
+UDWtype
+__fixunstfDI (TFtype a)
+{
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ const TFtype b = (a / Wtype_MAXp1_F);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ UDWtype v = (UWtype) b;
+ v <<= W_TYPE_SIZE;
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype)v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixtfdi) && LIBGCC2_HAS_TF_MODE
+DWtype
+__fixtfdi (TFtype a)
+{
+ if (a < 0)
+ return - __fixunstfDI (-a);
+ return __fixunstfDI (a);
+}
+#endif
+
+#if defined(L_fixunsxfdi) && LIBGCC2_HAS_XF_MODE
+UDWtype
+__fixunsxfDI (XFtype a)
+{
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ const XFtype b = (a / Wtype_MAXp1_F);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ UDWtype v = (UWtype) b;
+ v <<= W_TYPE_SIZE;
+ /* Remove high part from the XFtype, leaving the low part as flonum. */
+ a -= (XFtype)v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixxfdi) && LIBGCC2_HAS_XF_MODE
+DWtype
+__fixxfdi (XFtype a)
+{
+ if (a < 0)
+ return - __fixunsxfDI (-a);
+ return __fixunsxfDI (a);
+}
+#endif
+
+#if defined(L_fixunsdfdi) && LIBGCC2_HAS_DF_MODE
+UDWtype
+__fixunsdfDI (DFtype a)
+{
+  /* Get high part of result.  The division here just moves the radix
+     point and will not cause any rounding.  The conversion to integral
+     type then chops the result as desired.  */
+ const UWtype hi = a / Wtype_MAXp1_F;
+
+ /* Get low part of result. Convert `hi' to floating type and scale it back,
+ then subtract this from the number being converted. This leaves the low
+ part. Convert that to integral type. */
+ const UWtype lo = a - (DFtype) hi * Wtype_MAXp1_F;
+
+ /* Assemble result from the two parts. */
+ return ((UDWtype) hi << W_TYPE_SIZE) | lo;
+}
+#endif
+
+#if defined(L_fixdfdi) && LIBGCC2_HAS_DF_MODE
+DWtype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - __fixunsdfDI (-a);
+ return __fixunsdfDI (a);
+}
+#endif
+
+#if defined(L_fixunssfdi) && LIBGCC2_HAS_SF_MODE
+UDWtype
+__fixunssfDI (SFtype a)
+{
+#if LIBGCC2_HAS_DF_MODE
+ /* Convert the SFtype to a DFtype, because that is surely not going
+ to lose any bits. Some day someone else can write a faster version
+ that avoids converting to DFtype, and verify it really works right. */
+ const DFtype dfa = a;
+
+ /* Get high part of result. The division here just moves the radix
+ point and does not cause any rounding. The conversion to integral
+ type then chops the result as desired. */
+ const UWtype hi = dfa / Wtype_MAXp1_F;
+
+ /* Get low part of result. Convert `hi' to floating type and scale it back,
+ then subtract this from the number being converted. This leaves the low
+ part. Convert that to integral type. */
+ const UWtype lo = dfa - (DFtype) hi * Wtype_MAXp1_F;
+
+ /* Assemble result from the two parts. */
+ return ((UDWtype) hi << W_TYPE_SIZE) | lo;
+#elif FLT_MANT_DIG < W_TYPE_SIZE
+ if (a < 1)
+ return 0;
+ if (a < Wtype_MAXp1_F)
+ return (UWtype)a;
+ if (a < Wtype_MAXp1_F * Wtype_MAXp1_F)
+ {
+ /* Since we know that there are fewer significant bits in the SFmode
+ quantity than in a word, we know that we can convert out all the
+ significant bits in one step, and thus avoid losing bits. */
+
+ /* ??? The following loop essentially performs frexpf. If we could
+ use the real libm function, or poke at the actual bits of the fp
+ format, it would be significantly faster. */
+
+ UWtype shift = 0, counter;
+ SFtype msb;
+
+ a /= Wtype_MAXp1_F;
+ for (counter = W_TYPE_SIZE / 2; counter != 0; counter >>= 1)
+ {
+ SFtype counterf = (UWtype)1 << counter;
+ if (a >= counterf)
+ {
+ shift |= counter;
+ a /= counterf;
+ }
+ }
+
+ /* Rescale into the range of one word, extract the bits of that
+ one word, and shift the result into position. */
+ a *= Wtype_MAXp1_F;
+ counter = a;
+ return (DWtype)counter << shift;
+ }
+ return -1;
+#else
+# error
+#endif
+}
+#endif
+
+#if defined(L_fixsfdi) && LIBGCC2_HAS_SF_MODE
+DWtype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - __fixunssfDI (-a);
+ return __fixunssfDI (a);
+}
+#endif
+
+#if defined(L_floatdixf) && LIBGCC2_HAS_XF_MODE
+XFtype
+__floatdixf (DWtype u)
+{
+#if W_TYPE_SIZE > XF_SIZE
+# error
+#endif
+ XFtype d = (Wtype) (u >> W_TYPE_SIZE);
+ d *= Wtype_MAXp1_F;
+ d += (UWtype)u;
+ return d;
+}
+#endif
+
+#if defined(L_floatundixf) && LIBGCC2_HAS_XF_MODE
+XFtype
+__floatundixf (UDWtype u)
+{
+#if W_TYPE_SIZE > XF_SIZE
+# error
+#endif
+ XFtype d = (UWtype) (u >> W_TYPE_SIZE);
+ d *= Wtype_MAXp1_F;
+ d += (UWtype)u;
+ return d;
+}
+#endif
+
+#if defined(L_floatditf) && LIBGCC2_HAS_TF_MODE
+TFtype
+__floatditf (DWtype u)
+{
+#if W_TYPE_SIZE > TF_SIZE
+# error
+#endif
+ TFtype d = (Wtype) (u >> W_TYPE_SIZE);
+ d *= Wtype_MAXp1_F;
+ d += (UWtype)u;
+ return d;
+}
+#endif
+
+#if defined(L_floatunditf) && LIBGCC2_HAS_TF_MODE
+TFtype
+__floatunditf (UDWtype u)
+{
+#if W_TYPE_SIZE > TF_SIZE
+# error
+#endif
+ TFtype d = (UWtype) (u >> W_TYPE_SIZE);
+ d *= Wtype_MAXp1_F;
+ d += (UWtype)u;
+ return d;
+}
+#endif
+
+#if (defined(L_floatdisf) && LIBGCC2_HAS_SF_MODE) \
+ || (defined(L_floatdidf) && LIBGCC2_HAS_DF_MODE)
+#define DI_SIZE (W_TYPE_SIZE * 2)
+#define F_MODE_OK(SIZE) \
+ (SIZE < DI_SIZE \
+ && SIZE > (DI_SIZE - SIZE + FSSIZE) \
+ && !AVOID_FP_TYPE_CONVERSION(SIZE))
+#if defined(L_floatdisf)
+#define FUNC __floatdisf
+#define FSTYPE SFtype
+#define FSSIZE SF_SIZE
+#else
+#define FUNC __floatdidf
+#define FSTYPE DFtype
+#define FSSIZE DF_SIZE
+#endif
+
+FSTYPE
+FUNC (DWtype u)
+{
+#if FSSIZE >= W_TYPE_SIZE
+ /* When the word size is small, we never get any rounding error. */
+ FSTYPE f = (Wtype) (u >> W_TYPE_SIZE);
+ f *= Wtype_MAXp1_F;
+ f += (UWtype)u;
+ return f;
+#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (DF_SIZE)) \
+ || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (XF_SIZE)) \
+ || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (TF_SIZE))
+
+#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (DF_SIZE))
+# define FSIZE DF_SIZE
+# define FTYPE DFtype
+#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (XF_SIZE))
+# define FSIZE XF_SIZE
+# define FTYPE XFtype
+#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (TF_SIZE))
+# define FSIZE TF_SIZE
+# define FTYPE TFtype
+#else
+# error
+#endif
+
+#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits that might be truncated, by a bit that
+ won't be lost. The bit can go anywhere below the rounding position
+ of the FSTYPE. A fixed mask and bit position handle all usual
+ configurations. */
+ if (! (- ((DWtype) 1 << FSIZE) < u
+ && u < ((DWtype) 1 << FSIZE)))
+ {
+ if ((UDWtype) u & (REP_BIT - 1))
+ {
+ u &= ~ (REP_BIT - 1);
+ u |= REP_BIT;
+ }
+ }
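+
+ /* A concrete sketch of the mask, assuming FSTYPE == SFtype with a
+ 64-bit DWtype and FTYPE == DFtype (FSIZE == 53): REP_BIT is 1 << 11,
+ the lowest bit a 53-bit mantissa can still represent for a full
+ 64-bit magnitude, yet well below the 24-bit SFmode rounding
+ position -- so it acts as a sticky bit. */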
+
+ /* Do the calculation in a wider type so that we don't lose any of
+ the precision of the high word while multiplying it. */
+ FTYPE f = (Wtype) (u >> W_TYPE_SIZE);
+ f *= Wtype_MAXp1_F;
+ f += (UWtype)u;
+ return (FSTYPE) f;
+#else
+#if FSSIZE >= W_TYPE_SIZE - 2
+# error
+#endif
+ /* Finally, the word size is larger than the number of bits in the
+ required FSTYPE, and we've got no suitable wider type. The only
+ way to avoid double rounding is to special case the
+ extraction. */
+
+ /* If there are no high bits set, fall back to one conversion. */
+ if ((Wtype)u == u)
+ return (FSTYPE)(Wtype)u;
+
+ /* Otherwise, find the power of two. */
+ Wtype hi = u >> W_TYPE_SIZE;
+ if (hi < 0)
+ hi = -hi;
+
+ UWtype count, shift;
+ count_leading_zeros (count, hi);
+
+ /* No leading zero bits means u == minimum. */
+ if (count == 0)
+ return -(Wtype_MAXp1_F * (Wtype_MAXp1_F / 2));
+
+ shift = 1 + W_TYPE_SIZE - count;
+
+ /* Shift down the most significant bits. */
+ hi = u >> shift;
+
+ /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
+ if ((UWtype)u << (W_TYPE_SIZE - shift))
+ hi |= 1;
+
+ /* Convert the one word of data, and rescale. */
+ FSTYPE f = hi, e;
+ if (shift == W_TYPE_SIZE)
+ e = Wtype_MAXp1_F;
+ /* The following two cases could be merged if we knew that the target
+ supported a native unsigned->float conversion. More often, we only
+ have a signed conversion, and have to add extra fixup code. */
+ else if (shift == W_TYPE_SIZE - 1)
+ e = Wtype_MAXp1_F / 2;
+ else
+ e = (Wtype)1 << shift;
+ return f * e;
+#endif
+}
+#endif
+
+#if (defined(L_floatundisf) && LIBGCC2_HAS_SF_MODE) \
+ || (defined(L_floatundidf) && LIBGCC2_HAS_DF_MODE)
+#define DI_SIZE (W_TYPE_SIZE * 2)
+#define F_MODE_OK(SIZE) \
+ (SIZE < DI_SIZE \
+ && SIZE > (DI_SIZE - SIZE + FSSIZE) \
+ && !AVOID_FP_TYPE_CONVERSION(SIZE))
+#if defined(L_floatundisf)
+#define FUNC __floatundisf
+#define FSTYPE SFtype
+#define FSSIZE SF_SIZE
+#else
+#define FUNC __floatundidf
+#define FSTYPE DFtype
+#define FSSIZE DF_SIZE
+#endif
+
+FSTYPE
+FUNC (UDWtype u)
+{
+#if FSSIZE >= W_TYPE_SIZE
+ /* When the word size is small, we never get any rounding error. */
+ FSTYPE f = (UWtype) (u >> W_TYPE_SIZE);
+ f *= Wtype_MAXp1_F;
+ f += (UWtype)u;
+ return f;
+#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (DF_SIZE)) \
+ || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (XF_SIZE)) \
+ || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (TF_SIZE))
+
+#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (DF_SIZE))
+# define FSIZE DF_SIZE
+# define FTYPE DFtype
+#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (XF_SIZE))
+# define FSIZE XF_SIZE
+# define FTYPE XFtype
+#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (TF_SIZE))
+# define FSIZE TF_SIZE
+# define FTYPE TFtype
+#else
+# error
+#endif
+
+#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits that might be truncated, by a bit that
+ won't be lost. The bit can go anywhere below the rounding position
+ of the FSTYPE. A fixed mask and bit position handle all usual
+ configurations. */
+ if (u >= ((UDWtype) 1 << FSIZE))
+ {
+ if ((UDWtype) u & (REP_BIT - 1))
+ {
+ u &= ~ (REP_BIT - 1);
+ u |= REP_BIT;
+ }
+ }
+
+ /* Do the calculation in a wider type so that we don't lose any of
+ the precision of the high word while multiplying it. */
+ FTYPE f = (UWtype) (u >> W_TYPE_SIZE);
+ f *= Wtype_MAXp1_F;
+ f += (UWtype)u;
+ return (FSTYPE) f;
+#else
+#if FSSIZE == W_TYPE_SIZE - 1
+# error
+#endif
+ /* Finally, the word size is larger than the number of bits in the
+ required FSTYPE, and we've got no suitable wider type. The only
+ way to avoid double rounding is to special case the
+ extraction. */
+
+ /* If there are no high bits set, fall back to one conversion. */
+ if ((UWtype)u == u)
+ return (FSTYPE)(UWtype)u;
+
+ /* Otherwise, find the power of two. */
+ UWtype hi = u >> W_TYPE_SIZE;
+
+ UWtype count, shift;
+ count_leading_zeros (count, hi);
+
+ shift = W_TYPE_SIZE - count;
+
+ /* Shift down the most significant bits. */
+ hi = u >> shift;
+
+ /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
+ if ((UWtype)u << (W_TYPE_SIZE - shift))
+ hi |= 1;
+
+ /* Convert the one word of data, and rescale. */
+ FSTYPE f = hi, e;
+ if (shift == W_TYPE_SIZE)
+ e = Wtype_MAXp1_F;
+ /* The following two cases could be merged if we knew that the target
+ supported a native unsigned->float conversion. More often, we only
+ have a signed conversion, and have to add extra fixup code. */
+ else if (shift == W_TYPE_SIZE - 1)
+ e = Wtype_MAXp1_F / 2;
+ else
+ e = (Wtype)1 << shift;
+ return f * e;
+#endif
+}
+#endif
+
+#if defined(L_fixunsxfsi) && LIBGCC2_HAS_XF_MODE
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+UWtype
+__fixunsxfSI (XFtype a)
+{
+ if (a >= - (DFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+#endif
+
+#if defined(L_fixunsdfsi) && LIBGCC2_HAS_DF_MODE
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+UWtype
+__fixunsdfSI (DFtype a)
+{
+ if (a >= - (DFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+#endif
+
+#if defined(L_fixunssfsi) && LIBGCC2_HAS_SF_MODE
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+UWtype
+__fixunssfSI (SFtype a)
+{
+ if (a >= - (SFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+#endif
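+
+/* All three fixuns*SI variants above share one trick for values with
+ the word's high bit set: adding Wtype_MIN (i.e. -2**(W-1)) slides A
+ into signed range so the Wtype conversion is well defined, and the
+ integer subtraction of Wtype_MIN restores the unsigned value. Sketch,
+ assuming a 32-bit word: a == 3e9 becomes 3e9 - 2**31 == 852516352,
+ which converts exactly, and adding 2**31 back yields 3000000000. */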
+
+/* Integer power helper used from __builtin_powi for non-constant
+ exponents. */
+
+#if (defined(L_powisf2) && LIBGCC2_HAS_SF_MODE) \
+ || (defined(L_powidf2) && LIBGCC2_HAS_DF_MODE) \
+ || (defined(L_powixf2) && LIBGCC2_HAS_XF_MODE) \
+ || (defined(L_powitf2) && LIBGCC2_HAS_TF_MODE)
+# if defined(L_powisf2)
+# define TYPE SFtype
+# define NAME __powisf2
+# elif defined(L_powidf2)
+# define TYPE DFtype
+# define NAME __powidf2
+# elif defined(L_powixf2)
+# define TYPE XFtype
+# define NAME __powixf2
+# elif defined(L_powitf2)
+# define TYPE TFtype
+# define NAME __powitf2
+# endif
+
+#undef int
+#undef unsigned
+TYPE
+NAME (TYPE x, int m)
+{
+ unsigned int n = m < 0 ? -m : m;
+ TYPE y = n % 2 ? x : 1;
+ while (n >>= 1)
+ {
+ x = x * x;
+ if (n % 2)
+ y = y * x;
+ }
+ return m < 0 ? 1/y : y;
+}
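+
+/* The loop above is binary exponentiation over the bits of |m|: X is
+ squared once per remaining exponent bit and multiplied into Y when
+ the bit is set. Worked sketch for m == 5 (binary 101): y = x;
+ n == 2: x = x*x; n == 1: x = x*x, y = y*x == x**5 -- three
+ multiplies instead of four. A negative M simply inverts at the end. */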
+
+#endif
+
+#if ((defined(L_mulsc3) || defined(L_divsc3)) && LIBGCC2_HAS_SF_MODE) \
+ || ((defined(L_muldc3) || defined(L_divdc3)) && LIBGCC2_HAS_DF_MODE) \
+ || ((defined(L_mulxc3) || defined(L_divxc3)) && LIBGCC2_HAS_XF_MODE) \
+ || ((defined(L_multc3) || defined(L_divtc3)) && LIBGCC2_HAS_TF_MODE)
+
+#undef float
+#undef double
+#undef long
+
+#if defined(L_mulsc3) || defined(L_divsc3)
+# define MTYPE SFtype
+# define CTYPE SCtype
+# define MODE sc
+# define CEXT f
+# define NOTRUNC __FLT_EVAL_METHOD__ == 0
+#elif defined(L_muldc3) || defined(L_divdc3)
+# define MTYPE DFtype
+# define CTYPE DCtype
+# define MODE dc
+# if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 64
+# define CEXT l
+# define NOTRUNC 1
+# else
+# define CEXT
+# define NOTRUNC __FLT_EVAL_METHOD__ == 0 || __FLT_EVAL_METHOD__ == 1
+# endif
+#elif defined(L_mulxc3) || defined(L_divxc3)
+# define MTYPE XFtype
+# define CTYPE XCtype
+# define MODE xc
+# define CEXT l
+# define NOTRUNC 1
+#elif defined(L_multc3) || defined(L_divtc3)
+# define MTYPE TFtype
+# define CTYPE TCtype
+# define MODE tc
+# if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
+# define CEXT l
+# else
+# define CEXT LIBGCC2_TF_CEXT
+# endif
+# define NOTRUNC 1
+#else
+# error
+#endif
+
+#define CONCAT3(A,B,C) _CONCAT3(A,B,C)
+#define _CONCAT3(A,B,C) A##B##C
+
+#define CONCAT2(A,B) _CONCAT2(A,B)
+#define _CONCAT2(A,B) A##B
+
+/* All of these would be present in a full C99 implementation of <math.h>
+ and <complex.h>. Our problem is that only a few systems have such full
+ implementations. Further, libgcc_s.so isn't currently linked against
+ libm.so, and even on systems that do provide full C99, linking every
+ program that uses libgcc against libm would add needless overhead. So
+ avoid it. */
+
+#define isnan(x) __builtin_expect ((x) != (x), 0)
+#define isfinite(x) __builtin_expect (!isnan((x) - (x)), 1)
+#define isinf(x) __builtin_expect (!isnan(x) & !isfinite(x), 0)
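+
+/* These definitions rely only on IEEE semantics: a NaN compares
+ unequal to itself; (x) - (x) is NaN exactly when X is NaN or an
+ infinity, so isfinite holds for every ordinary value; and isinf is
+ then "not NaN and not finite". (A sketch of the intent, since
+ <math.h> is deliberately not used here.) */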
+
+#define INFINITY CONCAT2(__builtin_huge_val, CEXT) ()
+#define I 1i
+
+/* Helpers to make the following code slightly less gross. */
+#define COPYSIGN CONCAT2(__builtin_copysign, CEXT)
+#define FABS CONCAT2(__builtin_fabs, CEXT)
+
+/* Verify that MTYPE matches up with CEXT. */
+extern void *compile_type_assert[sizeof(INFINITY) == sizeof(MTYPE) ? 1 : -1];
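+
+/* (The array above has size -1, and therefore fails to compile,
+ exactly when sizeof(INFINITY) and sizeof(MTYPE) disagree -- a
+ portable compile-time assertion predating _Static_assert.) */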
+
+/* Ensure that we've lost any extra precision. */
+#if NOTRUNC
+# define TRUNC(x)
+#else
+# define TRUNC(x) __asm__ ("" : "=m"(x) : "m"(x))
+#endif
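+
+/* The empty asm forces X through a memory slot of its declared type,
+ so excess precision from wider evaluation (e.g. x87 registers, where
+ __FLT_EVAL_METHOD__ != 0) is rounded away before X is reused. When
+ NOTRUNC holds there is no excess precision to discard. */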
+
+#if defined(L_mulsc3) || defined(L_muldc3) \
+ || defined(L_mulxc3) || defined(L_multc3)
+
+CTYPE
+CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
+{
+ MTYPE ac, bd, ad, bc, x, y;
+ CTYPE res;
+
+ ac = a * c;
+ bd = b * d;
+ ad = a * d;
+ bc = b * c;
+
+ TRUNC (ac);
+ TRUNC (bd);
+ TRUNC (ad);
+ TRUNC (bc);
+
+ x = ac - bd;
+ y = ad + bc;
+
+ if (isnan (x) && isnan (y))
+ {
+ /* Recover infinities that were computed as NaN + iNaN. */
+ _Bool recalc = 0;
+ if (isinf (a) || isinf (b))
+ {
+ /* z is infinite. "Box" the infinity and change NaNs in
+ the other factor to 0. */
+ a = COPYSIGN (isinf (a) ? 1 : 0, a);
+ b = COPYSIGN (isinf (b) ? 1 : 0, b);
+ if (isnan (c)) c = COPYSIGN (0, c);
+ if (isnan (d)) d = COPYSIGN (0, d);
+ recalc = 1;
+ }
+ if (isinf (c) || isinf (d))
+ {
+ /* w is infinite. "Box" the infinity and change NaNs in
+ the other factor to 0. */
+ c = COPYSIGN (isinf (c) ? 1 : 0, c);
+ d = COPYSIGN (isinf (d) ? 1 : 0, d);
+ if (isnan (a)) a = COPYSIGN (0, a);
+ if (isnan (b)) b = COPYSIGN (0, b);
+ recalc = 1;
+ }
+ if (!recalc
+ && (isinf (ac) || isinf (bd)
+ || isinf (ad) || isinf (bc)))
+ {
+ /* Recover infinities from overflow by changing NaNs to 0. */
+ if (isnan (a)) a = COPYSIGN (0, a);
+ if (isnan (b)) b = COPYSIGN (0, b);
+ if (isnan (c)) c = COPYSIGN (0, c);
+ if (isnan (d)) d = COPYSIGN (0, d);
+ recalc = 1;
+ }
+ if (recalc)
+ {
+ x = INFINITY * (a * c - b * d);
+ y = INFINITY * (a * d + b * c);
+ }
+ }
+
+ __real__ res = x;
+ __imag__ res = y;
+ return res;
+}
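+
+/* The recovery logic above mirrors the sample implementation in C99
+ Annex G: a product that came out as NaN + iNaN from an infinite
+ operand or from overflow is recomputed with "boxed" infinities so the
+ result keeps an infinite magnitude with the correct signs. */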
+#endif /* complex multiply */
+
+#if defined(L_divsc3) || defined(L_divdc3) \
+ || defined(L_divxc3) || defined(L_divtc3)
+
+CTYPE
+CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
+{
+ MTYPE denom, ratio, x, y;
+ CTYPE res;
+
+ /* ??? We can get better behavior from logarithmic scaling instead of
+ the division. But that would mean starting to link libgcc against
+ libm. We could implement something akin to ldexp/frexp as gcc builtins
+ fairly easily... */
+ if (FABS (c) < FABS (d))
+ {
+ ratio = c / d;
+ denom = (c * ratio) + d;
+ x = ((a * ratio) + b) / denom;
+ y = ((b * ratio) - a) / denom;
+ }
+ else
+ {
+ ratio = d / c;
+ denom = (d * ratio) + c;
+ x = ((b * ratio) + a) / denom;
+ y = (b - (a * ratio)) / denom;
+ }
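+
+ /* The branch on |c| vs |d| above is Smith's (1962) algorithm:
+ forming the ratio from the smaller magnitude keeps DENOM and the
+ scaled products in range where the textbook formula would overflow
+ or underflow prematurely. */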
+
+ /* Recover infinities and zeros that were computed as NaN + iNaN; the
+ only cases are nonzero/zero, infinite/finite, and finite/infinite. */
+ if (isnan (x) && isnan (y))
+ {
+ if (c == 0.0 && d == 0.0 && (!isnan (a) || !isnan (b)))
+ {
+ x = COPYSIGN (INFINITY, c) * a;
+ y = COPYSIGN (INFINITY, c) * b;
+ }
+ else if ((isinf (a) || isinf (b)) && isfinite (c) && isfinite (d))
+ {
+ a = COPYSIGN (isinf (a) ? 1 : 0, a);
+ b = COPYSIGN (isinf (b) ? 1 : 0, b);
+ x = INFINITY * (a * c + b * d);
+ y = INFINITY * (b * c - a * d);
+ }
+ else if ((isinf (c) || isinf (d)) && isfinite (a) && isfinite (b))
+ {
+ c = COPYSIGN (isinf (c) ? 1 : 0, c);
+ d = COPYSIGN (isinf (d) ? 1 : 0, d);
+ x = 0.0 * (a * c + b * d);
+ y = 0.0 * (b * c - a * d);
+ }
+ }
+
+ __real__ res = x;
+ __imag__ res = y;
+ return res;
+}
+#endif /* complex divide */
+
+#endif /* all complex float routines */
+
+/* From here on down, the routines use normal data types. */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+#undef Wtype
+#undef UWtype
+#undef HWtype
+#undef UHWtype
+#undef DWtype
+#undef UDWtype
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+
+#ifdef L__gcc_bcmp
+
+/* Like bcmp except the sign is meaningful.
+ Result is negative if S1 is less than S2,
+ positive if S1 is greater, 0 if S1 and S2 are equal. */
+
+int
+__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
+{
+ while (size > 0)
+ {
+ const unsigned char c1 = *s1++, c2 = *s2++;
+ if (c1 != c2)
+ return c1 - c2;
+ size--;
+ }
+ return 0;
+}
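+
+/* Unlike memcmp, whose result magnitude is unspecified, this returns
+ exactly the difference of the first mismatching bytes; comparing "ab"
+ with "ac" over 2 bytes yields 'b' - 'c' == -1, for example. */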
+
+#endif
+
+/* __eprintf used to be used by GCC's private version of <assert.h>.
+ We no longer provide that header, but this routine remains in libgcc.a
+ for binary backward compatibility. Note that it is not included in
+ the shared version of libgcc. */
+#ifdef L_eprintf
+#ifndef inhibit_libc
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+
+void
+__eprintf (const char *string, const char *expression,
+ unsigned int line, const char *filename)
+{
+ fprintf (stderr, string, expression, line, filename);
+ fflush (stderr);
+ abort ();
+}
+
+#endif
+#endif
+
+
+#ifdef L_clear_cache
+/* Clear part of an instruction cache. */
+
+void
+__clear_cache (char *beg __attribute__((__unused__)),
+ char *end __attribute__((__unused__)))
+{
+#ifdef CLEAR_INSN_CACHE
+ CLEAR_INSN_CACHE (beg, end);
+#endif /* CLEAR_INSN_CACHE */
+}
+
+#endif /* L_clear_cache */
+
+#ifdef L_trampoline
+
+/* Jump to a trampoline, loading the static chain address. */
+
+#if defined(WINNT) && ! defined(__CYGWIN__)
+#include <windows.h>
+int getpagesize (void);
+int mprotect (char *,int, int);
+
+int
+getpagesize (void)
+{
+#ifdef _ALPHA_
+ return 8192;
+#else
+ return 4096;
+#endif
+}
+
+int
+mprotect (char *addr, int len, int prot)
+{
+ DWORD np, op;
+
+ if (prot == 7)
+ np = 0x40;
+ else if (prot == 5)
+ np = 0x20;
+ else if (prot == 4)
+ np = 0x10;
+ else if (prot == 3)
+ np = 0x04;
+ else if (prot == 1)
+ np = 0x02;
+ else if (prot == 0)
+ np = 0x01;
+ else
+ return -1;
+
+ if (VirtualProtect (addr, len, np, &op))
+ return 0;
+ else
+ return -1;
+}
+
+#endif /* WINNT && ! __CYGWIN__ */
+
+#ifdef TRANSFER_FROM_TRAMPOLINE
+TRANSFER_FROM_TRAMPOLINE
+#endif
+#endif /* L_trampoline */
+
+#ifndef __CYGWIN__
+#ifdef L__main
+
+#include "gbl-ctors.h"
+
+/* Some systems use __main in a way incompatible with its use in gcc; in
+ those cases, use the macro NAME__MAIN to give a quoted symbol and
+ SYMBOL__MAIN to give the same symbol without quotes for an alternative
+ entry point. You must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+#if defined (INIT_SECTION_ASM_OP) || defined (INIT_ARRAY_SECTION_ASM_OP)
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+#endif
+
+#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
+
+/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
+ code to run constructors. In that case, we need to handle EH here, too. */
+
+#ifdef EH_FRAME_SECTION_NAME
+#include "unwind-dw2-fde.h"
+extern unsigned char __EH_FRAME_BEGIN__[];
+#endif
+
+/* Run all the global destructors on exit from the program. */
+
+void
+__do_global_dtors (void)
+{
+#ifdef DO_GLOBAL_DTORS_BODY
+ DO_GLOBAL_DTORS_BODY;
+#else
+ static func_ptr *p = __DTOR_LIST__ + 1;
+ while (*p)
+ {
+ p++;
+ (*(p-1)) ();
+ }
+#endif
+#if defined (EH_FRAME_SECTION_NAME) && !defined (HAS_INIT_SECTION)
+ {
+ static int completed = 0;
+ if (! completed)
+ {
+ completed = 1;
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+ }
+ }
+#endif
+}
+#endif
+
+#ifndef HAS_INIT_SECTION
+/* Run all the global constructors on entry to the program. */
+
+void
+__do_global_ctors (void)
+{
+#ifdef EH_FRAME_SECTION_NAME
+ {
+ static struct object object;
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+ }
+#endif
+ DO_GLOBAL_CTORS_BODY;
+ atexit (__do_global_dtors);
+}
+#endif /* no HAS_INIT_SECTION */
+
+#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
+/* Subroutine called automatically by `main'.
+ Compiling a global function named `main'
+ produces an automatic call to this function at the beginning.
+
+ For many systems, this routine calls __do_global_ctors.
+ For systems which support a .init section we use the .init section
+ to run __do_global_ctors, so we need not do anything here. */
+
+extern void SYMBOL__MAIN (void);
+void
+SYMBOL__MAIN (void)
+{
+ /* Support recursive calls to `main': run initializers just once. */
+ static int initialized;
+ if (! initialized)
+ {
+ initialized = 1;
+ __do_global_ctors ();
+ }
+}
+#endif /* no HAS_INIT_SECTION or INVOKE__main */
+
+#endif /* L__main */
+#endif /* __CYGWIN__ */
+
+#ifdef L_ctors
+
+#include "gbl-ctors.h"
+
+/* Provide default definitions for the lists of constructors and
+ destructors, so that we don't get linker errors. These symbols are
+ intentionally bss symbols, so that gld and/or collect will provide
+ the right values. */
+
+/* We declare the lists here with two elements each,
+ so that they are valid empty lists if no other definition is loaded.
+
+ If we are using the old "set" extensions to have the gnu linker
+ collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
+ must be in the bss/common section.
+
+ Long term no port should use those extensions. But many still do. */
+#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
+#if defined (TARGET_ASM_CONSTRUCTOR) || defined (USE_COLLECT2)
+func_ptr __CTOR_LIST__[2] = {0, 0};
+func_ptr __DTOR_LIST__[2] = {0, 0};
+#else
+func_ptr __CTOR_LIST__[2];
+func_ptr __DTOR_LIST__[2];
+#endif
+#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
+#endif /* L_ctors */
+#endif /* LIBGCC2_UNITS_PER_WORD <= MIN_UNITS_PER_WORD */
diff --git a/libgcc/libgcc2.h b/libgcc/libgcc2.h
new file mode 100644
index 00000000000..0c7d0e15623
--- /dev/null
+++ b/libgcc/libgcc2.h
@@ -0,0 +1,530 @@
+/* Header file for libgcc2.c. */
+/* Copyright (C) 2000, 2001, 2004, 2005, 2009, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LIBGCC2_H
+#define GCC_LIBGCC2_H
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+extern int __gcc_bcmp (const unsigned char *, const unsigned char *, size_t);
+extern void __clear_cache (char *, char *);
+extern void __eprintf (const char *, const char *, unsigned int, const char *)
+ __attribute__ ((__noreturn__));
+
+#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
+#endif
+
+#ifndef LIBGCC2_HAS_SF_MODE
+#define LIBGCC2_HAS_SF_MODE (BITS_PER_UNIT == 8)
+#endif
+
+#ifndef LIBGCC2_HAS_DF_MODE
+#define LIBGCC2_HAS_DF_MODE \
+ (BITS_PER_UNIT == 8 \
+ && (__SIZEOF_DOUBLE__ * __CHAR_BIT__ == 64 \
+ || LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 64))
+#endif
+
+#ifndef LIBGCC2_HAS_XF_MODE
+#define LIBGCC2_HAS_XF_MODE \
+ (BITS_PER_UNIT == 8 && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 80)
+#endif
+
+#ifndef LIBGCC2_HAS_TF_MODE
+#define LIBGCC2_HAS_TF_MODE \
+ (BITS_PER_UNIT == 8 && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+#endif
+
+#ifndef SF_SIZE
+#if LIBGCC2_HAS_SF_MODE
+#define SF_SIZE FLT_MANT_DIG
+#else
+#define SF_SIZE 0
+#endif
+#endif
+
+#ifndef DF_SIZE
+#if LIBGCC2_HAS_DF_MODE
+#if __SIZEOF_DOUBLE__ * __CHAR_BIT__ == 64
+#define DF_SIZE DBL_MANT_DIG
+#elif LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 64
+#define DF_SIZE LDBL_MANT_DIG
+#else
+#define DF_SIZE 0
+#endif
+#else
+#define DF_SIZE 0
+#endif
+#endif
+
+#ifndef XF_SIZE
+#if LIBGCC2_HAS_XF_MODE
+#define XF_SIZE LDBL_MANT_DIG
+#else
+#define XF_SIZE 0
+#endif
+#endif
+
+#ifndef TF_SIZE
+#if LIBGCC2_HAS_TF_MODE
+#define TF_SIZE LDBL_MANT_DIG
+#else
+#define TF_SIZE 0
+#endif
+#endif
+
+/* FIXME: This #ifdef should probably be removed, i.e., enable the test
+ for mips too. */
+/* Don't use IBM Extended Double TFmode for TI->SF calculations.
+ The conversion from long double to float suffers from double
+ rounding, because we convert via double. In other cases, going
+ through the software fp routines is much slower than the fallback. */
+#ifdef __powerpc__
+#define AVOID_FP_TYPE_CONVERSION(SIZE) (SIZE == 106)
+#elif defined(WIDEST_HARDWARE_FP_SIZE)
+#define AVOID_FP_TYPE_CONVERSION(SIZE) (SIZE > WIDEST_HARDWARE_FP_SIZE)
+#else
+#define AVOID_FP_TYPE_CONVERSION(SIZE) 0
+#endif
+
+/* In the first part of this file, we are interfacing to calls generated
+ by the compiler itself. These calls pass values into these routines
+ which have very specific modes (rather than very specific types), and
+ these compiler-generated calls also expect any return values to have
+ very specific modes (rather than very specific types). Thus, we need
+ to avoid using regular C language type names in this part of the file
+ because the sizes for those types can be configured to be anything.
+ Instead we use the following special type names. */
+
+typedef int QItype __attribute__ ((mode (QI)));
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int HItype __attribute__ ((mode (HI)));
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+#if MIN_UNITS_PER_WORD > 1
+/* These typedefs are usually forbidden on DSPs with UNITS_PER_WORD 1. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+#if __SIZEOF_LONG_LONG__ > 4
+/* These typedefs are usually forbidden on archs with UNITS_PER_WORD 2. */
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#if MIN_UNITS_PER_WORD > 4
+/* These typedefs are usually forbidden on archs with UNITS_PER_WORD 4. */
+typedef int TItype __attribute__ ((mode (TI)));
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#endif
+#endif
+#endif
+
+#if LIBGCC2_HAS_SF_MODE
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef _Complex float SCtype __attribute__ ((mode (SC)));
+#endif
+#if LIBGCC2_HAS_DF_MODE
+typedef float DFtype __attribute__ ((mode (DF)));
+typedef _Complex float DCtype __attribute__ ((mode (DC)));
+#endif
+#if LIBGCC2_HAS_XF_MODE
+typedef float XFtype __attribute__ ((mode (XF)));
+typedef _Complex float XCtype __attribute__ ((mode (XC)));
+#endif
+#if LIBGCC2_HAS_TF_MODE
+typedef float TFtype __attribute__ ((mode (TF)));
+typedef _Complex float TCtype __attribute__ ((mode (TC)));
+#endif
+
+typedef int cmp_return_type __attribute__((mode (__libgcc_cmp_return__)));
+typedef int shift_count_type __attribute__((mode (__libgcc_shift_count__)));
+
+/* Make sure that we don't accidentally use any normal C language built-in
+ type names in the first part of this file. Instead we want to use *only*
+ the type names defined above. The following macro definitions insure
+ that if we *do* accidentally use some normal C language built-in type name,
+ we will get a syntax error. */
+
+#define char bogus_type
+#define short bogus_type
+#define int bogus_type
+#define long bogus_type
+#define unsigned bogus_type
+#define float bogus_type
+#define double bogus_type
+
+/* Versions prior to 3.4.4 did not take the word size into account for
+ the 5 trapping arithmetic functions absv, addv, subv, mulv and negv. As
+ a consequence, the si and di variants were always the only ones emitted.
+ To maintain backward compatibility, COMPAT_SIMODE_TRAPPING_ARITHMETIC is
+ defined on platforms where it makes sense to still have the si variants
+ emitted. As a bonus, their implementation is now correct. Note that the
+ same mechanism should have been implemented for the di variants, but it
+ turns out that no platform would define COMPAT_DIMODE_TRAPPING_ARITHMETIC
+ if it existed. */
+
+#if LIBGCC2_UNITS_PER_WORD == 8
+#define W_TYPE_SIZE (8 * BITS_PER_UNIT)
+#define Wtype DItype
+#define UWtype UDItype
+#define HWtype DItype
+#define UHWtype UDItype
+#define DWtype TItype
+#define UDWtype UTItype
+#ifdef LIBGCC2_GNU_PREFIX
+#define __NW(a,b) __gnu_ ## a ## di ## b
+#define __NDW(a,b) __gnu_ ## a ## ti ## b
+#else
+#define __NW(a,b) __ ## a ## di ## b
+#define __NDW(a,b) __ ## a ## ti ## b
+#endif
+#define COMPAT_SIMODE_TRAPPING_ARITHMETIC
+#elif LIBGCC2_UNITS_PER_WORD == 4
+#define W_TYPE_SIZE (4 * BITS_PER_UNIT)
+#define Wtype SItype
+#define UWtype USItype
+#define HWtype SItype
+#define UHWtype USItype
+#define DWtype DItype
+#define UDWtype UDItype
+#ifdef LIBGCC2_GNU_PREFIX
+#define __NW(a,b) __gnu_ ## a ## si ## b
+#define __NDW(a,b) __gnu_ ## a ## di ## b
+#else
+#define __NW(a,b) __ ## a ## si ## b
+#define __NDW(a,b) __ ## a ## di ## b
+#endif
+#elif LIBGCC2_UNITS_PER_WORD == 2
+#define W_TYPE_SIZE (2 * BITS_PER_UNIT)
+#define Wtype HItype
+#define UWtype UHItype
+#define HWtype HItype
+#define UHWtype UHItype
+#define DWtype SItype
+#define UDWtype USItype
+#ifdef LIBGCC2_GNU_PREFIX
+#define __NW(a,b) __gnu_ ## a ## hi ## b
+#define __NDW(a,b) __gnu_ ## a ## si ## b
+#else
+#define __NW(a,b) __ ## a ## hi ## b
+#define __NDW(a,b) __ ## a ## si ## b
+#endif
+#else
+#define W_TYPE_SIZE BITS_PER_UNIT
+#define Wtype QItype
+#define UWtype UQItype
+#define HWtype QItype
+#define UHWtype UQItype
+#define DWtype HItype
+#define UDWtype UHItype
+#ifdef LIBGCC2_GNU_PREFIX
+#define __NW(a,b) __gnu_ ## a ## qi ## b
+#define __NDW(a,b) __gnu_ ## a ## hi ## b
+#else
+#define __NW(a,b) __ ## a ## qi ## b
+#define __NDW(a,b) __ ## a ## hi ## b
+#endif
+#endif
+
+#ifdef LIBGCC2_GNU_PREFIX
+#define __N(a) __gnu_ ## a
+#else
+#define __N(a) __ ## a
+#endif
+#define Wtype_MAX ((Wtype)(((UWtype)1 << (W_TYPE_SIZE - 1)) - 1))
+#define Wtype_MIN (- Wtype_MAX - 1)
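+
+/* For instance, with W_TYPE_SIZE == 32 these expand to 0x7fffffff and
+ -0x80000000; the shift is performed in UWtype, so neither expression
+ overflows while being computed. */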
+
+#if W_TYPE_SIZE == 8
+# define Wtype_MAXp1_F 0x1p8f
+#elif W_TYPE_SIZE == 16
+# define Wtype_MAXp1_F 0x1p16f
+#elif W_TYPE_SIZE == 32
+# define Wtype_MAXp1_F 0x1p32f
+#elif W_TYPE_SIZE == 64
+# define Wtype_MAXp1_F 0x1p64f
+#else
+# error "expand the table"
+#endif
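+
+/* The hexadecimal float constants above denote exact powers of two:
+ 0x1p32f is 2**32 as an SFtype, one more than the maximum UWtype on a
+ 32-bit-word target, so multiplying or dividing by Wtype_MAXp1_F
+ shifts a value by exactly one word with no rounding. */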
+
+#define __muldi3 __NDW(mul,3)
+#define __divdi3 __NDW(div,3)
+#define __udivdi3 __NDW(udiv,3)
+#define __moddi3 __NDW(mod,3)
+#define __umoddi3 __NDW(umod,3)
+#define __negdi2 __NDW(neg,2)
+#define __lshrdi3 __NDW(lshr,3)
+#define __ashldi3 __NDW(ashl,3)
+#define __ashrdi3 __NDW(ashr,3)
+#define __cmpdi2 __NDW(cmp,2)
+#define __ucmpdi2 __NDW(ucmp,2)
+#define __udivmoddi4 __NDW(udivmod,4)
+#define __fixunstfDI __NDW(fixunstf,)
+#define __fixtfdi __NDW(fixtf,)
+#define __fixunsxfDI __NDW(fixunsxf,)
+#define __fixxfdi __NDW(fixxf,)
+#define __fixunsdfDI __NDW(fixunsdf,)
+#define __fixdfdi __NDW(fixdf,)
+#define __fixunssfDI __NDW(fixunssf,)
+#define __fixsfdi __NDW(fixsf,)
+#define __floatdixf __NDW(float,xf)
+#define __floatditf __NDW(float,tf)
+#define __floatdidf __NDW(float,df)
+#define __floatdisf __NDW(float,sf)
+#define __floatundixf __NDW(floatun,xf)
+#define __floatunditf __NDW(floatun,tf)
+#define __floatundidf __NDW(floatun,df)
+#define __floatundisf __NDW(floatun,sf)
+#define __fixunsxfSI __NW(fixunsxf,)
+#define __fixunstfSI __NW(fixunstf,)
+#define __fixunsdfSI __NW(fixunsdf,)
+#define __fixunssfSI __NW(fixunssf,)
+
+#define __absvSI2 __NW(absv,2)
+#define __addvSI3 __NW(addv,3)
+#define __subvSI3 __NW(subv,3)
+#define __mulvSI3 __NW(mulv,3)
+#define __negvSI2 __NW(negv,2)
+#define __absvDI2 __NDW(absv,2)
+#define __addvDI3 __NDW(addv,3)
+#define __subvDI3 __NDW(subv,3)
+#define __mulvDI3 __NDW(mulv,3)
+#define __negvDI2 __NDW(negv,2)
+
+#define __ffsSI2 __NW(ffs,2)
+#define __clzSI2 __NW(clz,2)
+#define __ctzSI2 __NW(ctz,2)
+#define __clrsbSI2 __NW(clrsb,2)
+#define __popcountSI2 __NW(popcount,2)
+#define __paritySI2 __NW(parity,2)
+#define __ffsDI2 __NDW(ffs,2)
+#define __clzDI2 __NDW(clz,2)
+#define __ctzDI2 __NDW(ctz,2)
+#define __clrsbDI2 __NDW(clrsb,2)
+#define __popcountDI2 __NDW(popcount,2)
+#define __parityDI2 __NDW(parity,2)
+
+#define __clz_tab __N(clz_tab)
+#define __bswapsi2 __N(bswapsi2)
+#define __bswapdi2 __N(bswapdi2)
+#define __udiv_w_sdiv __N(udiv_w_sdiv)
+#define __clear_cache __N(clear_cache)
+#define __enable_execute_stack __N(enable_execute_stack)
+
+#ifndef __powisf2
+#define __powisf2 __N(powisf2)
+#endif
+#ifndef __powidf2
+#define __powidf2 __N(powidf2)
+#endif
+#ifndef __powitf2
+#define __powitf2 __N(powitf2)
+#endif
+#ifndef __powixf2
+#define __powixf2 __N(powixf2)
+#endif
+#ifndef __mulsc3
+#define __mulsc3 __N(mulsc3)
+#endif
+#ifndef __muldc3
+#define __muldc3 __N(muldc3)
+#endif
+#ifndef __mulxc3
+#define __mulxc3 __N(mulxc3)
+#endif
+#ifndef __multc3
+#define __multc3 __N(multc3)
+#endif
+#ifndef __divsc3
+#define __divsc3 __N(divsc3)
+#endif
+#ifndef __divdc3
+#define __divdc3 __N(divdc3)
+#endif
+#ifndef __divxc3
+#define __divxc3 __N(divxc3)
+#endif
+#ifndef __divtc3
+#define __divtc3 __N(divtc3)
+#endif
+
+extern DWtype __muldi3 (DWtype, DWtype);
+extern DWtype __divdi3 (DWtype, DWtype);
+extern UDWtype __udivdi3 (UDWtype, UDWtype);
+extern UDWtype __umoddi3 (UDWtype, UDWtype);
+extern DWtype __moddi3 (DWtype, DWtype);
+
+/* __udivmoddi4 is static inline when building other libgcc2 portions. */
+#if (!defined (L_udivdi3) && !defined (L_divdi3) && \
+ !defined (L_umoddi3) && !defined (L_moddi3))
+extern UDWtype __udivmoddi4 (UDWtype, UDWtype, UDWtype *);
+#endif
+
+/* __negdi2 is static inline when building other libgcc2 portions. */
+#if !defined(L_divdi3) && !defined(L_moddi3)
+extern DWtype __negdi2 (DWtype);
+#endif
+
+extern DWtype __lshrdi3 (DWtype, shift_count_type);
+extern DWtype __ashldi3 (DWtype, shift_count_type);
+extern DWtype __ashrdi3 (DWtype, shift_count_type);
+
+/* __udiv_w_sdiv is static inline when building other libgcc2 portions. */
+#if (!defined(L_udivdi3) && !defined(L_divdi3) && \
+ !defined(L_umoddi3) && !defined(L_moddi3))
+extern UWtype __udiv_w_sdiv (UWtype *, UWtype, UWtype, UWtype);
+#endif
+
+extern cmp_return_type __cmpdi2 (DWtype, DWtype);
+extern cmp_return_type __ucmpdi2 (DWtype, DWtype);
+
+#if MIN_UNITS_PER_WORD > 1
+extern SItype __bswapsi2 (SItype);
+#endif
+#if __SIZEOF_LONG_LONG__ > 4
+extern DItype __bswapdi2 (DItype);
+#endif
+
+extern Wtype __absvSI2 (Wtype);
+extern Wtype __addvSI3 (Wtype, Wtype);
+extern Wtype __subvSI3 (Wtype, Wtype);
+extern Wtype __mulvSI3 (Wtype, Wtype);
+extern Wtype __negvSI2 (Wtype);
+extern DWtype __absvDI2 (DWtype);
+extern DWtype __addvDI3 (DWtype, DWtype);
+extern DWtype __subvDI3 (DWtype, DWtype);
+extern DWtype __mulvDI3 (DWtype, DWtype);
+extern DWtype __negvDI2 (DWtype);
+
+#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
+#define __absvsi2 __N(absvsi2)
+#define __negvsi2 __N(negvsi2)
+#define __addvsi3 __N(addvsi3)
+#define __subvsi3 __N(subvsi3)
+#define __mulvsi3 __N(mulvsi3)
+
+extern SItype __absvsi2 (SItype);
+extern SItype __addvsi3 (SItype, SItype);
+extern SItype __subvsi3 (SItype, SItype);
+extern SItype __mulvsi3 (SItype, SItype);
+extern SItype __negvsi2 (SItype);
+#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
+
+#undef int
+#if LIBGCC2_HAS_SF_MODE
+extern DWtype __fixsfdi (SFtype);
+extern SFtype __floatdisf (DWtype);
+extern SFtype __floatundisf (UDWtype);
+extern UWtype __fixunssfSI (SFtype);
+extern UDWtype __fixunssfDI (SFtype);
+extern SFtype __powisf2 (SFtype, int);
+extern SCtype __divsc3 (SFtype, SFtype, SFtype, SFtype);
+extern SCtype __mulsc3 (SFtype, SFtype, SFtype, SFtype);
+#endif
+#if LIBGCC2_HAS_DF_MODE
+extern DWtype __fixdfdi (DFtype);
+extern DFtype __floatdidf (DWtype);
+extern DFtype __floatundidf (UDWtype);
+extern UWtype __fixunsdfSI (DFtype);
+extern UDWtype __fixunsdfDI (DFtype);
+extern DFtype __powidf2 (DFtype, int);
+extern DCtype __divdc3 (DFtype, DFtype, DFtype, DFtype);
+extern DCtype __muldc3 (DFtype, DFtype, DFtype, DFtype);
+#endif
+
+#if LIBGCC2_HAS_XF_MODE
+extern DWtype __fixxfdi (XFtype);
+extern UDWtype __fixunsxfDI (XFtype);
+extern XFtype __floatdixf (DWtype);
+extern XFtype __floatundixf (UDWtype);
+extern UWtype __fixunsxfSI (XFtype);
+extern XFtype __powixf2 (XFtype, int);
+extern XCtype __divxc3 (XFtype, XFtype, XFtype, XFtype);
+extern XCtype __mulxc3 (XFtype, XFtype, XFtype, XFtype);
+#endif
+
+#if LIBGCC2_HAS_TF_MODE
+extern UDWtype __fixunstfDI (TFtype);
+extern DWtype __fixtfdi (TFtype);
+extern TFtype __floatditf (DWtype);
+extern TFtype __floatunditf (UDWtype);
+extern TFtype __powitf2 (TFtype, int);
+extern TCtype __divtc3 (TFtype, TFtype, TFtype, TFtype);
+extern TCtype __multc3 (TFtype, TFtype, TFtype, TFtype);
+#endif
+#define int bogus_type
+
+/* DWstructs are pairs of Wtype values in the order determined by
+ __BYTE_ORDER__. */
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+ struct DWstruct {Wtype high, low;};
+#else
+ struct DWstruct {Wtype low, high;};
+#endif
+
+/* We need this union to unpack/pack DImode values, since we don't have
+ any arithmetic yet. Incoming DImode parameters are stored into the
+ `ll' field, and the unpacked result is read from the struct `s'. */
+
+typedef union
+{
+ struct DWstruct s;
+ DWtype ll;
+} DWunion;
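+
+/* A usage sketch, matching the pattern in libgcc2.c: an incoming
+ DWtype A is unpacked as
+
+ const DWunion au = {.ll = a};
+ ... au.s.high ... au.s.low ...
+
+ and a result is packed by assigning the struct fields and returning
+ the .ll member. */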
+
+/* Defined for L_popcount_tab. Exported here because some targets may
+ want to use it for their own versions of the __popcount builtins. */
+extern const UQItype __popcount_tab[256];
+
+/* Defined for L_clz. Exported here because some targets may want to use
+ it for their own versions of the __clz builtins. It contains the bit
+ position of the first set bit for the numbers 0 - 255. This avoids the
+ need for a separate table for the __ctz builtins. */
+extern const UQItype __clz_tab[256];
+
+#include "longlong.h"
+
+#undef int
+extern int __clzDI2 (UDWtype);
+extern int __clzSI2 (UWtype);
+extern int __ctzSI2 (UWtype);
+extern int __ctzDI2 (UDWtype);
+extern int __clrsbSI2 (Wtype);
+extern int __clrsbDI2 (DWtype);
+extern int __ffsSI2 (UWtype);
+extern int __ffsDI2 (DWtype);
+extern int __popcountSI2 (UWtype);
+extern int __popcountDI2 (UDWtype);
+extern int __paritySI2 (UWtype);
+extern int __parityDI2 (UDWtype);
+#define int bogus_type
+
+extern void __enable_execute_stack (void *);
+
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility pop
+#endif
+
+#endif /* ! GCC_LIBGCC2_H */
diff --git a/libgcc/libgcov.c b/libgcc/libgcov.c
index b39ef49343f..f7691df967c 100644
--- a/libgcc/libgcov.c
+++ b/libgcc/libgcov.c
@@ -1,7 +1,7 @@
/* Routines required for instrumenting a program. */
/* Compile this one with gcc. */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
@@ -29,6 +29,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#if defined(inhibit_libc)
#define IN_LIBGCOV (-1)
@@ -77,6 +78,14 @@ void __gcov_merge_delta (gcov_type *counters __attribute__ ((unused)),
#ifdef L_gcov
#include "gcov-io.c"
+struct gcov_fn_buffer
+{
+ struct gcov_fn_buffer *next;
+ unsigned fn_ix;
+ struct gcov_fn_info info;
+ /* Note that gcov_fn_info ends in a trailing array. */
+};
+
/* Chain of per-object gcov structures. */
static struct gcov_info *gcov_list;
@@ -134,6 +143,64 @@ create_file_directory (char *filename)
#endif
}
+static struct gcov_fn_buffer **
+buffer_fn_data (struct gcov_info *gi_ptr, struct gcov_fn_buffer **end_ptr,
+ unsigned fn_ix)
+{
+ unsigned n_ctrs = 0, ix;
+ struct gcov_fn_buffer *fn_buffer;
+
+ for (ix = GCOV_COUNTERS; ix--;)
+ if (gi_ptr->merge[ix])
+ n_ctrs++;
+
+ fn_buffer = (struct gcov_fn_buffer *)malloc
+ (sizeof (*fn_buffer) + sizeof (fn_buffer->info.ctrs[0]) * n_ctrs);
+
+ if (!fn_buffer)
+ return 0; /* We'll fail horribly. */
+
+ fn_buffer->next = 0;
+ fn_buffer->fn_ix = fn_ix;
+ fn_buffer->info.ident = gcov_read_unsigned ();
+ fn_buffer->info.lineno_checksum = gcov_read_unsigned ();
+ fn_buffer->info.cfg_checksum = gcov_read_unsigned ();
+
+ for (n_ctrs = ix = 0; ix != GCOV_COUNTERS; ix++)
+ {
+ gcov_unsigned_t length;
+ gcov_type *values;
+
+ if (!gi_ptr->merge[ix])
+ continue;
+
+ if (gcov_read_unsigned () != GCOV_TAG_FOR_COUNTER (ix))
+ goto fail;
+
+ length = GCOV_TAG_COUNTER_NUM (gcov_read_unsigned ());
+ values = (gcov_type *)malloc (length * sizeof (gcov_type));
+ if (!values)
+ {
+ while (n_ctrs--)
+ free (fn_buffer->info.ctrs[n_ctrs].values);
+ goto fail;
+ }
+ fn_buffer->info.ctrs[n_ctrs].num = length;
+ fn_buffer->info.ctrs[n_ctrs].values = values;
+
+ while (length--)
+ *values++ = gcov_read_counter ();
+ n_ctrs++;
+ }
+
+ *end_ptr = fn_buffer;
+ return &fn_buffer->next;
+
+ fail:
+ free (fn_buffer);
+ return 0;
+}
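+
+/* (buffer_fn_data appends the new node at *END_PTR and returns the
+ address of its NEXT field -- the usual pointer-to-pointer list-append
+ idiom, so the caller's tail pointer stays valid and the empty list
+ needs no special case.) */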
+
/* Check if VERSION of the info block PTR matches the libgcov one.
 Return 1 on success, or zero in case of a version mismatch.
 If FILENAME is not NULL, its value is used for reporting purposes
@@ -169,39 +236,46 @@ static void
gcov_exit (void)
{
struct gcov_info *gi_ptr;
- struct gcov_summary this_program;
- struct gcov_summary all;
+ const struct gcov_fn_info *gfi_ptr;
+ struct gcov_summary this_prg; /* summary for program. */
+ struct gcov_summary all_prg; /* summary for all instances of program. */
struct gcov_ctr_summary *cs_ptr;
const struct gcov_ctr_info *ci_ptr;
- unsigned t_ix;
+ unsigned t_ix, f_ix;
gcov_unsigned_t c_num;
const char *gcov_prefix;
int gcov_prefix_strip = 0;
size_t prefix_length;
char *gi_filename, *gi_filename_up;
- memset (&all, 0, sizeof (all));
+ memset (&all_prg, 0, sizeof (all_prg));
/* Find the totals for this execution. */
- memset (&this_program, 0, sizeof (this_program));
+ memset (&this_prg, 0, sizeof (this_prg));
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
- {
- ci_ptr = gi_ptr->counts;
- for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++)
- {
- if (!((1 << t_ix) & gi_ptr->ctr_mask))
- continue;
+ for (f_ix = 0; f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ continue;
+
+ ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix != GCOV_COUNTERS_SUMMABLE; t_ix++)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
- cs_ptr = &this_program.ctrs[t_ix];
- cs_ptr->num += ci_ptr->num;
- for (c_num = 0; c_num < ci_ptr->num; c_num++)
- {
- cs_ptr->sum_all += ci_ptr->values[c_num];
- if (cs_ptr->run_max < ci_ptr->values[c_num])
- cs_ptr->run_max = ci_ptr->values[c_num];
- }
- ci_ptr++;
- }
- }
+ cs_ptr = &this_prg.ctrs[t_ix];
+ cs_ptr->num += ci_ptr->num;
+ for (c_num = 0; c_num < ci_ptr->num; c_num++)
+ {
+ cs_ptr->sum_all += ci_ptr->values[c_num];
+ if (cs_ptr->run_max < ci_ptr->values[c_num])
+ cs_ptr->run_max = ci_ptr->values[c_num];
+ }
+ ci_ptr++;
+ }
+ }
{
/* Check if the level of dirs to strip off is specified. */
@@ -214,6 +288,7 @@ gcov_exit (void)
gcov_prefix_strip = 0;
}
}
+
/* Get file name relocation prefix. Non-absolute values are ignored. */
gcov_prefix = getenv("GCOV_PREFIX");
if (gcov_prefix)
@@ -243,24 +318,20 @@ gcov_exit (void)
/* Now merge each file. */
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
{
- struct gcov_summary this_object;
- struct gcov_summary object, program;
- gcov_type *values[GCOV_COUNTERS];
- const struct gcov_fn_info *fi_ptr;
- unsigned fi_stride;
- unsigned c_ix, f_ix, n_counts;
- struct gcov_ctr_summary *cs_obj, *cs_tobj, *cs_prg, *cs_tprg, *cs_all;
+ unsigned n_counts;
+ struct gcov_summary prg; /* summary for this object over all
+ program. */
+ struct gcov_ctr_summary *cs_prg, *cs_tprg, *cs_all;
int error = 0;
gcov_unsigned_t tag, length;
gcov_position_t summary_pos = 0;
gcov_position_t eof_pos = 0;
const char *fname, *s;
+ struct gcov_fn_buffer *fn_buffer = 0;
+ struct gcov_fn_buffer **fn_tail = &fn_buffer;
fname = gi_ptr->filename;
- memset (&this_object, 0, sizeof (this_object));
- memset (&object, 0, sizeof (object));
-
/* Avoid adding multiple drive letters to the combined path. */
if (prefix_length != 0 && HAS_DRIVE_SPEC(fname))
fname += 2;
@@ -282,6 +353,7 @@ gcov_exit (void)
level++;
}
}
+
/* Update complete filename with stripped original. */
if (prefix_length != 0 && !IS_DIR_SEPARATOR (*fname))
{
@@ -292,42 +364,6 @@ gcov_exit (void)
else
strcpy (gi_filename_up, fname);
- /* Totals for this object file. */
- ci_ptr = gi_ptr->counts;
- for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++)
- {
- if (!((1 << t_ix) & gi_ptr->ctr_mask))
- continue;
-
- cs_ptr = &this_object.ctrs[t_ix];
- cs_ptr->num += ci_ptr->num;
- for (c_num = 0; c_num < ci_ptr->num; c_num++)
- {
- cs_ptr->sum_all += ci_ptr->values[c_num];
- if (cs_ptr->run_max < ci_ptr->values[c_num])
- cs_ptr->run_max = ci_ptr->values[c_num];
- }
-
- ci_ptr++;
- }
-
- c_ix = 0;
- for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
- if ((1 << t_ix) & gi_ptr->ctr_mask)
- {
- values[c_ix] = gi_ptr->counts[c_ix].values;
- c_ix++;
- }
-
- /* Calculate the function_info stride. This depends on the
- number of counter types being measured. */
- fi_stride = sizeof (struct gcov_fn_info) + c_ix * sizeof (unsigned);
- if (__alignof__ (struct gcov_fn_info) > sizeof (unsigned))
- {
- fi_stride += __alignof__ (struct gcov_fn_info) - 1;
- fi_stride &= ~(__alignof__ (struct gcov_fn_info) - 1);
- }
-
if (!gcov_open (gi_filename))
{
/* Open failed, likely due to a missing directory.
@@ -363,83 +399,98 @@ gcov_exit (void)
/* Read from a different compilation. Overwrite the file. */
goto rewrite;
- /* Merge execution counts for each function. */
- for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
+ /* Look for program summary. */
+ for (f_ix = ~0u;;)
{
- fi_ptr = (const struct gcov_fn_info *)
- ((const char *) gi_ptr->functions + f_ix * fi_stride);
+ struct gcov_summary tmp;
+
+ eof_pos = gcov_position ();
tag = gcov_read_unsigned ();
+ if (tag != GCOV_TAG_PROGRAM_SUMMARY)
+ break;
+
+ length = gcov_read_unsigned ();
+ if (length != GCOV_TAG_SUMMARY_LENGTH)
+ goto read_mismatch;
+ gcov_read_summary (&tmp);
+ if ((error = gcov_is_error ()))
+ goto read_error;
+ if (!summary_pos && tmp.checksum == gcov_crc32)
+ {
+ prg = tmp;
+ summary_pos = eof_pos;
+ }
+ }
+
+ /* Merge execution counts for each function. */
+ for (f_ix = 0; f_ix != gi_ptr->n_functions;
+ f_ix++, tag = gcov_read_unsigned ())
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (tag != GCOV_TAG_FUNCTION)
+ goto read_mismatch;
length = gcov_read_unsigned ();
- /* Check function. */
- if (tag != GCOV_TAG_FUNCTION
- || length != GCOV_TAG_FUNCTION_LENGTH
- || gcov_read_unsigned () != fi_ptr->ident
- || gcov_read_unsigned () != fi_ptr->lineno_checksum
- || gcov_read_unsigned () != fi_ptr->cfg_checksum)
+ if (!length)
+ /* This function did not appear in the other program.
+ We have nothing to merge. */
+ continue;
+
+ if (length != GCOV_TAG_FUNCTION_LENGTH)
+ goto read_mismatch;
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
{
- read_mismatch:;
- fprintf (stderr, "profiling:%s:Merge mismatch for %s\n",
- gi_filename,
- f_ix + 1 ? "function" : "summaries");
- goto read_fatal;
+ /* This function appears in the other program. We
+ need to buffer the information in order to write
+ it back out -- we'll be inserting data before
+ this point, so cannot simply keep the data in the
+ file. */
+ fn_tail = buffer_fn_data (gi_ptr, fn_tail, f_ix);
+ if (!fn_tail)
+ goto read_mismatch;
+ continue;
}
- c_ix = 0;
+ if (gcov_read_unsigned () != gfi_ptr->ident
+ || gcov_read_unsigned () != gfi_ptr->lineno_checksum
+ || gcov_read_unsigned () != gfi_ptr->cfg_checksum)
+ goto read_mismatch;
+
+ ci_ptr = gfi_ptr->ctrs;
for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
{
- gcov_merge_fn merge;
+ gcov_merge_fn merge = gi_ptr->merge[t_ix];
- if (!((1 << t_ix) & gi_ptr->ctr_mask))
+ if (!merge)
continue;
- n_counts = fi_ptr->n_ctrs[c_ix];
- merge = gi_ptr->counts[c_ix].merge;
-
tag = gcov_read_unsigned ();
length = gcov_read_unsigned ();
if (tag != GCOV_TAG_FOR_COUNTER (t_ix)
- || length != GCOV_TAG_COUNTER_LENGTH (n_counts))
+ || length != GCOV_TAG_COUNTER_LENGTH (ci_ptr->num))
goto read_mismatch;
- (*merge) (values[c_ix], n_counts);
- values[c_ix] += n_counts;
- c_ix++;
+ (*merge) (ci_ptr->values, ci_ptr->num);
+ ci_ptr++;
}
if ((error = gcov_is_error ()))
goto read_error;
}
- f_ix = ~0u;
- /* Check program & object summary */
- while (1)
+ if (tag)
{
- int is_program;
-
- eof_pos = gcov_position ();
- tag = gcov_read_unsigned ();
- if (!tag)
- break;
-
- length = gcov_read_unsigned ();
- is_program = tag == GCOV_TAG_PROGRAM_SUMMARY;
- if (length != GCOV_TAG_SUMMARY_LENGTH
- || (!is_program && tag != GCOV_TAG_OBJECT_SUMMARY))
- goto read_mismatch;
- gcov_read_summary (is_program ? &program : &object);
- if ((error = gcov_is_error ()))
- goto read_error;
- if (is_program && program.checksum == gcov_crc32)
- {
- summary_pos = eof_pos;
- goto rewrite;
- }
+ read_mismatch:;
+ fprintf (stderr, "profiling:%s:Merge mismatch for %s\n",
+ gi_filename, f_ix + 1 ? "function" : "summaries");
+ goto read_fatal;
}
}
goto rewrite;
read_error:;
- fprintf (stderr, error < 0 ? "profiling:%s:Overflow merging\n"
- : "profiling:%s:Error merging\n", gi_filename);
+ fprintf (stderr, "profiling:%s:%s merging\n", gi_filename,
+ error < 0 ? "Overflow": "Error");
read_fatal:;
gcov_close ();
@@ -448,29 +499,20 @@ gcov_exit (void)
rewrite:;
gcov_rewrite ();
if (!summary_pos)
- memset (&program, 0, sizeof (program));
+ {
+ memset (&prg, 0, sizeof (prg));
+ summary_pos = eof_pos;
+ }
/* Merge the summaries. */
- f_ix = ~0u;
for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++)
{
- cs_obj = &object.ctrs[t_ix];
- cs_tobj = &this_object.ctrs[t_ix];
- cs_prg = &program.ctrs[t_ix];
- cs_tprg = &this_program.ctrs[t_ix];
- cs_all = &all.ctrs[t_ix];
+ cs_prg = &prg.ctrs[t_ix];
+ cs_tprg = &this_prg.ctrs[t_ix];
+ cs_all = &all_prg.ctrs[t_ix];
- if ((1 << t_ix) & gi_ptr->ctr_mask)
+ if (gi_ptr->merge[t_ix])
{
- if (!cs_obj->runs++)
- cs_obj->num = cs_tobj->num;
- else if (cs_obj->num != cs_tobj->num)
- goto read_mismatch;
- cs_obj->sum_all += cs_tobj->sum_all;
- if (cs_obj->run_max < cs_tobj->run_max)
- cs_obj->run_max = cs_tobj->run_max;
- cs_obj->sum_max += cs_tobj->run_max;
-
if (!cs_prg->runs++)
cs_prg->num = cs_tprg->num;
else if (cs_prg->num != cs_tprg->num)
@@ -480,78 +522,94 @@ gcov_exit (void)
cs_prg->run_max = cs_tprg->run_max;
cs_prg->sum_max += cs_tprg->run_max;
}
- else if (cs_obj->num || cs_prg->num)
+ else if (cs_prg->runs)
goto read_mismatch;
if (!cs_all->runs && cs_prg->runs)
memcpy (cs_all, cs_prg, sizeof (*cs_all));
- else if (!all.checksum
+ else if (!all_prg.checksum
&& (!GCOV_LOCKED || cs_all->runs == cs_prg->runs)
&& memcmp (cs_all, cs_prg, sizeof (*cs_all)))
{
- fprintf (stderr, "profiling:%s:Invocation mismatch - some data files may have been removed%s",
+ fprintf (stderr, "profiling:%s:Invocation mismatch - some data files may have been removed%s\n",
gi_filename, GCOV_LOCKED
- ? "" : " or concurrent update without locking support");
- all.checksum = ~0u;
+ ? "" : " or concurrently updated without locking support");
+ all_prg.checksum = ~0u;
}
}
- c_ix = 0;
- for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
- if ((1 << t_ix) & gi_ptr->ctr_mask)
- {
- values[c_ix] = gi_ptr->counts[c_ix].values;
- c_ix++;
- }
-
- program.checksum = gcov_crc32;
+ prg.checksum = gcov_crc32;
/* Write out the data. */
- gcov_write_tag_length (GCOV_DATA_MAGIC, GCOV_VERSION);
- gcov_write_unsigned (gi_ptr->stamp);
+ if (!eof_pos)
+ {
+ gcov_write_tag_length (GCOV_DATA_MAGIC, GCOV_VERSION);
+ gcov_write_unsigned (gi_ptr->stamp);
+ }
+
+ if (summary_pos)
+ gcov_seek (summary_pos);
+
+ /* Generate whole program statistics. */
+ gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &prg);
+
+ if (summary_pos < eof_pos)
+ gcov_seek (eof_pos);
/* Write execution counts for each function. */
for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
{
- fi_ptr = (const struct gcov_fn_info *)
- ((const char *) gi_ptr->functions + f_ix * fi_stride);
+ unsigned buffered = 0;
- /* Announce function. */
- gcov_write_tag_length (GCOV_TAG_FUNCTION, GCOV_TAG_FUNCTION_LENGTH);
- gcov_write_unsigned (fi_ptr->ident);
- gcov_write_unsigned (fi_ptr->lineno_checksum);
- gcov_write_unsigned (fi_ptr->cfg_checksum);
+ if (fn_buffer && fn_buffer->fn_ix == f_ix)
+ {
+ /* Buffered data from another program. */
+ buffered = 1;
+ gfi_ptr = &fn_buffer->info;
+ length = GCOV_TAG_FUNCTION_LENGTH;
+ }
+ else
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+ if (gfi_ptr && gfi_ptr->key == gi_ptr)
+ length = GCOV_TAG_FUNCTION_LENGTH;
+ else
+ length = 0;
+ }
+
+ gcov_write_tag_length (GCOV_TAG_FUNCTION, length);
+ if (!length)
+ continue;
+
+ gcov_write_unsigned (gfi_ptr->ident);
+ gcov_write_unsigned (gfi_ptr->lineno_checksum);
+ gcov_write_unsigned (gfi_ptr->cfg_checksum);
- c_ix = 0;
+ ci_ptr = gfi_ptr->ctrs;
for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
{
- gcov_type *c_ptr;
-
- if (!((1 << t_ix) & gi_ptr->ctr_mask))
+ if (!gi_ptr->merge[t_ix])
continue;
- n_counts = fi_ptr->n_ctrs[c_ix];
-
+ n_counts = ci_ptr->num;
gcov_write_tag_length (GCOV_TAG_FOR_COUNTER (t_ix),
GCOV_TAG_COUNTER_LENGTH (n_counts));
- c_ptr = values[c_ix];
+ gcov_type *c_ptr = ci_ptr->values;
while (n_counts--)
gcov_write_counter (*c_ptr++);
-
- values[c_ix] = c_ptr;
- c_ix++;
+ if (buffered)
+ free (ci_ptr->values);
+ ci_ptr++;
+ }
+ if (buffered)
+ {
+ struct gcov_fn_buffer *tmp = fn_buffer;
+ fn_buffer = fn_buffer->next;
+ free (tmp);
}
}
- /* Object file summary. */
- gcov_write_summary (GCOV_TAG_OBJECT_SUMMARY, &object);
-
- /* Generate whole program statistics. */
- if (eof_pos)
- gcov_seek (eof_pos);
- gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &program);
- if (!summary_pos)
- gcov_write_unsigned (0);
+ gcov_write_unsigned (0);
if ((error = gcov_close ()))
fprintf (stderr, error < 0 ?
"profiling:%s:Overflow writing\n" :
@@ -617,15 +675,25 @@ __gcov_flush (void)
gcov_exit ();
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
{
- unsigned t_ix;
- const struct gcov_ctr_info *ci_ptr;
+ unsigned f_ix;
- for (t_ix = 0, ci_ptr = gi_ptr->counts; t_ix != GCOV_COUNTERS; t_ix++)
- if ((1 << t_ix) & gi_ptr->ctr_mask)
- {
- memset (ci_ptr->values, 0, sizeof (gcov_type) * ci_ptr->num);
- ci_ptr++;
- }
+ for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
+ {
+ unsigned t_ix;
+ const struct gcov_fn_info *gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ continue;
+ const struct gcov_ctr_info *ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix != GCOV_COUNTERS; t_ix++)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ memset (ci_ptr->values, 0, sizeof (gcov_type) * ci_ptr->num);
+ ci_ptr++;
+ }
+ }
}
}
diff --git a/libgcc/longlong.h b/libgcc/longlong.h
new file mode 100644
index 00000000000..30cc2e337f3
--- /dev/null
+++ b/libgcc/longlong.h
@@ -0,0 +1,1660 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+ Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* You have to define the following before including this file:
+
+ UWtype -- An unsigned type, default type for operations (typically a "word")
+ UHWtype -- An unsigned type, at least half the size of UWtype.
+ UDWtype -- An unsigned type, at least twice as large as UWtype
+ W_TYPE_SIZE -- size in bits of UWtype
+
+ UQItype -- Unsigned 8 bit type.
+ SItype, USItype -- Signed and unsigned 32 bit types.
+ DItype, UDItype -- Signed and unsigned 64 bit types.
+
+ On a 32 bit machine UWtype should typically be USItype;
+ on a 64 bit machine, UWtype should typically be UDItype. */
+
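+/* For illustration only (a sketch, not mandated by this header): a
+   typical 32-bit user might supply, before including this file,
+
+     typedef unsigned char UQItype;
+     typedef int SItype;
+     typedef unsigned int USItype;
+     typedef long long DItype;
+     typedef unsigned long long UDItype;
+     #define W_TYPE_SIZE 32
+     #define UWtype USItype
+     #define UHWtype USItype
+     #define UDWtype UDItype
+
+   GCC's own callers build these from __attribute__ ((mode (...)))
+   types, but any types meeting the size requirements above will do.  */
+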
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+#ifndef W_TYPE_SIZE
+#define W_TYPE_SIZE 32
+#define UWtype USItype
+#define UHWtype USItype
+#define UDWtype UDItype
+#endif
+
+/* Used in glibc only. */
+#ifndef attribute_hidden
+#define attribute_hidden
+#endif
+
+extern const UQItype __clz_tab[256] attribute_hidden;
+
+/* Define auxiliary asm macros.
+
+ 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two-word
+ UWtype product in HIGH_PROD and LOW_PROD.
+
+ 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+ UDWtype product. This is just a variant of umul_ppmm.
+
+ 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator) divides a UDWtype, composed of the UWtype integers
+ HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ than DENOMINATOR for correct operation. If the implementation further
+ requires the most significant bit of DENOMINATOR to be 1, the
+ pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+ 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator). Like udiv_qrnnd but the numbers are signed. The quotient
+ is rounded towards 0.
+
+ 5) count_leading_zeros(count, x) counts the number of zero-bits from the
+ msb to the first nonzero bit in the UWtype X. This is the number of
+ steps X needs to be shifted left to set the msb. Undefined for X == 0,
+ unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+
+ 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+ from the least significant end.
+
+ 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ high_addend_2, low_addend_2) adds two two-word UWtype integers, composed
+ of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ (i.e. carry out) is not stored anywhere, and is lost.
+
+ 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ composed of HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
+ LOW_SUBTRAHEND respectively. The result is placed in HIGH_DIFFERENCE
+ and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ and is lost.
+
+ If any of these macros are left undefined for a particular CPU,
+ C macros are used. */
+
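+/* Usage sketch (illustrative only, assuming the 32-bit type choices
+   sketched above):
+
+     UWtype hi, lo, q, r;
+     umul_ppmm (hi, lo, u, v);
+     udiv_qrnnd (q, r, hi, lo, d);
+
+   leaves (hi,lo) holding the full product u * v, and then q and r the
+   quotient and remainder of that two-word value by d; hi must be less
+   than d, and d must additionally be normalized (msb set) where
+   UDIV_NEEDS_NORMALIZATION is 1.  */
+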
+/* The CPUs come in alphabetical order below.
+
+ Please add support for more CPUs here, or improve the current support
+ for the CPUs below!
+ (E.g. WE32100, IBM360.) */
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+ understood by gcc1. Use cpp to avoid major code duplication. */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if defined (__alpha) && W_TYPE_SIZE == 64
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ (ph) = __builtin_alpha_umulh (__m0, __m1); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 46
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { UDItype __r; \
+ (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+ } while (0)
+extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#ifdef __alpha_cix__
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzl (X))
+#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X))
+#define COUNT_LEADING_ZEROS_0 64
+#else
+#define count_leading_zeros(COUNT,X) \
+ do { \
+ UDItype __xr = (X), __t, __a; \
+ __t = __builtin_alpha_cmpbge (0, __xr); \
+ __a = __clz_tab[__t ^ 0xff] - 1; \
+ __t = __builtin_alpha_extbl (__xr, __a); \
+ (COUNT) = 64 - (__clz_tab[__t] + __a*8); \
+ } while (0)
+#define count_trailing_zeros(COUNT,X) \
+ do { \
+ UDItype __xr = (X), __t, __a; \
+ __t = __builtin_alpha_cmpbge (0, __xr); \
+ __t = ~__t & -~__t; \
+ __a = ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ __t = __builtin_alpha_extbl (__xr, __a); \
+ __a <<= 3; \
+ __t &= -__t; \
+ __a += ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ (COUNT) = __a; \
+ } while (0)
+#endif /* __alpha_cix__ */
+#endif /* __alpha */
+
+#if defined (__arc__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.f %1, %4, %5\n\tadc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.f %1, %4, %5\n\tsbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+/* Call libgcc routine. */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ DWunion __w; \
+ __w.ll = __umulsidi3 (u, v); \
+ w1 = __w.s.high; \
+ w0 = __w.s.low; \
+} while (0)
+#define __umulsidi3 __umulsidi3
+UDItype __umulsidi3 (USItype, USItype);
+#endif
+
+#if defined (__arm__) && !defined (__thumb__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("adds %1, %4, %5\n\tadc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subs %1, %4, %5\n\tsbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) __CLOBBER_CC)
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2; \
+ __asm__ ("%@ Inlined umul_ppmm\n" \
+ " mov %2, %5, lsr #16\n" \
+ " mov %0, %6, lsr #16\n" \
+ " bic %3, %5, %2, lsl #16\n" \
+ " bic %4, %6, %0, lsl #16\n" \
+ " mul %1, %3, %4\n" \
+ " mul %4, %2, %4\n" \
+ " mul %3, %0, %3\n" \
+ " mul %0, %2, %0\n" \
+ " adds %3, %4, %3\n" \
+ " addcs %0, %0, #65536\n" \
+ " adds %1, %1, %3, lsl #16\n" \
+ " adc %0, %0, %3, lsr #16" \
+ : "=&r" ((USItype) (xh)), \
+ "=r" ((USItype) (xl)), \
+ "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
+ : "r" ((USItype) (a)), \
+ "r" ((USItype) (b)) __CLOBBER_CC );}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#if defined(__arm__)
+/* Let gcc decide how best to implement count_leading_zeros. */
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+
+#if defined (__AVR__)
+
+#if W_TYPE_SIZE == 16
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
+#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
+#define COUNT_LEADING_ZEROS_0 16
+#endif /* W_TYPE_SIZE == 16 */
+
+#if W_TYPE_SIZE == 32
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzl (X))
+#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X))
+#define COUNT_LEADING_ZEROS_0 32
+#endif /* W_TYPE_SIZE == 32 */
+
+#if W_TYPE_SIZE == 64
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzll (X))
+#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzll (X))
+#define COUNT_LEADING_ZEROS_0 64
+#endif /* W_TYPE_SIZE == 64 */
+
+#endif /* defined (__AVR__) */
+
+#if defined (__CRIS__) && __CRIS_arch_version >= 3
+#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
+#if __CRIS_arch_version >= 8
+#define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctz (X))
+#endif
+#endif /* __CRIS__ */
+
+#if defined (__hppa) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "%rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ union \
+ { \
+ UDItype __f; \
+ struct {USItype __w1, __w0;} __w1w0; \
+ } __t; \
+ __asm__ ("xmpyu %1,%2,%0" \
+ : "=x" (__t.__f) \
+ : "x" ((USItype) (u)), \
+ "x" ((USItype) (v))); \
+ (w1) = __t.__w1w0.__w1; \
+ (w0) = __t.__w1w0.__w0; \
+ } while (0)
+#define UMUL_TIME 8
+#else
+#define UMUL_TIME 30
+#endif
+#define UDIV_TIME 40
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __tmp; \
+ __asm__ ( \
+ "ldi 1,%0\n" \
+" extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
+" extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n"\
+" ldo 16(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
+" extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n"\
+" ldo 8(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
+" extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n"\
+" ldo 4(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
+" extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n"\
+" ldo 2(%0),%0 ; Yes. Perform add.\n" \
+" extru %1,30,1,%1 ; Extract bit 1.\n" \
+" sub %0,%1,%0 ; Subtract it.\n" \
+ : "=r" (count), "=r" (__tmp) : "1" (x)); \
+ } while (0)
+#endif
+
+#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
+#if !defined (__zarch__)
+#define smul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __x; \
+ __asm__ ("lr %N0,%1\n\tmr %0,%2" \
+ : "=&r" (__x.__ll) \
+ : "r" (m0), "r" (m1)); \
+ (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __x; \
+ __x.__i.__h = n1; __x.__i.__l = n0; \
+ __asm__ ("dr %0,%2" \
+ : "=r" (__x.__ll) \
+ : "0" (__x.__ll), "r" (d)); \
+ (q) = __x.__i.__l; (r) = __x.__i.__h; \
+ } while (0)
+#else
+#define smul_ppmm(xh, xl, m0, m1) \
+ do { \
+ register SItype __r0 __asm__ ("0"); \
+ register SItype __r1 __asm__ ("1") = (m0); \
+ \
+ __asm__ ("mr\t%%r0,%3" \
+ : "=r" (__r0), "=r" (__r1) \
+ : "r" (__r1), "r" (m1)); \
+ (xh) = __r0; (xl) = __r1; \
+ } while (0)
+
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ register SItype __r0 __asm__ ("0") = (n1); \
+ register SItype __r1 __asm__ ("1") = (n0); \
+ \
+ __asm__ ("dr\t%%r0,%4" \
+ : "=r" (__r0), "=r" (__r1) \
+ : "r" (__r0), "r" (__r1), "r" (d)); \
+ (q) = __r1; (r) = __r0; \
+ } while (0)
+#endif /* __zarch__ */
+#endif
+
+#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add{l} {%5,%1|%1,%5}\n\tadc{l} {%3,%0|%0,%3}" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub{l} {%5,%1|%1,%5}\n\tsbb{l} {%3,%0|%0,%3}" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mul{l} %3" \
+ : "=a" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "rm" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, dv) \
+ __asm__ ("div{l} %4" \
+ : "=a" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "rm" ((USItype) (dv)))
+#define count_leading_zeros(count, x) ((count) = __builtin_clz (x))
+#define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x))
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* 80x86 */
+
+#if (defined (__x86_64__) || defined (__i386__)) && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add{q} {%5,%1|%1,%5}\n\tadc{q} {%3,%0|%0,%3}" \
+ : "=r" ((UDItype) (sh)), \
+ "=&r" ((UDItype) (sl)) \
+ : "%0" ((UDItype) (ah)), \
+ "rme" ((UDItype) (bh)), \
+ "%1" ((UDItype) (al)), \
+ "rme" ((UDItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub{q} {%5,%1|%1,%5}\n\tsbb{q} {%3,%0|%0,%3}" \
+ : "=r" ((UDItype) (sh)), \
+ "=&r" ((UDItype) (sl)) \
+ : "0" ((UDItype) (ah)), \
+ "rme" ((UDItype) (bh)), \
+ "1" ((UDItype) (al)), \
+ "rme" ((UDItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mul{q} %3" \
+ : "=a" ((UDItype) (w0)), \
+ "=d" ((UDItype) (w1)) \
+ : "%0" ((UDItype) (u)), \
+ "rm" ((UDItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, dv) \
+ __asm__ ("div{q} %4" \
+ : "=a" ((UDItype) (q)), \
+ "=d" ((UDItype) (r)) \
+ : "0" ((UDItype) (n0)), \
+ "1" ((UDItype) (n1)), \
+ "rm" ((UDItype) (dv)))
+#define count_leading_zeros(count, x) ((count) = __builtin_clzll (x))
+#define count_trailing_zeros(count, x) ((count) = __builtin_ctzll (x))
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* x86_64 */
+
+#if defined (__i960__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__xx.__ll) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__w) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ __w; })
+#endif /* __i960__ */
+
+#if defined (__ia64) && W_TYPE_SIZE == 64
+/* This form encourages gcc (pre-release 3.4 at least) to emit predicated
+ "sub r=r,r" and "sub r=r,r,1", giving a 2 cycle latency. The generic
+ code using "al<bl" arithmetically comes out making an actual 0 or 1 in a
+ register, which takes an extra cycle. */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ UWtype __x; \
+ __x = (al) - (bl); \
+ if ((al) < (bl)) \
+ (sh) = (ah) - (bh) - 1; \
+ else \
+ (sh) = (ah) - (bh); \
+ (sl) = __x; \
+ } while (0)
+
+/* Do both product parts in assembly, since that gives better code with
+ all gcc versions. Some callers will just use the upper part, and in
+ that situation we waste an instruction, but not any cycles. */
+#define umul_ppmm(ph, pl, m0, m1) \
+ __asm__ ("xma.hu %0 = %2, %3, f0\n\txma.l %1 = %2, %3, f0" \
+ : "=&f" (ph), "=f" (pl) \
+ : "f" (m0), "f" (m1))
+#define count_leading_zeros(count, x) \
+ do { \
+ UWtype _x = (x), _y, _a, _c; \
+ __asm__ ("mux1 %0 = %1, @rev" : "=r" (_y) : "r" (_x)); \
+ __asm__ ("czx1.l %0 = %1" : "=r" (_a) : "r" (-_y | _y)); \
+ _c = (_a - 1) << 3; \
+ _x >>= _c; \
+ if (_x >= 1 << 4) \
+ _x >>= 4, _c += 4; \
+ if (_x >= 1 << 2) \
+ _x >>= 2, _c += 2; \
+ _c += _x >> 1; \
+ (count) = W_TYPE_SIZE - 1 - _c; \
+ } while (0)
+/* similar to what gcc does for __builtin_ffs, but 0 based rather than 1
+ based, and we don't need a special case for x==0 here */
+#define count_trailing_zeros(count, x) \
+ do { \
+ UWtype __ctz_x = (x); \
+ __asm__ ("popcnt %0 = %1" \
+ : "=r" (count) \
+ : "r" ((__ctz_x-1) & ~__ctz_x)); \
+ } while (0)
+#define UMUL_TIME 14
+#endif
+
+#if defined (__M32R__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0\n\taddx %1,%5\n\taddx %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0\n\tsubx %1,%5\n\tsubx %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#endif /* __M32R__ */
+
+#if defined (__mc68000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+
+/* The '020, '030, '040, '060 and CPU32 have 32x32->64 and 64/32->32q-32r. */
+#if (defined (__mc68020__) && !defined (__mc68060__))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "dmi" ((USItype) (v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divu%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divs%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+
+#elif defined (__mcoldfire__) /* not mc68020 */
+
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("| Inlined umul_ppmm\n" \
+ " move%.l %2,%/d0\n" \
+ " move%.l %3,%/d1\n" \
+ " move%.l %/d0,%/d2\n" \
+ " swap %/d0\n" \
+ " move%.l %/d1,%/d3\n" \
+ " swap %/d1\n" \
+ " move%.w %/d2,%/d4\n" \
+ " mulu %/d3,%/d4\n" \
+ " mulu %/d1,%/d2\n" \
+ " mulu %/d0,%/d3\n" \
+ " mulu %/d0,%/d1\n" \
+ " move%.l %/d4,%/d0\n" \
+ " clr%.w %/d0\n" \
+ " swap %/d0\n" \
+ " add%.l %/d0,%/d2\n" \
+ " add%.l %/d3,%/d2\n" \
+ " jcc 1f\n" \
+ " add%.l %#65536,%/d1\n" \
+ "1: swap %/d2\n" \
+ " moveq %#0,%/d0\n" \
+ " move%.w %/d2,%/d0\n" \
+ " move%.w %/d4,%/d2\n" \
+ " move%.l %/d2,%1\n" \
+ " add%.l %/d1,%/d0\n" \
+ " move%.l %/d0,%0" \
+ : "=g" ((USItype) (xh)), \
+ "=g" ((USItype) (xl)) \
+ : "g" ((USItype) (a)), \
+ "g" ((USItype) (b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#else /* not ColdFire */
+/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("| Inlined umul_ppmm\n" \
+ " move%.l %2,%/d0\n" \
+ " move%.l %3,%/d1\n" \
+ " move%.l %/d0,%/d2\n" \
+ " swap %/d0\n" \
+ " move%.l %/d1,%/d3\n" \
+ " swap %/d1\n" \
+ " move%.w %/d2,%/d4\n" \
+ " mulu %/d3,%/d4\n" \
+ " mulu %/d1,%/d2\n" \
+ " mulu %/d0,%/d3\n" \
+ " mulu %/d0,%/d1\n" \
+ " move%.l %/d4,%/d0\n" \
+ " eor%.w %/d0,%/d0\n" \
+ " swap %/d0\n" \
+ " add%.l %/d0,%/d2\n" \
+ " add%.l %/d3,%/d2\n" \
+ " jcc 1f\n" \
+ " add%.l %#65536,%/d1\n" \
+ "1: swap %/d2\n" \
+ " moveq %#0,%/d0\n" \
+ " move%.w %/d2,%/d0\n" \
+ " move%.w %/d4,%/d2\n" \
+ " move%.l %/d2,%1\n" \
+ " add%.l %/d1,%/d0\n" \
+ " move%.l %/d0,%0" \
+ : "=g" ((USItype) (xh)), \
+ "=g" ((USItype) (xl)) \
+ : "g" ((USItype) (a)), \
+ "g" ((USItype) (b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+
+#endif /* not mc68020 */
+
+/* The '020, '030, '040 and '060 have bitfield insns.
+ cpu32 disguises as a 68020, but lacks them. */
+#if defined (__mc68020__) && !defined (__mcpu32__)
+#define count_leading_zeros(count, x) \
+ __asm__ ("bfffo %1{%b2:%b2},%0" \
+ : "=d" ((USItype) (count)) \
+ : "od" ((USItype) (x)), "n" (0))
+/* Some ColdFire architectures have a ff1 instruction supported via
+ __builtin_clz. */
+#elif defined (__mcfisaaplus__) || defined (__mcfisac__)
+#define count_leading_zeros(count,x) ((count) = __builtin_clz (x))
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+#endif /* mc68000 */
+
+#if defined (__m88000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("ff1 %0,%1" \
+ : "=r" (__cbtmp) \
+ : "r" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
+#if defined (__mc88110__)
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __asm__ ("mulu.d %0,%1,%2" \
+ : "=r" (__xx.__ll) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v))); \
+ (wh) = __xx.__i.__h; \
+ (wl) = __xx.__i.__l; \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ USItype __q; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("divu.d %0,%1,%2" \
+ : "=r" (__q) \
+ : "r" (__xx.__ll), \
+ "r" ((USItype) (d))); \
+ (r) = (n0) - __q * (d); (q) = __q; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __mc88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mn10300__)
+# if defined (__AM33__)
+# define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
+# define umul_ppmm(w1, w0, u, v) \
+ asm("mulu %3,%2,%1,%0" : "=r"(w0), "=r"(w1) : "r"(u), "r"(v))
+# define smul_ppmm(w1, w0, u, v) \
+ asm("mul %3,%2,%1,%0" : "=r"(w0), "=r"(w1) : "r"(u), "r"(v))
+# else
+# define umul_ppmm(w1, w0, u, v) \
+ asm("nop; nop; mulu %3,%0" : "=d"(w0), "=z"(w1) : "%0"(u), "d"(v))
+# define smul_ppmm(w1, w0, u, v) \
+ asm("nop; nop; mul %3,%0" : "=d"(w0), "=z"(w1) : "%0"(u), "d"(v))
+# endif
+# define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ DWunion __s, __a, __b; \
+ __a.s.low = (al); __a.s.high = (ah); \
+ __b.s.low = (bl); __b.s.high = (bh); \
+ __s.ll = __a.ll + __b.ll; \
+ (sl) = __s.s.low; (sh) = __s.s.high; \
+ } while (0)
+# define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ DWunion __s, __a, __b; \
+ __a.s.low = (al); __a.s.high = (ah); \
+ __b.s.low = (bl); __b.s.high = (bh); \
+ __s.ll = __a.ll - __b.ll; \
+ (sl) = __s.s.low; (sh) = __s.s.high; \
+ } while (0)
+# define udiv_qrnnd(q, r, nh, nl, d) \
+ asm("divu %2,%0" : "=D"(q), "=z"(r) : "D"(d), "0"(nl), "1"(nh))
+# define sdiv_qrnnd(q, r, nh, nl, d) \
+ asm("div %2,%0" : "=D"(q), "=z"(r) : "D"(d), "0"(nl), "1"(nh))
+# define UMUL_TIME 3
+# define UDIV_TIME 38
+#endif
+
+#if defined (__mips__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UDItype __x = (UDItype) (USItype) (u) * (USItype) (v); \
+ (w1) = (USItype) (__x >> 32); \
+ (w0) = (USItype) (__x); \
+ } while (0)
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+
+#if (__mips == 32 || __mips == 64) && ! __mips16
+#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+#endif /* __mips__ */
+
+#if defined (__ns32000__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__w) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("deid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "0" (__xx.__ll), \
+ "g" ((USItype) (d))); \
+ (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#define count_trailing_zeros(count,x) \
+ do { \
+ __asm__ ("ffsd %2,%0" \
+ : "=r" ((USItype) (count)) \
+ : "0" ((USItype) 0), \
+ "r" ((USItype) (x))); \
+ } while (0)
+#endif /* __ns32000__ */
+
+/* FIXME: We should test _IBMR2 here when we add assembly support for the
+ system vendor compilers.
+ FIXME: What's needed for gcc PowerPC VxWorks? __vxworks__ is not good
+ enough, since that hits ARM and m68k too. */
+#if (defined (_ARCH_PPC) /* AIX */ \
+ || defined (_ARCH_PWR) /* AIX */ \
+ || defined (_ARCH_COM) /* AIX */ \
+ || defined (__powerpc__) /* gcc */ \
+ || defined (__POWERPC__) /* BEOS */ \
+ || defined (__ppc__) /* Darwin */ \
+ || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \
+ || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \
+ && CPU_FAMILY == PPC) \
+ ) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 32
+#if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \
+ || defined (__ppc__) \
+ || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \
+ || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \
+ && CPU_FAMILY == PPC)
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ SItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#elif defined (_ARCH_PWR)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+ __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
+#define UDIV_TIME 100
+#endif
+#endif /* 32-bit POWER architecture variants. */
+
+/* We should test _IBMR2 here when we add assembly support for the system
+ vendor compilers. */
+#if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 64
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ DItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14 /* ??? */
+#define UDIV_TIME 120 /* ??? */
+#endif /* 64-bit PowerPC. */
+
+#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5\n\tae %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5\n\tse %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+ "s r2,r2\n" \
+" mts r10,%2\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" cas %0,r2,r0\n" \
+" mfs r10,%1" \
+ : "=r" ((USItype) (ph)), \
+ "=r" ((USItype) (pl)) \
+ : "%r" (__m0), \
+ "r" (__m1) \
+ : "r2"); \
+ (ph) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+ do { \
+ if ((x) >= 0x10000) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x) >> 16)); \
+ else \
+ { \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ (count) += 16; \
+ } \
+ } while (0)
+#endif
+
+#if defined(__sh__) && !__SHMEDIA__ && W_TYPE_SIZE == 32
+#ifndef __sh1__
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ( \
+ "dmulu.l %2,%3\n\tsts%M1 macl,%1\n\tsts%M0 mach,%0" \
+ : "=r<" ((USItype)(w1)), \
+ "=r<" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+/* This is the same algorithm as __udiv_qrnnd_c. */
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ extern UWtype __udiv_qrnnd_16 (UWtype, UWtype) \
+ __attribute__ ((visibility ("hidden"))); \
+ /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ \
+ __asm__ ( \
+ "mov%M4 %4,r5\n" \
+" swap.w %3,r4\n" \
+" swap.w r5,r6\n" \
+" jsr @%5\n" \
+" shll16 r6\n" \
+" swap.w r4,r4\n" \
+" jsr @%5\n" \
+" swap.w r1,%0\n" \
+" or r1,%0" \
+ : "=r" (q), "=&z" (r) \
+ : "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16) \
+ : "r1", "r2", "r4", "r5", "r6", "pr", "t"); \
+ } while (0)
+
+#define UDIV_TIME 80
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("clrt;subc %5,%1; subc %4,%0" \
+ : "=r" (sh), "=r" (sl) \
+ : "0" (ah), "1" (al), "r" (bh), "r" (bl) : "t")
+
+#endif /* __sh__ */
+
+#if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
+#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
+#define count_leading_zeros(count, x) \
+ do \
+ { \
+ UDItype x_ = (USItype)(x); \
+ SItype c_; \
+ \
+ __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_)); \
+ (count) = c_ - 31; \
+ } \
+ while (0)
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+
+#if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
+ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#if defined (__sparc_v8__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+ __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
+ : "=&r" ((USItype) (__q)), \
+ "=&r" ((USItype) (__r)) \
+ : "r" ((USItype) (__n1)), \
+ "r" ((USItype) (__n0)), \
+ "r" ((USItype) (__d)))
+#else
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+ instructions scan (ffs from high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+" wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
+" tst %%g0\n" \
+" divscc %3,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%0\n" \
+" rd %%y,%1\n" \
+" bl,a 1f\n" \
+" add %1,%4,%1\n" \
+"1: ! End of inline udiv_qrnnd" \
+ : "=r" ((USItype) (q)), \
+ "=r" ((USItype) (r)) \
+ : "r" ((USItype) (n1)), \
+ "r" ((USItype) (n0)), \
+ "rI" ((USItype) (d)) \
+ : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+ do { \
+ __asm__ ("scan %1,1,%0" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ } while (0)
+/* Early sparclites return 63 for an argument of 0, but they warn that future
+ implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
+ undefined. */
+#else
+/* SPARC without integer multiplication and divide instructions.
+ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm\n" \
+" wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n"\
+" sra %3,31,%%o5 ! Don't move this insn\n" \
+" and %2,%%o5,%%o5 ! Don't move this insn\n" \
+" andcc %%g0,0,%%g1 ! Don't move this insn\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,0,%%g1\n" \
+" add %%g1,%%o5,%0\n" \
+" rd %%y,%1" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "%rI" ((USItype) (u)), \
+ "r" ((USItype) (v)) \
+ : "g1", "o5" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+" mov 32,%%g1\n" \
+" subcc %1,%2,%%g0\n" \
+"1: bcs 5f\n" \
+" addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+" sub %1,%2,%1 ! this kills msb of n\n" \
+" addx %1,%1,%1 ! so this can't give carry\n" \
+" subcc %%g1,1,%%g1\n" \
+"2: bne 1b\n" \
+" subcc %1,%2,%%g0\n" \
+" bcs 3f\n" \
+" addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+" b 3f\n" \
+" sub %1,%2,%1 ! this kills msb of n\n" \
+"4: sub %1,%2,%1\n" \
+"5: addxcc %1,%1,%1\n" \
+" bcc 2b\n" \
+" subcc %%g1,1,%%g1\n" \
+"! Got carry from n. Subtract next step to cancel this carry.\n" \
+" bne 4b\n" \
+" addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n" \
+" sub %1,%2,%1\n" \
+"3: xnor %0,0,%0\n" \
+" ! End of inline udiv_qrnnd" \
+ : "=&r" ((USItype) (__q)), \
+ "=&r" ((USItype) (__r)) \
+ : "r" ((USItype) (__d)), \
+ "1" ((USItype) (__n1)), \
+ "0" ((USItype) (__n0)) : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* sparc32 */
+
+#if ((defined (__sparc__) && defined (__arch64__)) || defined (__sparcv9)) \
+ && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n\t" \
+ "add %r2,%3,%0\n\t" \
+ "bcs,a,pn %%xcc, 1f\n\t" \
+ "add %0, 1, %0\n" \
+ "1:" \
+ : "=r" ((UDItype)(sh)), \
+ "=&r" ((UDItype)(sl)) \
+ : "%rJ" ((UDItype)(ah)), \
+ "rI" ((UDItype)(bh)), \
+ "%rJ" ((UDItype)(al)), \
+ "rI" ((UDItype)(bl)) \
+ __CLOBBER_CC)
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n\t" \
+ "sub %r2,%3,%0\n\t" \
+ "bcs,a,pn %%xcc, 1f\n\t" \
+ "sub %0, 1, %0\n\t" \
+ "1:" \
+ : "=r" ((UDItype)(sh)), \
+ "=&r" ((UDItype)(sl)) \
+ : "rJ" ((UDItype)(ah)), \
+ "rI" ((UDItype)(bh)), \
+ "rJ" ((UDItype)(al)), \
+ "rI" ((UDItype)(bl)) \
+ __CLOBBER_CC)
+
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ UDItype tmp1, tmp2, tmp3, tmp4; \
+ __asm__ __volatile__ ( \
+ "srl %7,0,%3\n\t" \
+ "mulx %3,%6,%1\n\t" \
+ "srlx %6,32,%2\n\t" \
+ "mulx %2,%3,%4\n\t" \
+ "sllx %4,32,%5\n\t" \
+ "srl %6,0,%3\n\t" \
+ "sub %1,%5,%5\n\t" \
+ "srlx %5,32,%5\n\t" \
+ "addcc %4,%5,%4\n\t" \
+ "srlx %7,32,%5\n\t" \
+ "mulx %3,%5,%3\n\t" \
+ "mulx %2,%5,%5\n\t" \
+ "sethi %%hi(0x80000000),%2\n\t" \
+ "addcc %4,%3,%4\n\t" \
+ "srlx %4,32,%4\n\t" \
+ "add %2,%2,%2\n\t" \
+ "movcc %%xcc,%%g0,%2\n\t" \
+ "addcc %5,%4,%5\n\t" \
+ "sllx %3,32,%3\n\t" \
+ "add %1,%3,%1\n\t" \
+ "add %5,%2,%0" \
+ : "=r" ((UDItype)(wh)), \
+ "=&r" ((UDItype)(wl)), \
+ "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
+ : "r" ((UDItype)(u)), \
+ "r" ((UDItype)(v)) \
+ __CLOBBER_CC); \
+ } while (0)
+#define UMUL_TIME 96
+#define UDIV_TIME 230
+#endif /* sparc64 */
+
+#if defined (__vax__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union { \
+ UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=r" (__xx.__ll) \
+ : "g" (__m0), \
+ "g" (__m1)); \
+ (xh) = __xx.__i.__h; \
+ (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) \
+ : "g" (__xx.__ll), "g" (d)); \
+ } while (0)
+#endif /* __vax__ */
+
+#ifdef _TMS320C6X
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do \
+ { \
+ UDItype __ll; \
+ __asm__ ("addu .l1 %1, %2, %0" \
+ : "=a" (__ll) : "a" (al), "a" (bl)); \
+ (sl) = (USItype)__ll; \
+ (sh) = ((USItype)(__ll >> 32)) + (ah) + (bh); \
+ } \
+ while (0)
+
+#ifdef _TMS320C6400_PLUS
+#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UDItype __x = (UDItype) (USItype) (u) * (USItype) (v); \
+ (w1) = (USItype) (__x >> 32); \
+ (w0) = (USItype) (__x); \
+ } while (0)
+#endif /* _TMS320C6400_PLUS */
+
+#define count_leading_zeros(count, x) ((count) = __builtin_clz (x))
+#ifdef _TMS320C6400
+#define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x))
+#endif
+#define UMUL_TIME 4
+#define UDIV_TIME 40
+#endif /* _TMS320C6X */
+
+#if defined (__xtensa__) && W_TYPE_SIZE == 32
+/* This code is not Xtensa-configuration-specific, so rely on the compiler
+ to expand builtin functions depending on what configuration features
+ are available. This avoids library calls when the operation can be
+ performed in-line. */
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ DWunion __w; \
+ __w.ll = __builtin_umulsidi3 (u, v); \
+ w1 = __w.s.high; \
+ w0 = __w.s.low; \
+ } while (0)
+#define __umulsidi3(u, v) __builtin_umulsidi3 (u, v)
+#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
+#define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctz (X))
+#endif /* __xtensa__ */
+
+#if defined xstormy16
+extern UHItype __stormy16_count_leading_zeros (UHItype);
+#define count_leading_zeros(count, x) \
+ do \
+ { \
+ UHItype size; \
+ \
+ /* We assume that W_TYPE_SIZE is a multiple of 16... */ \
+ for ((count) = 0, size = W_TYPE_SIZE; size; size -= 16) \
+ { \
+ UHItype c; \
+ \
+ c = __clzhi2 ((x) >> (size - 16)); \
+ (count) += c; \
+ if (c != 16) \
+ break; \
+ } \
+ } \
+ while (0)
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
+#endif
+
+#if defined (__z8000__) && W_TYPE_SIZE == 16
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "%0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "%1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {long int __ll; \
+ struct {unsigned int __h, __l;} __i; \
+ } __xx; \
+ unsigned int __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mult %S0,%H3" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "%1" (__m0), \
+ "rQR" (__m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+ (xh) += ((((signed int) __m0 >> 15) & __m1) \
+ + (((signed int) __m1 >> 15) & __m0)); \
+ } while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+/* If this machine has no inline assembler, use C macros. */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ UWtype __x; \
+ __x = (al) + (bl); \
+ (sh) = (ah) + (bh) + (__x < (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ UWtype __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
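+
+/* (In the two macros above, carry and borrow are recovered from modular
+   wraparound: with 8-bit words, 0x01:0xF0 + 0x00:0x20 yields low word
+   0x10, and 0x10 < 0xF0 signals the carry, giving high word 0x02.)  */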
+
+/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
+ smul_ppmm. */
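+/* (The high-word correction below adds __xm1 when the sign bit of __xm0
+   is set and __xm0 when the sign bit of __xm1 is set, modulo
+   2**W_TYPE_SIZE, which converts the signed high word into the unsigned
+   one.)  */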
+#if !defined (umul_ppmm) && defined (smul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __w1; \
+ UWtype __xm0 = (u), __xm1 = (v); \
+ smul_ppmm (__w1, w0, __xm0, __xm1); \
+ (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
+ + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
+ } while (0)
+#endif
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __x0, __x1, __x2, __x3; \
+ UHWtype __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart (u); \
+ __uh = __ll_highpart (u); \
+ __vl = __ll_lowpart (v); \
+ __vh = __ll_highpart (v); \
+ \
+ __x0 = (UWtype) __ul * __vl; \
+ __x1 = (UWtype) __ul * __vh; \
+ __x2 = (UWtype) __uh * __vl; \
+ __x3 = (UWtype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart (__x1); \
+ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
+ } while (0)
+#endif
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+ ({DWunion __w; \
+ umul_ppmm (__w.s.high, __w.s.low, u, v); \
+ __w.ll; })
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ UWtype __d1, __d0, __q1, __q0; \
+ UWtype __r1, __r0, __m; \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (UWtype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (UWtype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (UWtype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
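+/* (Worked sketch with 8-bit words, so __ll_B is 16: dividing the pair
+   0x12:0x34 by the normalized d = 0xC5 estimates each quotient digit
+   against __d1 = 0xC, needs no correction in this case, and yields
+   q = 0x17, r = 0x81, since 0x17 * 0xC5 + 0x81 == 0x1234.)  */
+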
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ USItype __r; \
+ (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
+ (r) = __r; \
+ } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#if !defined (count_leading_zeros)
+#define count_leading_zeros(count, x) \
+ do { \
+ UWtype __xr = (x); \
+ UWtype __a; \
+ \
+ if (W_TYPE_SIZE <= 32) \
+ { \
+ __a = __xr < ((UWtype)1<<2*__BITS4) \
+ ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \
+ : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
+ } \
+ else \
+ { \
+ for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+ if (((__xr >> __a) & 0xff) != 0) \
+ break; \
+ } \
+ \
+ (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
+ } while (0)
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
+#endif
+
+#if !defined (count_trailing_zeros)
+/* Define count_trailing_zeros using count_leading_zeros. The latter might be
+ defined in asm, but if it is not, the C version above is good enough. */
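+/* (The expression __ctz_x & -__ctz_x below isolates the least
+   significant set bit: e.g. for x = 0x28 it yields 0x08, whose
+   leading-zero count then fixes the trailing-zero count, here 3.)  */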
+#define count_trailing_zeros(count, x) \
+ do { \
+ UWtype __ctz_x = (x); \
+ UWtype __ctz_c; \
+ count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
+ (count) = W_TYPE_SIZE - 1 - __ctz_c; \
+ } while (0)
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
diff --git a/libgcc/memcmp.c b/libgcc/memcmp.c
new file mode 100644
index 00000000000..2348afe1d27
--- /dev/null
+++ b/libgcc/memcmp.c
@@ -0,0 +1,16 @@
+/* Public domain. */
+#include <stddef.h>
+
+int
+memcmp (const void *str1, const void *str2, size_t count)
+{
+ const unsigned char *s1 = str1;
+ const unsigned char *s2 = str2;
+
+ while (count-- > 0)
+ {
+ if (*s1++ != *s2++)
+ return s1[-1] < s2[-1] ? -1 : 1;
+ }
+ return 0;
+}
diff --git a/libgcc/memcpy.c b/libgcc/memcpy.c
new file mode 100644
index 00000000000..58b1e405627
--- /dev/null
+++ b/libgcc/memcpy.c
@@ -0,0 +1,12 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memcpy (void *dest, const void *src, size_t len)
+{
+ char *d = dest;
+ const char *s = src;
+ while (len--)
+ *d++ = *s++;
+ return dest;
+}
diff --git a/libgcc/memmove.c b/libgcc/memmove.c
new file mode 100644
index 00000000000..13b340af6a0
--- /dev/null
+++ b/libgcc/memmove.c
@@ -0,0 +1,20 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memmove (void *dest, const void *src, size_t len)
+{
+ char *d = dest;
+ const char *s = src;
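+ /* Copy forward when the destination starts below the source, backward
+ otherwise, so overlapping bytes are read before being overwritten.  */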
+ if (d < s)
+ while (len--)
+ *d++ = *s++;
+ else
+ {
+ const char *lasts = s + (len-1);
+ char *lastd = d + (len-1);
+ while (len--)
+ *lastd-- = *lasts--;
+ }
+ return dest;
+}
diff --git a/libgcc/memset.c b/libgcc/memset.c
new file mode 100644
index 00000000000..3e7025ee394
--- /dev/null
+++ b/libgcc/memset.c
@@ -0,0 +1,11 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memset (void *dest, int val, size_t len)
+{
+ unsigned char *ptr = dest;
+ while (len-- > 0)
+ *ptr++ = val;
+ return dest;
+}
diff --git a/libgcc/mkheader.sh b/libgcc/mkheader.sh
new file mode 100755
index 00000000000..69ea3704901
--- /dev/null
+++ b/libgcc/mkheader.sh
@@ -0,0 +1,41 @@
+#! /bin/sh
+
+# Copyright (C) 2001, 2002, 2006, 2007, 2010, 2011 Free Software Foundation, Inc.
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+
+# Print libgcc_tm.h to the standard output.
+# DEFINES and HEADERS are expected to be set in the environment.
+
+# Add multiple inclusion protection guard, part one.
+echo "#ifndef LIBGCC_TM_H"
+echo "#define LIBGCC_TM_H"
+
+# Generate the body of the file
+echo "/* Automatically generated by mkheader.sh. */"
+for def in $DEFINES; do
+ echo "#ifndef $def" | sed 's/=.*//'
+ echo "# define $def" | sed 's/=/ /'
+ echo "#endif"
+done
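+
+# For example (illustrative), a DEFINES entry of the form "FOO=2" emits:
+#   #ifndef FOO
+#   # define FOO 2
+#   #endif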
+
+for file in $HEADERS; do
+ echo "#include \"$file\""
+done
+
+# Add multiple inclusion protection guard, part two.
+echo "#endif /* LIBGCC_TM_H */"
diff --git a/libgcc/mkmap-flat.awk b/libgcc/mkmap-flat.awk
new file mode 100644
index 00000000000..ec5e1fdf513
--- /dev/null
+++ b/libgcc/mkmap-flat.awk
@@ -0,0 +1,109 @@
+# Generate a flat list of symbols to export.
+# Copyright (C) 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
+# Contributed by Richard Henderson <rth@cygnus.com>
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Options:
+# "-v leading_underscore=1" : Symbols in map need leading underscore.
+# "-v osf_export=1" : Create -input file for Tru64 UNIX linker
+# instead of map file.
+# "-v pe_dll=1" : Create .DEF file for Windows PECOFF
+# DLL link instead of map file.
+
+BEGIN {
+ state = "nm";
+ excluding = 0;
+ if (leading_underscore)
+ prefix = "_";
+ else
+ prefix = "";
+}
+
+# Remove comment and blank lines.
+/^ *#/ || /^ *$/ {
+ next;
+}
+
+# We begin with nm input. Collect the set of symbols that are present
+# so that we can elide undefined symbols.
+
+state == "nm" && /^%%/ {
+ state = "ver";
+ next;
+}
+
+state == "nm" && ($1 == "U" || $2 == "U") {
+ next;
+}
+
+state == "nm" && NF == 3 {
+ def[$3] = 1;
+ next;
+}
+
+state == "nm" {
+ next;
+}
+
+# Now we process a simplified variant of the Solaris symbol version
+# script. We have one symbol per line, no semicolons, simple markers
+# for beginning and ending each section, and %inherit markers for
+# describing version inheritance. A symbol may appear in more than
+# one symbol version, and the last seen takes effect.
+# The magic version name '%exclude' causes all the symbols given that
+# version to be dropped from the output (unless a later version overrides).
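+# For illustration (hypothetical input, not generated by this script),
+# the version-script half of the input might read:
+#   GCC_3.0 {
+#     __divdi3
+#   }
+#   %exclude {
+#     __some_private_symbol
+#   }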
+
+NF == 3 && $1 == "%inherit" {
+ next;
+}
+
+NF == 2 && $2 == "{" {
+ if ($1 == "%exclude")
+ excluding = 1;
+ next;
+}
+
+$1 == "}" {
+ excluding = 0;
+ next;
+}
+
+{
+ sym = prefix $1;
+ if (excluding)
+ delete export[sym];
+ else
+ export[sym] = 1;
+ next;
+}
+
+END {
+
+ if (pe_dll) {
+ print "LIBRARY " pe_dll;
+ print "EXPORTS";
+ }
+
+ for (sym in export)
+ if (def[sym] || (pe_dll && def["_" sym])) {
+ if (!osf_export)
+ print sym;
+ else
+ print "-exported_symbol " sym;
+ }
+}
diff --git a/libgcc/mkmap-symver.awk b/libgcc/mkmap-symver.awk
new file mode 100644
index 00000000000..4877e905147
--- /dev/null
+++ b/libgcc/mkmap-symver.awk
@@ -0,0 +1,136 @@
+# Generate an ELF symbol version map a-la Solaris and GNU ld.
+# Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+# Contributed by Richard Henderson <rth@cygnus.com>
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
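+# Options:
+# "-v leading_underscore=1" : Symbols in map need leading underscore.
+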
+BEGIN {
+ state = "nm";
+ sawsymbol = 0;
+ if (leading_underscore)
+ prefix = "_";
+ else
+ prefix = "";
+}
+
+# Remove comment and blank lines.
+/^ *#/ || /^ *$/ {
+ next;
+}
+
+# We begin with nm input.  Collect the set of symbols that are present
+# so that we can omit undefined symbols from the final version script --
+# Solaris complains at us if we emit them.
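+# Already-versioned definitions such as "0000000000001000 T __divdi3@GCC_3.0"
+# (address illustrative) are recorded under the bare symbol name; the
+# split on "@" below strips the version suffix.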
+
+state == "nm" && /^%%/ {
+ state = "ver";
+ next;
+}
+
+state == "nm" && ($1 == "U" || $2 == "U") {
+ next;
+}
+
+state == "nm" && NF == 3 {
+ split ($3, s, "@")
+ def[s[1]] = 1;
+ sawsymbol = 1;
+ next;
+}
+
+state == "nm" {
+ next;
+}
+
+# Now we process a simplified variant of the Solaris symbol version
+# script. We have one symbol per line, no semicolons, simple markers
+# for beginning and ending each section, and %inherit markers for
+# describing version inheritance. A symbol may appear in more than
+# one symbol version, and the last seen takes effect.
+# The magic version name '%exclude' causes all the symbols given that
+# version to be dropped from the output (unless a later version overrides).
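+#
+# A minimal illustrative fragment (version and symbol names hypothetical):
+#   %inherit GCC_3.4 GCC_3.0
+#   GCC_3.0 {
+#     __divdi3
+#   }
+#   GCC_3.4 {
+#     __moddi3
+#   }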
+
+NF == 3 && $1 == "%inherit" {
+ inherit[$2] = $3;
+ next;
+}
+
+NF == 2 && $2 == "{" {
+ if ($1 != "%exclude")
+ libs[$1] = 1;
+ thislib = $1;
+ next;
+}
+
+$1 == "}" {
+ thislib = "";
+ next;
+}
+
+{
+ sym = prefix $1;
+  symbols[sym] = 1;
+ if (thislib != "%exclude")
+ ver[sym, thislib] = 1;
+ else {
+ for (l in libs)
+ ver[sym, l] = 0;
+ }
+ next;
+}
+
+END {
+ if (!sawsymbol)
+ {
+ print "No symbols seen -- broken or mis-installed nm?" | "cat 1>&2";
+ exit 1;
+ }
+ for (l in libs)
+ output(l);
+}
+
+function output(lib) {
+ if (done[lib])
+ return;
+ done[lib] = 1;
+ if (inherit[lib])
+ output(inherit[lib]);
+
+  empty = 1;
+ for (sym in symbols)
+ if ((ver[sym, lib] != 0) && (sym in def))
+ {
+ if (empty)
+ {
+ printf("%s {\n", lib);
+ printf(" global:\n");
+ empty = 0;
+ }
+ printf("\t%s;\n", sym);
+ }
+
+ if (empty)
+ {
+ for (l in libs)
+ if (inherit[l] == lib)
+ inherit[l] = inherit[lib];
+ }
+ else if (inherit[lib])
+ printf("} %s;\n", inherit[lib]);
+ else
+ printf ("\n local:\n\t*;\n};\n");
+}
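+
+# For the illustrative fragment above, assuming both symbols appear in
+# the nm input, output() would print roughly:
+#   GCC_3.0 {
+#    global:
+#           __divdi3;
+#
+#    local:
+#           *;
+#   };
+#   GCC_3.4 {
+#    global:
+#           __moddi3;
+#   } GCC_3.0;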
diff --git a/libgcc/siditi-object.mk b/libgcc/siditi-object.mk
index 69df8338f52..77699cb79ec 100644
--- a/libgcc/siditi-object.mk
+++ b/libgcc/siditi-object.mk
@@ -11,12 +11,12 @@ iter-labels := $(wordlist 2,$(words $(iter-labels)),$(iter-labels))
$o-size := $(firstword $(iter-sizes))
iter-sizes := $(wordlist 2,$(words $(iter-sizes)),$(iter-sizes))
-$o$(objext): %$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_compile) -DL$($*-label) -c $(gcc_srcdir)/libgcc2.c $(vis_hide) \
+$o$(objext): %$(objext): $(srcdir)/libgcc2.c
+ $(gcc_compile) -DL$($*-label) -c $< $(vis_hide) \
-DLIBGCC2_UNITS_PER_WORD=$($*-size)
ifeq ($(enable_shared),yes)
-$(o)_s$(objext): %_s$(objext): $(gcc_srcdir)/libgcc2.c
- $(gcc_s_compile) -DL$($*-label) -c $(gcc_srcdir)/libgcc2.c \
+$(o)_s$(objext): %_s$(objext): $(srcdir)/libgcc2.c
+ $(gcc_s_compile) -DL$($*-label) -c $< \
-DLIBGCC2_UNITS_PER_WORD=$($*-size)
endif
diff --git a/libgcc/udivmod.c b/libgcc/udivmod.c
new file mode 100644
index 00000000000..dc70de64fc7
--- /dev/null
+++ b/libgcc/udivmod.c
@@ -0,0 +1,37 @@
+/* Copyright (C) 2000 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
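+/* Thin wrappers around udivmodsi4 (defined in udivmodsi4.c): quotient
+   when the third argument is 0, remainder when it is 1.  */
+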
+unsigned long udivmodsi4 (unsigned long, unsigned long, int);
+
+unsigned long
+__udivsi3 (unsigned long a, unsigned long b)
+{
+  return udivmodsi4 (a, b, 0);
+}
+
+unsigned long
+__umodsi3 (unsigned long a, unsigned long b)
+{
+  return udivmodsi4 (a, b, 1);
+}
+
diff --git a/libgcc/udivmodsi4.c b/libgcc/udivmodsi4.c
new file mode 100644
index 00000000000..39c030fa4df
--- /dev/null
+++ b/libgcc/udivmodsi4.c
@@ -0,0 +1,47 @@
+/* Copyright (C) 2000 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
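+/* Shift-and-subtract binary long division: return NUM / DEN, or
+   NUM % DEN when MODWANTED is nonzero.  For example,
+   udivmodsi4 (100, 7, 0) == 14 and udivmodsi4 (100, 7, 1) == 2.  */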
+unsigned long
+udivmodsi4(unsigned long num, unsigned long den, int modwanted)
+{
+ unsigned long bit = 1;
+ unsigned long res = 0;
+
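+  /* Scale the divisor (and the matching quotient bit) up as far as
+     possible without exceeding the dividend or overflowing.  */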
+  while (den < num && bit && !(den & (1L << 31)))
+    {
+      den <<= 1;
+      bit <<= 1;
+    }
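+  /* Walk the scaled divisor back down, subtracting it wherever it
+     fits and setting the corresponding quotient bit.  */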
+ while (bit)
+ {
+ if (num >= den)
+ {
+ num -= den;
+ res |= bit;
+ }
+      bit >>= 1;
+      den >>= 1;
+    }
+  if (modwanted)
+    return num;
+  return res;
+}
diff --git a/libgcc/unwind-dw2-fde-dip.c b/libgcc/unwind-dw2-fde-dip.c
index d8e3c0e934b..f57dc8c3925 100644
--- a/libgcc/unwind-dw2-fde-dip.c
+++ b/libgcc/unwind-dw2-fde-dip.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2009, 2010
+/* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Jakub Jelinek <jakub@redhat.com>.
@@ -38,6 +38,7 @@
#endif
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
diff --git a/libgcc/unwind-dw2-fde.c b/libgcc/unwind-dw2-fde.c
index 93d427165c4..7a783329f7c 100644
--- a/libgcc/unwind-dw2-fde.c
+++ b/libgcc/unwind-dw2-fde.c
@@ -1,6 +1,6 @@
/* Subroutines needed for unwinding stack frames for exception handling. */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
- 2009, 2010 Free Software Foundation, Inc.
+ 2009, 2010, 2011 Free Software Foundation, Inc.
Contributed by Jason Merrill <jason@cygnus.com>.
This file is part of GCC.
@@ -29,6 +29,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
diff --git a/libgcc/unwind-dw2.c b/libgcc/unwind-dw2.c
index 92aa233eb26..475ad00bf52 100644
--- a/libgcc/unwind-dw2.c
+++ b/libgcc/unwind-dw2.c
@@ -27,6 +27,7 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#ifdef __USING_SJLJ_EXCEPTIONS__
diff --git a/libgcc/unwind-sjlj.c b/libgcc/unwind-sjlj.c
index c71e79858ee..1fc1c5d3dc9 100644
--- a/libgcc/unwind-sjlj.c
+++ b/libgcc/unwind-sjlj.c
@@ -1,6 +1,6 @@
/* SJLJ exception handling and frame unwind runtime interface routines.
Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2006,
- 2009 Free Software Foundation, Inc.
+ 2009, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -27,6 +27,7 @@
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
+#include "libgcc_tm.h"
#include "unwind.h"
#include "gthr.h"