comparison of gcc/config/arm/lib1funcs.asm @ 55:77e2b8dfacca (repository gcc-4.4.5)

summary:   update it from 4.4.3 to 4.5.0
author:    ryoma <e075725@ie.u-ryukyu.ac.jp>
date:      Fri, 12 Feb 2010 23:39:51 +0900
parents:   a06113de4d67
children:  b7f97abdc517
comparing 52:c156f1bd5cd9 (before) with 55:77e2b8dfacca (after)

25 25
26 /* An executable stack is *not* required for these functions. */ 26 /* An executable stack is *not* required for these functions. */
27 #if defined(__ELF__) && defined(__linux__) 27 #if defined(__ELF__) && defined(__linux__)
28 .section .note.GNU-stack,"",%progbits 28 .section .note.GNU-stack,"",%progbits
29 .previous 29 .previous
30 #endif 30 #endif /* __ELF__ and __linux__ */
31 31
32 #ifdef __ARM_EABI__
33 /* Some attributes that are common to all routines in this file. */
34 /* Tag_ABI_align8_needed: This code does not require 8-byte
35 alignment from the caller. */
36 /* .eabi_attribute 24, 0 -- default setting. */
37 /* Tag_ABI_align8_preserved: This code preserves 8-byte
38 alignment in any callee. */
39 .eabi_attribute 25, 1
40 #endif /* __ARM_EABI__ */
32 /* ------------------------------------------------------------------------ */ 41 /* ------------------------------------------------------------------------ */
33 42
34 /* We need to know what prefix to add to function names. */ 43 /* We need to know what prefix to add to function names. */
35 44
36 #ifndef __USER_LABEL_PREFIX__ 45 #ifndef __USER_LABEL_PREFIX__
254 .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp 263 .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
255 \name \dest, \src1, \src2, \shiftop \shiftreg 264 \name \dest, \src1, \src2, \shiftop \shiftreg
256 .endm 265 .endm
257 #endif 266 #endif
258 267
259 .macro ARM_LDIV0 name 268 #ifdef __ARM_EABI__
269 .macro ARM_LDIV0 name signed
270 cmp r0, #0
271 .ifc \signed, unsigned
272 movne r0, #0xffffffff
273 .else
274 movgt r0, #0x7fffffff
275 movlt r0, #0x80000000
276 .endif
277 b SYM (__aeabi_idiv0) __PLT__
278 .endm
279 #else
280 .macro ARM_LDIV0 name signed
260 str lr, [sp, #-8]! 281 str lr, [sp, #-8]!
261 98: cfi_push 98b - __\name, 0xe, -0x8, 0x8 282 98: cfi_push 98b - __\name, 0xe, -0x8, 0x8
262 bl SYM (__div0) __PLT__ 283 bl SYM (__div0) __PLT__
263 mov r0, #0 @ About as wrong as it could be. 284 mov r0, #0 @ About as wrong as it could be.
264 RETLDM unwind=98b 285 RETLDM unwind=98b
265 .endm 286 .endm
266 287 #endif
267 288
268 .macro THUMB_LDIV0 name 289
290 #ifdef __ARM_EABI__
291 .macro THUMB_LDIV0 name signed
292 #if defined(__ARM_ARCH_6M__)
293 .ifc \signed, unsigned
294 cmp r0, #0
295 beq 1f
296 mov r0, #0
297 mvn r0, r0 @ 0xffffffff
298 1:
299 .else
300 cmp r0, #0
301 beq 2f
302 blt 3f
303 mov r0, #0
304 mvn r0, r0
305 lsr r0, r0, #1 @ 0x7fffffff
306 b 2f
307 3: mov r0, #0x80
308 lsl r0, r0, #24 @ 0x80000000
309 2:
310 .endif
311 push {r0, r1, r2}
312 ldr r0, 4f
313 adr r1, 4f
314 add r0, r1
315 str r0, [sp, #8]
316 @ We know we are not on armv4t, so pop pc is safe.
317 pop {r0, r1, pc}
318 .align 2
319 4:
320 .word __aeabi_idiv0 - 4b
321 #elif defined(__thumb2__)
322 .syntax unified
323 .ifc \signed, unsigned
324 cbz r0, 1f
325 mov r0, #0xffffffff
326 1:
327 .else
328 cmp r0, #0
329 do_it gt
330 movgt r0, #0x7fffffff
331 do_it lt
332 movlt r0, #0x80000000
333 .endif
334 b.w SYM(__aeabi_idiv0) __PLT__
335 #else
336 .align 2
337 bx pc
338 nop
339 .arm
340 cmp r0, #0
341 .ifc \signed, unsigned
342 movne r0, #0xffffffff
343 .else
344 movgt r0, #0x7fffffff
345 movlt r0, #0x80000000
346 .endif
347 b SYM(__aeabi_idiv0) __PLT__
348 .thumb
349 #endif
350 .endm
351 #else
352 .macro THUMB_LDIV0 name signed
269 push { r1, lr } 353 push { r1, lr }
270 98: cfi_push 98b - __\name, 0xe, -0x4, 0x8 354 98: cfi_push 98b - __\name, 0xe, -0x4, 0x8
271 bl SYM (__div0) 355 bl SYM (__div0)
272 mov r0, #0 @ About as wrong as it could be. 356 mov r0, #0 @ About as wrong as it could be.
273 #if defined (__INTERWORKING__) 357 #if defined (__INTERWORKING__)
275 bx r2 359 bx r2
276 #else 360 #else
277 pop { r1, pc } 361 pop { r1, pc }
278 #endif 362 #endif
279 .endm 363 .endm
364 #endif
280 365
281 .macro FUNC_END name 366 .macro FUNC_END name
282 SIZE (__\name) 367 SIZE (__\name)
283 .endm 368 .endm
284 369
285 .macro DIV_FUNC_END name 370 .macro DIV_FUNC_END name signed
286 cfi_start __\name, LSYM(Lend_div0) 371 cfi_start __\name, LSYM(Lend_div0)
287 LSYM(Ldiv0): 372 LSYM(Ldiv0):
288 #ifdef __thumb__ 373 #ifdef __thumb__
289 THUMB_LDIV0 \name 374 THUMB_LDIV0 \name \signed
290 #else 375 #else
291 ARM_LDIV0 \name 376 ARM_LDIV0 \name \signed
292 #endif 377 #endif
293 cfi_end LSYM(Lend_div0) 378 cfi_end LSYM(Lend_div0)
294 FUNC_END \name 379 FUNC_END \name
295 .endm 380 .endm
296 381
411 #define xxl r0 496 #define xxl r0
412 #define yyh r3 497 #define yyh r3
413 #define yyl r2 498 #define yyl r2
414 #endif 499 #endif
415 500
501 #ifdef __ARM_EABI__
502 .macro WEAK name
503 .weak SYM (__\name)
504 .endm
505 #endif
506
416 #ifdef __thumb__ 507 #ifdef __thumb__
417 /* Register aliases. */ 508 /* Register aliases. */
418 509
419 work .req r4 @ XXXX is this safe ? 510 work .req r4 @ XXXX is this safe ?
420 dividend .req r0 511 dividend .req r0
435 /* ------------------------------------------------------------------------ */ 526 /* ------------------------------------------------------------------------ */
436 .macro ARM_DIV_BODY dividend, divisor, result, curbit 527 .macro ARM_DIV_BODY dividend, divisor, result, curbit
437 528
438 #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__) 529 #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
439 530
531 #if defined (__thumb2__)
532 clz \curbit, \dividend
533 clz \result, \divisor
534 sub \curbit, \result, \curbit
535 rsb \curbit, \curbit, #31
536 adr \result, 1f
537 add \curbit, \result, \curbit, lsl #4
538 mov \result, #0
539 mov pc, \curbit
540 .p2align 3
541 1:
542 .set shift, 32
543 .rept 32
544 .set shift, shift - 1
545 cmp.w \dividend, \divisor, lsl #shift
546 nop.n
547 adc.w \result, \result, \result
548 it cs
549 subcs.w \dividend, \dividend, \divisor, lsl #shift
550 .endr
551 #else
440 clz \curbit, \dividend 552 clz \curbit, \dividend
441 clz \result, \divisor 553 clz \result, \divisor
442 sub \curbit, \result, \curbit 554 sub \curbit, \result, \curbit
443 rsbs \curbit, \curbit, #31 555 rsbs \curbit, \curbit, #31
444 addne \curbit, \curbit, \curbit, lsl #1 556 addne \curbit, \curbit, \curbit, lsl #1
450 .set shift, shift - 1 562 .set shift, shift - 1
451 cmp \dividend, \divisor, lsl #shift 563 cmp \dividend, \divisor, lsl #shift
452 adc \result, \result, \result 564 adc \result, \result, \result
453 subcs \dividend, \dividend, \divisor, lsl #shift 565 subcs \dividend, \dividend, \divisor, lsl #shift
454 .endr 566 .endr
567 #endif
455 568
456 #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */ 569 #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
457 #if __ARM_ARCH__ >= 5 570 #if __ARM_ARCH__ >= 5
458 571
459 clz \curbit, \divisor 572 clz \curbit, \divisor
497 610
498 #endif /* __ARM_ARCH__ < 5 */ 611 #endif /* __ARM_ARCH__ < 5 */
499 612
500 @ Division loop 613 @ Division loop
501 1: cmp \dividend, \divisor 614 1: cmp \dividend, \divisor
615 do_it hs, t
502 subhs \dividend, \dividend, \divisor 616 subhs \dividend, \dividend, \divisor
503 orrhs \result, \result, \curbit 617 orrhs \result, \result, \curbit
504 cmp \dividend, \divisor, lsr #1 618 cmp \dividend, \divisor, lsr #1
619 do_it hs, t
505 subhs \dividend, \dividend, \divisor, lsr #1 620 subhs \dividend, \dividend, \divisor, lsr #1
506 orrhs \result, \result, \curbit, lsr #1 621 orrhs \result, \result, \curbit, lsr #1
507 cmp \dividend, \divisor, lsr #2 622 cmp \dividend, \divisor, lsr #2
623 do_it hs, t
508 subhs \dividend, \dividend, \divisor, lsr #2 624 subhs \dividend, \dividend, \divisor, lsr #2
509 orrhs \result, \result, \curbit, lsr #2 625 orrhs \result, \result, \curbit, lsr #2
510 cmp \dividend, \divisor, lsr #3 626 cmp \dividend, \divisor, lsr #3
627 do_it hs, t
511 subhs \dividend, \dividend, \divisor, lsr #3 628 subhs \dividend, \dividend, \divisor, lsr #3
512 orrhs \result, \result, \curbit, lsr #3 629 orrhs \result, \result, \curbit, lsr #3
513 cmp \dividend, #0 @ Early termination? 630 cmp \dividend, #0 @ Early termination?
631 do_it hs, t
514 movnes \curbit, \curbit, lsr #4 @ No, any more bits to do? 632 movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
515 movne \divisor, \divisor, lsr #4 633 movne \divisor, \divisor, lsr #4
516 bne 1b 634 bne 1b
517 635
518 #endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */ 636 #endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
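The generic division step that ends here (the loop at label "1:") is a classic restoring shift-and-subtract divider: the divisor is first aligned with the dividend, then subtracted back out one bit position at a time, four quotient bits per pass, with the "Early termination?" test exiting as soon as the dividend reaches zero. A rough C model of that algorithm follows (a sketch for illustration only, not the libgcc code; udiv_model is a made-up name, and the real callers have already branched to Ldiv0 for a zero divisor before reaching this point):

#include <stdint.h>

/* C model of the shift-and-subtract division performed by ARM_DIV_BODY
   (illustration only; divisor must be non-zero).  */
static uint32_t
udiv_model (uint32_t dividend, uint32_t divisor)
{
  uint32_t curbit = 1;
  uint32_t result = 0;

  /* Align the divisor with the dividend; the assembly does this with
     clz on ARMv5+ or with a shift loop on older architectures.  */
  while (divisor < dividend && !(divisor & 0x80000000u))
    {
      divisor <<= 1;
      curbit <<= 1;
    }

  /* Subtract the shifted divisor whenever it fits, recording the
     corresponding quotient bit, then move one bit to the right.  The
     assembly loop performs four of these steps per iteration.  */
  do
    {
      if (dividend >= divisor)
        {
          dividend -= divisor;
          result |= curbit;
        }
      divisor >>= 1;
      curbit >>= 1;
    }
  while (curbit != 0 && dividend != 0);  /* "Early termination?" test.  */

  return result;
}

The clz-based fast path at the top of the macro computes the alignment in a single step and then falls into a fully unrolled run of the same compare/adc/subtract sequence.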
797 /* ------------------------------------------------------------------------ */ 915 /* ------------------------------------------------------------------------ */
798 /* Start of the Real Functions */ 916 /* Start of the Real Functions */
799 /* ------------------------------------------------------------------------ */ 917 /* ------------------------------------------------------------------------ */
800 #ifdef L_udivsi3 918 #ifdef L_udivsi3
801 919
920 #if defined(__ARM_ARCH_6M__)
921
802 FUNC_START udivsi3 922 FUNC_START udivsi3
803 FUNC_ALIAS aeabi_uidiv udivsi3 923 FUNC_ALIAS aeabi_uidiv udivsi3
804 924
805 #ifdef __thumb__
806
807 cmp divisor, #0 925 cmp divisor, #0
808 beq LSYM(Ldiv0) 926 beq LSYM(Ldiv0)
927 LSYM(udivsi3_skip_div0_test):
809 mov curbit, #1 928 mov curbit, #1
810 mov result, #0 929 mov result, #0
811 930
812 push { work } 931 push { work }
813 cmp dividend, divisor 932 cmp dividend, divisor
817 936
818 mov r0, result 937 mov r0, result
819 pop { work } 938 pop { work }
820 RET 939 RET
821 940
822 #else /* ARM version. */ 941 #else /* ARM version/Thumb-2. */
823 942
943 ARM_FUNC_START udivsi3
944 ARM_FUNC_ALIAS aeabi_uidiv udivsi3
945
946 /* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
947 check for division-by-zero a second time. */
948 LSYM(udivsi3_skip_div0_test):
824 subs r2, r1, #1 949 subs r2, r1, #1
950 do_it eq
825 RETc(eq) 951 RETc(eq)
826 bcc LSYM(Ldiv0) 952 bcc LSYM(Ldiv0)
827 cmp r0, r1 953 cmp r0, r1
828 bls 11f 954 bls 11f
829 tst r1, r2 955 tst r1, r2
832 ARM_DIV_BODY r0, r1, r2, r3 958 ARM_DIV_BODY r0, r1, r2, r3
833 959
834 mov r0, r2 960 mov r0, r2
835 RET 961 RET
836 962
837 11: moveq r0, #1 963 11: do_it eq, e
964 moveq r0, #1
838 movne r0, #0 965 movne r0, #0
839 RET 966 RET
840 967
841 12: ARM_DIV2_ORDER r1, r2 968 12: ARM_DIV2_ORDER r1, r2
842 969
843 mov r0, r0, lsr r2 970 mov r0, r0, lsr r2
844 RET 971 RET
845 972
846 #endif /* ARM version */ 973 #endif /* ARM version */
847 974
848 DIV_FUNC_END udivsi3 975 DIV_FUNC_END udivsi3 unsigned
849 976
977 #if defined(__ARM_ARCH_6M__)
850 FUNC_START aeabi_uidivmod 978 FUNC_START aeabi_uidivmod
851 #ifdef __thumb__ 979 cmp r1, #0
980 beq LSYM(Ldiv0)
852 push {r0, r1, lr} 981 push {r0, r1, lr}
853 bl SYM(__udivsi3) 982 bl LSYM(udivsi3_skip_div0_test)
854 POP {r1, r2, r3} 983 POP {r1, r2, r3}
855 mul r2, r0 984 mul r2, r0
856 sub r1, r1, r2 985 sub r1, r1, r2
857 bx r3 986 bx r3
858 #else 987 #else
988 ARM_FUNC_START aeabi_uidivmod
989 cmp r1, #0
990 beq LSYM(Ldiv0)
859 stmfd sp!, { r0, r1, lr } 991 stmfd sp!, { r0, r1, lr }
860 bl SYM(__udivsi3) 992 bl LSYM(udivsi3_skip_div0_test)
861 ldmfd sp!, { r1, r2, lr } 993 ldmfd sp!, { r1, r2, lr }
862 mul r3, r2, r0 994 mul r3, r2, r0
863 sub r1, r1, r3 995 sub r1, r1, r3
864 RET 996 RET
865 #endif 997 #endif
902 1034
903 RET 1035 RET
904 1036
905 #endif /* ARM version. */ 1037 #endif /* ARM version. */
906 1038
907 DIV_FUNC_END umodsi3 1039 DIV_FUNC_END umodsi3 unsigned
908 1040
909 #endif /* L_umodsi3 */ 1041 #endif /* L_umodsi3 */
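For context, these entry points are the run-time helpers the compiler calls when the target has no hardware divide instruction: an ordinary C division or modulo is all it takes to pull them in. A minimal sketch of that mapping, assuming an ARM EABI target built with soft division (the function names below are ordinary user code, and the exact lowering can vary by GCC version and options):

/* Sketch: how C operators reach the routines above on a soft-divide
   ARM EABI target.  */
unsigned int
udiv_example (unsigned int a, unsigned int b)
{
  return a / b;   /* typically -> __aeabi_uidiv (an alias of __udivsi3) */
}

unsigned int
umod_example (unsigned int a, unsigned int b)
{
  return a % b;   /* typically -> __aeabi_uidivmod, remainder in r1
                     (or __umodsi3 on non-EABI targets) */
}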
910 /* ------------------------------------------------------------------------ */ 1042 /* ------------------------------------------------------------------------ */
911 #ifdef L_divsi3 1043 #ifdef L_divsi3
912 1044
1045 #if defined(__ARM_ARCH_6M__)
1046
913 FUNC_START divsi3 1047 FUNC_START divsi3
914 FUNC_ALIAS aeabi_idiv divsi3 1048 FUNC_ALIAS aeabi_idiv divsi3
915 1049
916 #ifdef __thumb__
917 cmp divisor, #0 1050 cmp divisor, #0
918 beq LSYM(Ldiv0) 1051 beq LSYM(Ldiv0)
919 1052 LSYM(divsi3_skip_div0_test):
920 push { work } 1053 push { work }
921 mov work, dividend 1054 mov work, dividend
922 eor work, divisor @ Save the sign of the result. 1055 eor work, divisor @ Save the sign of the result.
923 mov ip, work 1056 mov ip, work
924 mov curbit, #1 1057 mov curbit, #1
943 neg r0, r0 1076 neg r0, r0
944 LSYM(Lover12): 1077 LSYM(Lover12):
945 pop { work } 1078 pop { work }
946 RET 1079 RET
947 1080
948 #else /* ARM version. */ 1081 #else /* ARM/Thumb-2 version. */
949 1082
1083 ARM_FUNC_START divsi3
1084 ARM_FUNC_ALIAS aeabi_idiv divsi3
1085
950 cmp r1, #0 1086 cmp r1, #0
1087 beq LSYM(Ldiv0)
1088 LSYM(divsi3_skip_div0_test):
951 eor ip, r0, r1 @ save the sign of the result. 1089 eor ip, r0, r1 @ save the sign of the result.
952 beq LSYM(Ldiv0) 1090 do_it mi
953 rsbmi r1, r1, #0 @ loops below use unsigned. 1091 rsbmi r1, r1, #0 @ loops below use unsigned.
954 subs r2, r1, #1 @ division by 1 or -1 ? 1092 subs r2, r1, #1 @ division by 1 or -1 ?
955 beq 10f 1093 beq 10f
956 movs r3, r0 1094 movs r3, r0
1095 do_it mi
957 rsbmi r3, r0, #0 @ positive dividend value 1096 rsbmi r3, r0, #0 @ positive dividend value
958 cmp r3, r1 1097 cmp r3, r1
959 bls 11f 1098 bls 11f
960 tst r1, r2 @ divisor is power of 2 ? 1099 tst r1, r2 @ divisor is power of 2 ?
961 beq 12f 1100 beq 12f
962 1101
963 ARM_DIV_BODY r3, r1, r0, r2 1102 ARM_DIV_BODY r3, r1, r0, r2
964 1103
965 cmp ip, #0 1104 cmp ip, #0
1105 do_it mi
966 rsbmi r0, r0, #0 1106 rsbmi r0, r0, #0
967 RET 1107 RET
968 1108
969 10: teq ip, r0 @ same sign ? 1109 10: teq ip, r0 @ same sign ?
1110 do_it mi
970 rsbmi r0, r0, #0 1111 rsbmi r0, r0, #0
971 RET 1112 RET
972 1113
973 11: movlo r0, #0 1114 11: do_it lo
1115 movlo r0, #0
1116 do_it eq,t
974 moveq r0, ip, asr #31 1117 moveq r0, ip, asr #31
975 orreq r0, r0, #1 1118 orreq r0, r0, #1
976 RET 1119 RET
977 1120
978 12: ARM_DIV2_ORDER r1, r2 1121 12: ARM_DIV2_ORDER r1, r2
979 1122
980 cmp ip, #0 1123 cmp ip, #0
981 mov r0, r3, lsr r2 1124 mov r0, r3, lsr r2
1125 do_it mi
982 rsbmi r0, r0, #0 1126 rsbmi r0, r0, #0
983 RET 1127 RET
984 1128
985 #endif /* ARM version */ 1129 #endif /* ARM version */
986 1130
987 DIV_FUNC_END divsi3 1131 DIV_FUNC_END divsi3 signed
988 1132
1133 #if defined(__ARM_ARCH_6M__)
989 FUNC_START aeabi_idivmod 1134 FUNC_START aeabi_idivmod
990 #ifdef __thumb__ 1135 cmp r1, #0
1136 beq LSYM(Ldiv0)
991 push {r0, r1, lr} 1137 push {r0, r1, lr}
992 bl SYM(__divsi3) 1138 bl LSYM(divsi3_skip_div0_test)
993 POP {r1, r2, r3} 1139 POP {r1, r2, r3}
994 mul r2, r0 1140 mul r2, r0
995 sub r1, r1, r2 1141 sub r1, r1, r2
996 bx r3 1142 bx r3
997 #else 1143 #else
1144 ARM_FUNC_START aeabi_idivmod
1145 cmp r1, #0
1146 beq LSYM(Ldiv0)
998 stmfd sp!, { r0, r1, lr } 1147 stmfd sp!, { r0, r1, lr }
999 bl SYM(__divsi3) 1148 bl LSYM(divsi3_skip_div0_test)
1000 ldmfd sp!, { r1, r2, lr } 1149 ldmfd sp!, { r1, r2, lr }
1001 mul r3, r2, r0 1150 mul r3, r2, r0
1002 sub r1, r1, r3 1151 sub r1, r1, r3
1003 RET 1152 RET
1004 #endif 1153 #endif
1060 rsbmi r0, r0, #0 1209 rsbmi r0, r0, #0
1061 RET 1210 RET
1062 1211
1063 #endif /* ARM version */ 1212 #endif /* ARM version */
1064 1213
1065 DIV_FUNC_END modsi3 1214 DIV_FUNC_END modsi3 signed
1066 1215
1067 #endif /* L_modsi3 */ 1216 #endif /* L_modsi3 */
1068 /* ------------------------------------------------------------------------ */ 1217 /* ------------------------------------------------------------------------ */
1069 #ifdef L_dvmd_tls 1218 #ifdef L_dvmd_tls
1070 1219
1071 FUNC_START div0 1220 #ifdef __ARM_EABI__
1072 FUNC_ALIAS aeabi_idiv0 div0 1221 WEAK aeabi_idiv0
1073 FUNC_ALIAS aeabi_ldiv0 div0 1222 WEAK aeabi_ldiv0
1074 1223 FUNC_START aeabi_idiv0
1075 RET 1224 FUNC_START aeabi_ldiv0
1076 1225 RET
1077 FUNC_END aeabi_ldiv0 1226 FUNC_END aeabi_ldiv0
1078 FUNC_END aeabi_idiv0 1227 FUNC_END aeabi_idiv0
1228 #else
1229 FUNC_START div0
1230 RET
1079 FUNC_END div0 1231 FUNC_END div0
1232 #endif
1080 1233
1081 #endif /* L_divmodsi_tools */ 1234 #endif /* L_divmodsi_tools */
1082 /* ------------------------------------------------------------------------ */ 1235 /* ------------------------------------------------------------------------ */
1083 #ifdef L_dvmd_lnx 1236 #ifdef L_dvmd_lnx
1084 @ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls 1237 @ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls

1085 1238
1086 /* Constant taken from <asm/signal.h>. */ 1239 /* Constant taken from <asm/signal.h>. */
1087 #define SIGFPE 8 1240 #define SIGFPE 8
1088 1241
1242 #ifdef __ARM_EABI__
1243 WEAK aeabi_idiv0
1244 WEAK aeabi_ldiv0
1245 ARM_FUNC_START aeabi_idiv0
1246 ARM_FUNC_START aeabi_ldiv0
1247 #else
1089 ARM_FUNC_START div0 1248 ARM_FUNC_START div0
1249 #endif
1090 1250
1091 do_push {r1, lr} 1251 do_push {r1, lr}
1092 mov r0, #SIGFPE 1252 mov r0, #SIGFPE
1093 bl SYM(raise) __PLT__ 1253 bl SYM(raise) __PLT__
1094 RETLDM r1 1254 RETLDM r1
1095 1255
1256 #ifdef __ARM_EABI__
1257 FUNC_END aeabi_ldiv0
1258 FUNC_END aeabi_idiv0
1259 #else
1096 FUNC_END div0 1260 FUNC_END div0
1261 #endif
1097 1262
1098 #endif /* L_dvmd_lnx */ 1263 #endif /* L_dvmd_lnx */
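Because the L_dvmd_lnx variant of __aeabi_idiv0 above simply calls raise (SIGFPE), a GNU/Linux program linked against it can observe an integer division by zero as an ordinary SIGFPE. A small sketch follows (division by zero is undefined behaviour in C; this only demonstrates what this particular runtime does with it, and fpe_handler is an invented name):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
fpe_handler (int sig)
{
  (void) sig;
  /* Keep the handler async-signal-safe: just exit.  */
  _exit (42);
}

int
main (void)
{
  volatile int zero = 0;

  signal (SIGFPE, fpe_handler);
  /* On a soft-divide ARM EABI/Linux target this division reaches
     __aeabi_idiv, which branches to __aeabi_idiv0 -> raise (SIGFPE).  */
  printf ("%d\n", 1 / zero);
  return 0;
}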
1264 #ifdef L_clear_cache
1265 #if defined __ARM_EABI__ && defined __linux__
1266 @ EABI GNU/Linux call to cacheflush syscall.
1267 ARM_FUNC_START clear_cache
1268 do_push {r7}
1269 #if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
1270 movw r7, #2
1271 movt r7, #0xf
1272 #else
1273 mov r7, #0xf0000
1274 add r7, r7, #2
1275 #endif
1276 mov r2, #0
1277 swi 0
1278 do_pop {r7}
1279 RET
1280 FUNC_END clear_cache
1281 #else
1282 #error "This is only for ARM EABI GNU/Linux"
1283 #endif
1284 #endif /* L_clear_cache */
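The __clear_cache routine above is normally reached through GCC's __builtin___clear_cache, which is what code generators and JITs call after writing instructions to memory. A minimal sketch, assuming the buffer has already been made executable (publish_code is invented for this example):

#include <string.h>

/* Copy freshly generated instructions into buf and synchronise the
   instruction cache with the data cache before branching to them.
   __builtin___clear_cache expands to a call to __clear_cache (the
   routine above) when no cheaper inline expansion is available.  */
void
publish_code (char *buf, const char *code, size_t len)
{
  memcpy (buf, code, len);
  __builtin___clear_cache (buf, buf + len);
}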
1099 /* ------------------------------------------------------------------------ */ 1285 /* ------------------------------------------------------------------------ */
1100 /* Dword shift operations. */ 1286 /* Dword shift operations. */
1101 /* All the following Dword shift variants rely on the fact that 1287 /* All the following Dword shift variants rely on the fact that
1102 shft xxx, Reg 1288 shft xxx, Reg
1103 is in fact done as 1289 is in fact done as
1510 1696
1511 SIZE (_interwork_call_via_lr) 1697 SIZE (_interwork_call_via_lr)
1512 1698
1513 #endif /* L_interwork_call_via_rX */ 1699 #endif /* L_interwork_call_via_rX */
1514 #endif /* !__thumb2__ */ 1700 #endif /* !__thumb2__ */
1701
1702 /* Functions to support compact pic switch tables in thumb1 state.
1703 All these routines take an index into the table in r0. The
1704 table is at LR & ~1 (but this must be rounded up in the case
1705 of 32-bit entries). They are only permitted to clobber r12
1706 and r14 and r0 must be preserved on exit. */
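In other words, the compiler emits a bl to one of these helpers immediately followed by the offset table, with the switch index already in r0; each helper loads the entry for that index and adjusts the return address so execution resumes at the selected case. For illustration, a dense C switch compiled for Thumb-1 (e.g. with -Os) is the kind of construct that may be dispatched this way (sketch only; whether a table is used at all depends on case density and compiler version):

/* Illustration only: a dense switch like this, compiled for Thumb-1,
   may be lowered to a compact table dispatched through one of the
   __gnu_thumb1_case_* helpers defined below.  */
int
decode_op (int op)
{
  switch (op)
    {
    case 0:  return 10;
    case 1:  return 11;
    case 2:  return 12;
    case 3:  return 13;
    case 4:  return 14;
    case 5:  return 15;
    default: return -1;
    }
}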
1707 #ifdef L_thumb1_case_sqi
1708
1709 .text
1710 .align 0
1711 .force_thumb
1712 .syntax unified
1713 THUMB_FUNC_START __gnu_thumb1_case_sqi
1714 push {r1}
1715 mov r1, lr
1716 lsrs r1, r1, #1
1717 lsls r1, r1, #1
1718 ldrsb r1, [r1, r0]
1719 lsls r1, r1, #1
1720 add lr, lr, r1
1721 pop {r1}
1722 bx lr
1723 SIZE (__gnu_thumb1_case_sqi)
1724 #endif
1725
1726 #ifdef L_thumb1_case_uqi
1727
1728 .text
1729 .align 0
1730 .force_thumb
1731 .syntax unified
1732 THUMB_FUNC_START __gnu_thumb1_case_uqi
1733 push {r1}
1734 mov r1, lr
1735 lsrs r1, r1, #1
1736 lsls r1, r1, #1
1737 ldrb r1, [r1, r0]
1738 lsls r1, r1, #1
1739 add lr, lr, r1
1740 pop {r1}
1741 bx lr
1742 SIZE (__gnu_thumb1_case_uqi)
1743 #endif
1744
1745 #ifdef L_thumb1_case_shi
1746
1747 .text
1748 .align 0
1749 .force_thumb
1750 .syntax unified
1751 THUMB_FUNC_START __gnu_thumb1_case_shi
1752 push {r0, r1}
1753 mov r1, lr
1754 lsrs r1, r1, #1
1755 lsls r0, r0, #1
1756 lsls r1, r1, #1
1757 ldrsh r1, [r1, r0]
1758 lsls r1, r1, #1
1759 add lr, lr, r1
1760 pop {r0, r1}
1761 bx lr
1762 SIZE (__gnu_thumb1_case_shi)
1763 #endif
1764
1765 #ifdef L_thumb1_case_uhi
1766
1767 .text
1768 .align 0
1769 .force_thumb
1770 .syntax unified
1771 THUMB_FUNC_START __gnu_thumb1_case_uhi
1772 push {r0, r1}
1773 mov r1, lr
1774 lsrs r1, r1, #1
1775 lsls r0, r0, #1
1776 lsls r1, r1, #1
1777 ldrh r1, [r1, r0]
1778 lsls r1, r1, #1
1779 add lr, lr, r1
1780 pop {r0, r1}
1781 bx lr
1782 SIZE (__gnu_thumb1_case_uhi)
1783 #endif
1784
1785 #ifdef L_thumb1_case_si
1786
1787 .text
1788 .align 0
1789 .force_thumb
1790 .syntax unified
1791 THUMB_FUNC_START __gnu_thumb1_case_si
1792 push {r0, r1}
1793 mov r1, lr
1794 adds.n r1, r1, #2 /* Align to word. */
1795 lsrs r1, r1, #2
1796 lsls r0, r0, #2
1797 lsls r1, r1, #2
1798 ldr r0, [r1, r0]
1799 adds r0, r0, r1
1800 mov lr, r0
1801 pop {r0, r1}
1802 mov pc, lr /* We know we were called from thumb code. */
1803 SIZE (__gnu_thumb1_case_si)
1804 #endif
1805
1515 #endif /* Arch supports thumb. */ 1806 #endif /* Arch supports thumb. */
1516 1807
1517 #ifndef __symbian__ 1808 #ifndef __symbian__
1518 #ifndef __ARM_ARCH_6M__ 1809 #ifndef __ARM_ARCH_6M__
1519 #include "ieee754-df.S" 1810 #include "ieee754-df.S"