/* -*- Mode: Asm -*-  */
/* Copyright (C) 1998, 1999, 2000, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Denis Chertykov <denisc@overta.ru>

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define __zero_reg__ r1
#define __tmp_reg__ r0
#define __SREG__ 0x3f
#define __SP_H__ 0x3e
#define __SP_L__ 0x3d
#define __RAMPZ__ 0x3B

/* Most of the functions here are called directly from avr.md
   patterns, instead of using the standard libcall mechanisms.
   This can make better code because GCC knows exactly which
   of the call-used registers (not all of them) are clobbered.  */

    .section .text.libgcc, "ax", @progbits

    .macro mov_l r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
    movw    \r_dest, \r_src
#else
    mov     \r_dest, \r_src
#endif
    .endm

    .macro mov_h r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
    ; empty
#else
    mov     \r_dest, \r_src
#endif
    .endm
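
/* Usage illustration (not part of the build): copying the 16-bit pair
   r25:r24 into r19:r18 is written

       mov_l   r18, r24
       mov_h   r19, r25

   On devices with MOVW, mov_l expands to a single "movw" that moves
   both bytes and mov_h expands to nothing; otherwise each macro is a
   plain "mov".  */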

/* Note: mulqi3, mulhi3 are open-coded on the enhanced core.  */
#if !defined (__AVR_HAVE_MUL__)
/*******************************************************
    Multiplication  8 x 8
*******************************************************/
#if defined (L_mulqi3)

#define r_arg2  r22             /* multiplicand */
#define r_arg1  r24             /* multiplier */
#define r_res   __tmp_reg__     /* result */

    .global __mulqi3
    .func   __mulqi3
__mulqi3:
    clr     r_res               ; clear result
__mulqi3_loop:
    sbrc    r_arg1,0            ; skip add unless multiplier bit 0 set
    add     r_res,r_arg2        ; result += multiplicand
    add     r_arg2,r_arg2       ; shift multiplicand
    breq    __mulqi3_exit       ; while multiplicand != 0
    lsr     r_arg1              ; shift out multiplier LSB
    brne    __mulqi3_loop       ; exit if multiplier = 0
__mulqi3_exit:
    mov     r_arg1,r_res        ; result to return register
    ret

#undef r_arg2
#undef r_arg1
#undef r_res

    .endfunc
#endif /* defined (L_mulqi3) */
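
/* A rough C model of the shift-and-add loop above (illustration only,
   not part of the build; names are hypothetical):

       #include <stdint.h>

       uint8_t mulqi3 (uint8_t a, uint8_t b)
       {
           uint8_t res = 0;
           do {
               if (a & 1)
                   res += b;    // add current multiplicand
               b <<= 1;         // shift multiplicand
               if (b == 0)
                   break;       // nothing more can be added
               a >>= 1;         // consume one multiplier bit
           } while (a != 0);
           return res;
       }

   The same result holds for signed arguments because the product is
   truncated to 8 bits.  */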

#if defined (L_mulqihi3)
    .global __mulqihi3
    .func   __mulqihi3
__mulqihi3:
    clr     r25
    sbrc    r24, 7
    dec     r25                 ; sign-extend multiplier into r25
    clr     r23
    sbrc    r22, 7
    dec     r23                 ; sign-extend multiplicand into r23
    rjmp    __mulhi3
    .endfunc
#endif /* defined (L_mulqihi3) */
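
/* In C terms, __mulqihi3 widens both signed 8-bit arguments to 16 bits
   and tail-calls __mulhi3 (illustration only):

       #include <stdint.h>

       int16_t mulqihi3 (int8_t a, int8_t b)
       {
           return (int16_t) a * (int16_t) b;   // sign-extend, then 16 x 16
       }

   The sign extension fills the high bytes (r25 and r23) with 0xff when
   the corresponding argument is negative.  */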

#if defined (L_umulqihi3)
    .global __umulqihi3
    .func   __umulqihi3
__umulqihi3:
    clr     r25
    clr     r23
    rjmp    __mulhi3
    .endfunc
#endif /* defined (L_umulqihi3) */

/*******************************************************
    Multiplication  16 x 16
*******************************************************/
#if defined (L_mulhi3)
#define r_arg1L r24             /* multiplier Low */
#define r_arg1H r25             /* multiplier High */
#define r_arg2L r22             /* multiplicand Low */
#define r_arg2H r23             /* multiplicand High */
#define r_resL  __tmp_reg__     /* result Low */
#define r_resH  r21             /* result High */

    .global __mulhi3
    .func   __mulhi3
__mulhi3:
    clr     r_resH              ; clear result
    clr     r_resL              ; clear result
__mulhi3_loop:
    sbrs    r_arg1L,0
    rjmp    __mulhi3_skip1
    add     r_resL,r_arg2L      ; result + multiplicand
    adc     r_resH,r_arg2H
__mulhi3_skip1:
    add     r_arg2L,r_arg2L     ; shift multiplicand
    adc     r_arg2H,r_arg2H

    cp      r_arg2L,__zero_reg__
    cpc     r_arg2H,__zero_reg__
    breq    __mulhi3_exit       ; while multiplicand != 0

    lsr     r_arg1H             ; gets LSB of multiplier
    ror     r_arg1L
    sbiw    r_arg1L,0
    brne    __mulhi3_loop       ; exit if multiplier = 0
__mulhi3_exit:
    mov     r_arg1H,r_resH      ; result to return register
    mov     r_arg1L,r_resL
    ret

#undef r_arg1L
#undef r_arg1H
#undef r_arg2L
#undef r_arg2H
#undef r_resL
#undef r_resH

    .endfunc
#endif /* defined (L_mulhi3) */
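
/* C model of the 16-bit loop (illustration only): the same
   shift-and-add scheme as __mulqi3, widened to 16 bits.

       #include <stdint.h>

       uint16_t mulhi3 (uint16_t a, uint16_t b)
       {
           uint16_t res = 0;
           do {
               if (a & 1)
                   res += b;
               b <<= 1;
               if (b == 0)
                   break;
               a >>= 1;
           } while (a != 0);
           return res;
       }
*/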
#endif /* !defined (__AVR_HAVE_MUL__) */

#if defined (L_mulhisi3)
    .global __mulhisi3
    .func   __mulhisi3
__mulhisi3:
    mov_l   r18, r24
    mov_h   r19, r25
    clr     r24
    sbrc    r23, 7
    dec     r24
    mov     r25, r24
    clr     r20
    sbrc    r19, 7
    dec     r20
    mov     r21, r20
    rjmp    __mulsi3
    .endfunc
#endif /* defined (L_mulhisi3) */
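
/* __mulhisi3 is a widening signed multiply; in C terms (illustration
   only):

       #include <stdint.h>

       int32_t mulhisi3 (int16_t a, int16_t b)
       {
           return (int32_t) a * (int32_t) b;   // sign-extend both, then 32 x 32
       }

   Only the low 32 bits of the product are needed, so sign-extending
   both operands and calling __mulsi3 gives the exact result.  */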

#if defined (L_umulhisi3)
    .global __umulhisi3
    .func   __umulhisi3
__umulhisi3:
    mov_l   r18, r24
    mov_h   r19, r25
    clr     r24
    clr     r25
    clr     r20
    clr     r21
    rjmp    __mulsi3
    .endfunc
#endif /* defined (L_umulhisi3) */

#if defined (L_mulsi3)
/*******************************************************
    Multiplication  32 x 32
*******************************************************/
#define r_arg1L  r22            /* multiplier Low */
#define r_arg1H  r23
#define r_arg1HL r24
#define r_arg1HH r25            /* multiplier High */

#define r_arg2L  r18            /* multiplicand Low */
#define r_arg2H  r19
#define r_arg2HL r20
#define r_arg2HH r21            /* multiplicand High */

#define r_resL   r26            /* result Low */
#define r_resH   r27
#define r_resHL  r30
#define r_resHH  r31            /* result High */

    .global __mulsi3
    .func   __mulsi3
__mulsi3:
#if defined (__AVR_HAVE_MUL__)
    mul     r_arg1L, r_arg2L
    movw    r_resL, r0
    mul     r_arg1H, r_arg2H
    movw    r_resHL, r0
    mul     r_arg1HL, r_arg2L
    add     r_resHL, r0
    adc     r_resHH, r1
    mul     r_arg1L, r_arg2HL
    add     r_resHL, r0
    adc     r_resHH, r1
    mul     r_arg1HH, r_arg2L
    add     r_resHH, r0
    mul     r_arg1HL, r_arg2H
    add     r_resHH, r0
    mul     r_arg1H, r_arg2HL
    add     r_resHH, r0
    mul     r_arg1L, r_arg2HH
    add     r_resHH, r0
    clr     r_arg1HH            ; use instead of __zero_reg__ to add carry
    mul     r_arg1H, r_arg2L
    add     r_resH, r0
    adc     r_resHL, r1
    adc     r_resHH, r_arg1HH   ; add carry
    mul     r_arg1L, r_arg2H
    add     r_resH, r0
    adc     r_resHL, r1
    adc     r_resHH, r_arg1HH   ; add carry
    movw    r_arg1L, r_resL
    movw    r_arg1HL, r_resHL
    clr     r1                  ; __zero_reg__ clobbered by "mul"
    ret
#else
    clr     r_resHH             ; clear result
    clr     r_resHL             ; clear result
    clr     r_resH              ; clear result
    clr     r_resL              ; clear result
__mulsi3_loop:
    sbrs    r_arg1L,0
    rjmp    __mulsi3_skip1
    add     r_resL,r_arg2L      ; result + multiplicand
    adc     r_resH,r_arg2H
    adc     r_resHL,r_arg2HL
    adc     r_resHH,r_arg2HH
__mulsi3_skip1:
    add     r_arg2L,r_arg2L     ; shift multiplicand
    adc     r_arg2H,r_arg2H
    adc     r_arg2HL,r_arg2HL
    adc     r_arg2HH,r_arg2HH

    lsr     r_arg1HH            ; gets LSB of multiplier
    ror     r_arg1HL
    ror     r_arg1H
    ror     r_arg1L
    brne    __mulsi3_loop
    sbiw    r_arg1HL,0
    cpc     r_arg1H,r_arg1L
    brne    __mulsi3_loop       ; exit if multiplier = 0
__mulsi3_exit:
    mov_h   r_arg1HH,r_resHH    ; result to return register
    mov_l   r_arg1HL,r_resHL
    mov_h   r_arg1H,r_resH
    mov_l   r_arg1L,r_resL
    ret
#endif /* defined (__AVR_HAVE_MUL__) */
#undef r_arg1L
#undef r_arg1H
#undef r_arg1HL
#undef r_arg1HH

#undef r_arg2L
#undef r_arg2H
#undef r_arg2HL
#undef r_arg2HH

#undef r_resL
#undef r_resH
#undef r_resHL
#undef r_resHH

    .endfunc
#endif /* defined (L_mulsi3) */
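
/* C model of the MUL-based path (illustration only): the product is
   assembled from 8 x 8 partial products, and the assembly simply skips
   products whose whole contribution lands at or above bit 32.  The
   uint32_t arithmetic below discards those bits the same way:

       #include <stdint.h>

       uint32_t mulsi3 (uint32_t a, uint32_t b)
       {
           uint32_t res = 0;
           for (int i = 0; i < 4; i++)          // byte i of a
               for (int j = 0; j + i < 4; j++)  // byte j of b
               {
                   uint32_t p = (uint32_t) ((a >> (8 * i)) & 0xff)
                                * ((b >> (8 * j)) & 0xff);
                   res += p << (8 * (i + j));   // truncates mod 2^32
               }
           return res;
       }

   The generic (non-MUL) path instead extends the __mulhi3
   shift-and-add loop to four bytes.  */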

/*******************************************************
    Division 8 / 8 => (result + remainder)
*******************************************************/
#define r_rem   r25     /* remainder */
#define r_arg1  r24     /* dividend, quotient */
#define r_arg2  r22     /* divisor */
#define r_cnt   r23     /* loop count */

#if defined (L_udivmodqi4)
    .global __udivmodqi4
    .func   __udivmodqi4
__udivmodqi4:
    sub     r_rem,r_rem         ; clear remainder and carry
    ldi     r_cnt,9             ; init loop counter
    rjmp    __udivmodqi4_ep     ; jump to entry point
__udivmodqi4_loop:
    rol     r_rem               ; shift dividend into remainder
    cp      r_rem,r_arg2        ; compare remainder & divisor
    brcs    __udivmodqi4_ep     ; remainder < divisor
    sub     r_rem,r_arg2        ; subtract divisor from remainder
__udivmodqi4_ep:
    rol     r_arg1              ; shift dividend (with CARRY)
    dec     r_cnt               ; decrement loop counter
    brne    __udivmodqi4_loop
    com     r_arg1              ; complement result
                                ; because C flag was complemented in loop
    ret
    .endfunc
#endif /* defined (L_udivmodqi4) */
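
/* C model of the restoring division above (illustration only; names
   are hypothetical):

       #include <stdint.h>

       uint8_t udivmodqi4 (uint8_t num, uint8_t den, uint8_t *rem)
       {
           uint8_t quo = 0, r = 0;
           for (int i = 7; i >= 0; i--) {
               r = (r << 1) | ((num >> i) & 1);  // next dividend bit in
               quo <<= 1;
               if (r >= den) {                   // quotient bit is 1
                   r -= den;
                   quo |= 1;
               }
           }
           *rem = r;
           return quo;
       }

   The assembly keeps the quotient in the dividend register, shifting
   quotient bits in from the carry flag; since the carry is the
   complement of the quotient bit inside the loop, the final "com"
   fixes the whole result at once.  The quotient is returned in r24,
   the remainder in r25.  */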

#if defined (L_divmodqi4)
    .global __divmodqi4
    .func   __divmodqi4
__divmodqi4:
    bst     r_arg1,7            ; store sign of dividend
    mov     __tmp_reg__,r_arg1
    eor     __tmp_reg__,r_arg2  ; r0.7 is sign of result
    sbrc    r_arg1,7
    neg     r_arg1              ; dividend negative : negate
    sbrc    r_arg2,7
    neg     r_arg2              ; divisor negative : negate
    rcall   __udivmodqi4        ; do the unsigned div/mod
    brtc    __divmodqi4_1
    neg     r_rem               ; correct remainder sign
__divmodqi4_1:
    sbrc    __tmp_reg__,7
    neg     r_arg1              ; correct result sign
__divmodqi4_exit:
    ret
    .endfunc
#endif /* defined (L_divmodqi4) */
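
/* Signed division reduces to the unsigned routine; a sketch reusing
   the hypothetical udivmodqi4 from the model above (illustration
   only):

       #include <stdint.h>

       void divmodqi4 (int8_t num, int8_t den, int8_t *quo, int8_t *rem)
       {
           uint8_t ur;
           uint8_t uq = udivmodqi4 (num < 0 ? -num : num,
                                    den < 0 ? -den : den, &ur);
           *rem = num < 0 ? -ur : ur;           // remainder: sign of dividend
           *quo = (num ^ den) < 0 ? -uq : uq;   // quotient: XOR of the signs
       }

   The T flag caches the dividend's sign and r0.7 the XOR of the signs,
   exactly as the comments in the assembly say.  */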

#undef r_rem
#undef r_arg1
#undef r_arg2
#undef r_cnt


/*******************************************************
    Division 16 / 16 => (result + remainder)
*******************************************************/
#define r_remL  r26     /* remainder Low */
#define r_remH  r27     /* remainder High */

/* return: remainder */
#define r_arg1L r24     /* dividend Low */
#define r_arg1H r25     /* dividend High */

/* return: quotient */
#define r_arg2L r22     /* divisor Low */
#define r_arg2H r23     /* divisor High */

#define r_cnt   r21     /* loop count */

#if defined (L_udivmodhi4)
    .global __udivmodhi4
    .func   __udivmodhi4
__udivmodhi4:
    sub     r_remL,r_remL
    sub     r_remH,r_remH       ; clear remainder and carry
    ldi     r_cnt,17            ; init loop counter
    rjmp    __udivmodhi4_ep     ; jump to entry point
__udivmodhi4_loop:
    rol     r_remL              ; shift dividend into remainder
    rol     r_remH
    cp      r_remL,r_arg2L      ; compare remainder & divisor
    cpc     r_remH,r_arg2H
    brcs    __udivmodhi4_ep     ; remainder < divisor
    sub     r_remL,r_arg2L      ; subtract divisor from remainder
    sbc     r_remH,r_arg2H
__udivmodhi4_ep:
    rol     r_arg1L             ; shift dividend (with CARRY)
    rol     r_arg1H
    dec     r_cnt               ; decrement loop counter
    brne    __udivmodhi4_loop
    com     r_arg1L
    com     r_arg1H
; div/mod results to return registers, as for the div() function
    mov_l   r_arg2L, r_arg1L    ; quotient
    mov_h   r_arg2H, r_arg1H
    mov_l   r_arg1L, r_remL     ; remainder
    mov_h   r_arg1H, r_remH
    ret
    .endfunc
#endif /* defined (L_udivmodhi4) */
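
/* __udivmodhi4 is the same restoring-division loop as __udivmodqi4,
   widened to 16 bits (17 iterations).  Per the moves above, the
   quotient is returned in r23:r22 and the remainder in r25:r24, the
   {quotient, remainder} pair layout used for div().  */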

#if defined (L_divmodhi4)
    .global __divmodhi4
    .func   __divmodhi4
__divmodhi4:
    .global _div
_div:
    bst     r_arg1H,7           ; store sign of dividend
    mov     __tmp_reg__,r_arg1H
    eor     __tmp_reg__,r_arg2H ; r0.7 is sign of result
    rcall   __divmodhi4_neg1    ; dividend negative : negate
    sbrc    r_arg2H,7
    rcall   __divmodhi4_neg2    ; divisor negative : negate
    rcall   __udivmodhi4        ; do the unsigned div/mod
    rcall   __divmodhi4_neg1    ; correct remainder sign
    tst     __tmp_reg__
    brpl    __divmodhi4_exit
__divmodhi4_neg2:
    com     r_arg2H
    neg     r_arg2L             ; correct divisor/result sign
    sbci    r_arg2H,0xff
__divmodhi4_exit:
    ret
__divmodhi4_neg1:
    brtc    __divmodhi4_exit
    com     r_arg1H
    neg     r_arg1L             ; correct dividend/remainder sign
    sbci    r_arg1H,0xff
    ret
    .endfunc
#endif /* defined (L_divmodhi4) */

#undef r_remH
#undef r_remL

#undef r_arg1H
#undef r_arg1L

#undef r_arg2H
#undef r_arg2L

#undef r_cnt

/*******************************************************
    Division 32 / 32 => (result + remainder)
*******************************************************/
#define r_remHH r31     /* remainder High */
#define r_remHL r30
#define r_remH  r27
#define r_remL  r26     /* remainder Low */

/* return: remainder */
#define r_arg1HH r25    /* dividend High */
#define r_arg1HL r24
#define r_arg1H  r23
#define r_arg1L  r22    /* dividend Low */

/* return: quotient */
#define r_arg2HH r21    /* divisor High */
#define r_arg2HL r20
#define r_arg2H  r19
#define r_arg2L  r18    /* divisor Low */

#define r_cnt __zero_reg__  /* loop count (0 after the loop!) */

#if defined (L_udivmodsi4)
    .global __udivmodsi4
    .func   __udivmodsi4
__udivmodsi4:
    ldi     r_remL, 33          ; init loop counter
    mov     r_cnt, r_remL
    sub     r_remL,r_remL
    sub     r_remH,r_remH       ; clear remainder and carry
    mov_l   r_remHL, r_remL
    mov_h   r_remHH, r_remH
    rjmp    __udivmodsi4_ep     ; jump to entry point
__udivmodsi4_loop:
    rol     r_remL              ; shift dividend into remainder
    rol     r_remH
    rol     r_remHL
    rol     r_remHH
    cp      r_remL,r_arg2L      ; compare remainder & divisor
    cpc     r_remH,r_arg2H
    cpc     r_remHL,r_arg2HL
    cpc     r_remHH,r_arg2HH
    brcs    __udivmodsi4_ep     ; remainder < divisor
    sub     r_remL,r_arg2L      ; subtract divisor from remainder
    sbc     r_remH,r_arg2H
    sbc     r_remHL,r_arg2HL
    sbc     r_remHH,r_arg2HH
__udivmodsi4_ep:
    rol     r_arg1L             ; shift dividend (with CARRY)
    rol     r_arg1H
    rol     r_arg1HL
    rol     r_arg1HH
    dec     r_cnt               ; decrement loop counter
    brne    __udivmodsi4_loop
                                ; __zero_reg__ now restored (r_cnt == 0)
    com     r_arg1L
    com     r_arg1H
    com     r_arg1HL
    com     r_arg1HH
; div/mod results to return registers, as for the ldiv() function
    mov_l   r_arg2L, r_arg1L    ; quotient
    mov_h   r_arg2H, r_arg1H
    mov_l   r_arg2HL, r_arg1HL
    mov_h   r_arg2HH, r_arg1HH
    mov_l   r_arg1L, r_remL     ; remainder
    mov_h   r_arg1H, r_remH
    mov_l   r_arg1HL, r_remHL
    mov_h   r_arg1HH, r_remHH
    ret
    .endfunc
#endif /* defined (L_udivmodsi4) */
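
/* 32-bit flavor of the same loop.  Two details worth noting: the loop
   counter lives in __zero_reg__ (r1) and is counted down to exactly
   zero, so the register already holds its required value again when
   the loop exits, saving a register; and, as for ldiv(), the quotient
   ends up in r21:r20:r19:r18 and the remainder in r25:r24:r23:r22.  */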

#if defined (L_divmodsi4)
    .global __divmodsi4
    .func   __divmodsi4
__divmodsi4:
    bst     r_arg1HH,7          ; store sign of dividend
    mov     __tmp_reg__,r_arg1HH
    eor     __tmp_reg__,r_arg2HH ; r0.7 is sign of result
    rcall   __divmodsi4_neg1    ; dividend negative : negate
    sbrc    r_arg2HH,7
    rcall   __divmodsi4_neg2    ; divisor negative : negate
    rcall   __udivmodsi4        ; do the unsigned div/mod
    rcall   __divmodsi4_neg1    ; correct remainder sign
    rol     __tmp_reg__
    brcc    __divmodsi4_exit
__divmodsi4_neg2:
    com     r_arg2HH
    com     r_arg2HL
    com     r_arg2H
    neg     r_arg2L             ; correct divisor/quotient sign
    sbci    r_arg2H,0xff
    sbci    r_arg2HL,0xff
    sbci    r_arg2HH,0xff
__divmodsi4_exit:
    ret
__divmodsi4_neg1:
    brtc    __divmodsi4_exit
    com     r_arg1HH
    com     r_arg1HL
    com     r_arg1H
    neg     r_arg1L             ; correct dividend/remainder sign
    sbci    r_arg1H, 0xff
    sbci    r_arg1HL,0xff
    sbci    r_arg1HH,0xff
    ret
    .endfunc
#endif /* defined (L_divmodsi4) */

/**********************************
 * This is a prologue subroutine
 **********************************/
#if defined (L_prologue)

    .global __prologue_saves__
    .func   __prologue_saves__
__prologue_saves__:
    push    r2
    push    r3
    push    r4
    push    r5
    push    r6
    push    r7
    push    r8
    push    r9
    push    r10
    push    r11
    push    r12
    push    r13
    push    r14
    push    r15
    push    r16
    push    r17
    push    r28
    push    r29
    in      r28,__SP_L__
    in      r29,__SP_H__
    sub     r28,r26
    sbc     r29,r27
    in      __tmp_reg__,__SREG__
    cli
    out     __SP_H__,r29
    out     __SREG__,__tmp_reg__
    out     __SP_L__,r28
#if defined (__AVR_HAVE_EIJMP_EICALL__)
    eijmp
#else
    ijmp
#endif

    .endfunc
#endif /* defined (L_prologue) */
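
/* Calling-convention sketch for __prologue_saves__, read off the code
   above (the authoritative emitter lives in gcc/config/avr/avr.c): the
   compiler loads the frame size into r27:r26 and the resume point, a
   word address inside the function, into r31:r30, then jumps in,
   possibly past some of the pushes when fewer call-saved registers
   need saving.  The routine subtracts the frame size from SP, keeping
   interrupts disabled across the two-byte SP update, and resumes the
   caller with an (e)ijmp through Z.  */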

/*
 * This is an epilogue subroutine
 */
#if defined (L_epilogue)

    .global __epilogue_restores__
    .func   __epilogue_restores__
__epilogue_restores__:
    ldd     r2,Y+18
    ldd     r3,Y+17
    ldd     r4,Y+16
    ldd     r5,Y+15
    ldd     r6,Y+14
    ldd     r7,Y+13
    ldd     r8,Y+12
    ldd     r9,Y+11
    ldd     r10,Y+10
    ldd     r11,Y+9
    ldd     r12,Y+8
    ldd     r13,Y+7
    ldd     r14,Y+6
    ldd     r15,Y+5
    ldd     r16,Y+4
    ldd     r17,Y+3
    ldd     r26,Y+2
    ldd     r27,Y+1
    add     r28,r30
    adc     r29,__zero_reg__
    in      __tmp_reg__,__SREG__
    cli
    out     __SP_H__,r29
    out     __SREG__,__tmp_reg__
    out     __SP_L__,r28
    mov_l   r28, r26
    mov_h   r29, r27
    ret
    .endfunc
#endif /* defined (L_epilogue) */
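
/* Sketch of the __epilogue_restores__ contract, read off the code
   above (again, avr.c emits the actual call site): the compiler jumps
   in at an offset so that exactly the registers it saved get reloaded
   from the save area via Y, with r30 carrying the matching
   displacement that "add r28,r30" uses to recover the pre-prologue
   stack pointer.  Interrupts are disabled around the SP update, and
   the saved Y pair (fetched into r27:r26 earlier) is moved back into
   r29:r28 just before the ret.  */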

#ifdef L_exit
    .section .fini9,"ax",@progbits
    .global _exit
    .func   _exit
_exit:
    .weak   exit
exit:

    /* Code from .fini8 ... .fini1 sections inserted by ld script.  */

    .section .fini0,"ax",@progbits
    cli
__stop_program:
    rjmp    __stop_program
    .endfunc
#endif /* defined (L_exit) */

#ifdef L_cleanup
    .weak   _cleanup
    .func   _cleanup
_cleanup:
    ret
    .endfunc
#endif /* defined (L_cleanup) */

#ifdef L_tablejump
    .global __tablejump2__
    .func   __tablejump2__
__tablejump2__:
    lsl     r30
    rol     r31
    .global __tablejump__
__tablejump__:
#if defined (__AVR_HAVE_LPMX__)
    lpm     __tmp_reg__, Z+
    lpm     r31, Z
    mov     r30, __tmp_reg__

#if defined (__AVR_HAVE_EIJMP_EICALL__)
    eijmp
#else
    ijmp
#endif

#else
    lpm
    adiw    r30, 1
    push    r0
    lpm
    push    r0
#if defined (__AVR_HAVE_EIJMP_EICALL__)
    push    __zero_reg__
#endif
    ret
#endif
    .endfunc
#endif /* defined (L_tablejump) */
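
/* Sketch of the dispatch (illustration only; pgm_read_word is the
   avr-libc flash read from <avr/pgmspace.h>, used here purely for
   exposition):

       #include <stdint.h>

       void tablejump (const uint16_t *entry)   // Z = entry, in flash
       {
           void (*fn) (void);
           fn = (void (*) (void)) pgm_read_word (entry);
           fn ();
       }

   __tablejump2__ is entered with Z holding a word address and converts
   it to a byte address; __tablejump__ takes Z as a byte address.
   Without LPMX the two target bytes are pushed and reached with "ret";
   on EIJMP (3-byte PC) devices __zero_reg__ supplies the third address
   byte.  */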

#ifdef L_copy_data
    .section .init4,"ax",@progbits
    .global __do_copy_data
__do_copy_data:
#if defined(__AVR_HAVE_ELPMX__)
    ldi     r17, hi8(__data_end)
    ldi     r26, lo8(__data_start)
    ldi     r27, hi8(__data_start)
    ldi     r30, lo8(__data_load_start)
    ldi     r31, hi8(__data_load_start)
    ldi     r16, hh8(__data_load_start)
    out     __RAMPZ__, r16
    rjmp    .L__do_copy_data_start
.L__do_copy_data_loop:
    elpm    r0, Z+
    st      X+, r0
.L__do_copy_data_start:
    cpi     r26, lo8(__data_end)
    cpc     r27, r17
    brne    .L__do_copy_data_loop
#elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__)
    ldi     r17, hi8(__data_end)
    ldi     r26, lo8(__data_start)
    ldi     r27, hi8(__data_start)
    ldi     r30, lo8(__data_load_start)
    ldi     r31, hi8(__data_load_start)
    ldi     r16, hh8(__data_load_start - 0x10000)
.L__do_copy_data_carry:
    inc     r16
    out     __RAMPZ__, r16
    rjmp    .L__do_copy_data_start
.L__do_copy_data_loop:
    elpm
    st      X+, r0
    adiw    r30, 1
    brcs    .L__do_copy_data_carry
.L__do_copy_data_start:
    cpi     r26, lo8(__data_end)
    cpc     r27, r17
    brne    .L__do_copy_data_loop
#elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__)
    ldi     r17, hi8(__data_end)
    ldi     r26, lo8(__data_start)
    ldi     r27, hi8(__data_start)
    ldi     r30, lo8(__data_load_start)
    ldi     r31, hi8(__data_load_start)
    rjmp    .L__do_copy_data_start
.L__do_copy_data_loop:
#if defined (__AVR_HAVE_LPMX__)
    lpm     r0, Z+
#else
    lpm
    adiw    r30, 1
#endif
    st      X+, r0
.L__do_copy_data_start:
    cpi     r26, lo8(__data_end)
    cpc     r27, r17
    brne    .L__do_copy_data_loop
#endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
#endif /* L_copy_data */
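
/* C model of __do_copy_data (illustration only; the symbols come from
   the linker script, and read_flash_byte is a hypothetical stand-in
   for lpm/elpm):

       extern char __data_start[], __data_end[];
       extern const char __data_load_start[];   // load image in flash

       void do_copy_data (void)
       {
           char *dst = __data_start;
           const char *src = __data_load_start;
           while (dst != __data_end)
               *dst++ = read_flash_byte (src++);
       }

   The three #if branches differ only in how the flash read is done:
   ELPM with post-increment, ELPM with manual RAMPZ carry handling, or
   plain LPM.  */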

/* __do_clear_bss is only necessary if there is anything in .bss section.  */

#ifdef L_clear_bss
    .section .init4,"ax",@progbits
    .global __do_clear_bss
__do_clear_bss:
    ldi     r17, hi8(__bss_end)
    ldi     r26, lo8(__bss_start)
    ldi     r27, hi8(__bss_start)
    rjmp    .do_clear_bss_start
.do_clear_bss_loop:
    st      X+, __zero_reg__
.do_clear_bss_start:
    cpi     r26, lo8(__bss_end)
    cpc     r27, r17
    brne    .do_clear_bss_loop
#endif /* L_clear_bss */
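
/* Equivalent C (illustration only; symbols from the linker script):

       extern char __bss_start[], __bss_end[];

       void do_clear_bss (void)
       {
           for (char *p = __bss_start; p != __bss_end; p++)
               *p = 0;
       }
*/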

/* __do_global_ctors and __do_global_dtors are only necessary
   if there are any constructors/destructors.  */

#if defined (__AVR_HAVE_JMP_CALL__)
#define XCALL call
#else
#define XCALL rcall
#endif

#ifdef L_ctors
    .section .init6,"ax",@progbits
    .global __do_global_ctors
#if defined(__AVR_HAVE_RAMPZ__)
__do_global_ctors:
    ldi     r17, hi8(__ctors_start)
    ldi     r16, hh8(__ctors_start)
    ldi     r28, lo8(__ctors_end)
    ldi     r29, hi8(__ctors_end)
    ldi     r20, hh8(__ctors_end)
    rjmp    .L__do_global_ctors_start
.L__do_global_ctors_loop:
    sbiw    r28, 2
    sbc     r20, __zero_reg__
    mov_h   r31, r29
    mov_l   r30, r28
    out     __RAMPZ__, r20
    XCALL   __tablejump_elpm__
.L__do_global_ctors_start:
    cpi     r28, lo8(__ctors_start)
    cpc     r29, r17
    cpc     r20, r16
    brne    .L__do_global_ctors_loop
#else
__do_global_ctors:
    ldi     r17, hi8(__ctors_start)
    ldi     r28, lo8(__ctors_end)
    ldi     r29, hi8(__ctors_end)
    rjmp    .L__do_global_ctors_start
.L__do_global_ctors_loop:
    sbiw    r28, 2
    mov_h   r31, r29
    mov_l   r30, r28
    XCALL   __tablejump__
.L__do_global_ctors_start:
    cpi     r28, lo8(__ctors_start)
    cpc     r29, r17
    brne    .L__do_global_ctors_loop
#endif /* defined(__AVR_HAVE_RAMPZ__) */
#endif /* L_ctors */
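
/* Both loops walk a flash-resident table of code addresses and
   dispatch each entry through __tablejump__ (or __tablejump_elpm__
   when the table may sit above 64 KiB).  Roughly (illustration only;
   in the C model the flash indirection is elided):

       typedef void (*func_ptr) (void);
       extern func_ptr __ctors_start[], __ctors_end[];  // linker symbols

       void do_global_ctors (void)
       {
           for (func_ptr *p = __ctors_end; p > __ctors_start; )
               (*--p) ();
       }

   Constructors run from __ctors_end down to __ctors_start; the
   destructor loops below walk upward from __dtors_start instead.  */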

#ifdef L_dtors
    .section .fini6,"ax",@progbits
    .global __do_global_dtors
#if defined(__AVR_HAVE_RAMPZ__)
__do_global_dtors:
    ldi     r17, hi8(__dtors_end)
    ldi     r16, hh8(__dtors_end)
    ldi     r28, lo8(__dtors_start)
    ldi     r29, hi8(__dtors_start)
    ldi     r20, hh8(__dtors_start)
    rjmp    .L__do_global_dtors_start
.L__do_global_dtors_loop:
    mov_h   r31, r29
    mov_l   r30, r28
    out     __RAMPZ__, r20
    XCALL   __tablejump_elpm__
    adiw    r28, 2              ; walk upward through the table
    adc     r20, __zero_reg__
.L__do_global_dtors_start:
    cpi     r28, lo8(__dtors_end)
    cpc     r29, r17
    cpc     r20, r16
    brne    .L__do_global_dtors_loop
#else
__do_global_dtors:
    ldi     r17, hi8(__dtors_end)
    ldi     r28, lo8(__dtors_start)
    ldi     r29, hi8(__dtors_start)
    rjmp    .L__do_global_dtors_start
.L__do_global_dtors_loop:
    mov_h   r31, r29
    mov_l   r30, r28
    XCALL   __tablejump__
    adiw    r28, 2
.L__do_global_dtors_start:
    cpi     r28, lo8(__dtors_end)
    cpc     r29, r17
    brne    .L__do_global_dtors_loop
#endif /* defined(__AVR_HAVE_RAMPZ__) */
#endif /* L_dtors */

#ifdef L_tablejump_elpm
    .global __tablejump_elpm__
    .func   __tablejump_elpm__
__tablejump_elpm__:
#if defined (__AVR_HAVE_ELPM__)
#if defined (__AVR_HAVE_LPMX__)
    elpm    __tmp_reg__, Z+
    elpm    r31, Z
    mov     r30, __tmp_reg__
#if defined (__AVR_HAVE_EIJMP_EICALL__)
    eijmp
#else
    ijmp
#endif

#else
    elpm
    adiw    r30, 1
    push    r0
    elpm
    push    r0
#if defined (__AVR_HAVE_EIJMP_EICALL__)
    push    __zero_reg__
#endif
    ret
#endif
#endif /* defined (__AVR_HAVE_ELPM__) */
    .endfunc
#endif /* defined (L_tablejump_elpm) */