1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
|
/* ARM EABI compliant unwinding routines.
Copyright (C) 2004, 2005, 2009 Free Software Foundation, Inc.
Contributed by Paul Brook
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "unwind.h"
/* We add a prototype for abort here to avoid creating a dependency on
target headers. */
extern void abort (void);
/* Definitions for C++ runtime support routines. We make these weak
declarations to avoid pulling in libsupc++ unnecessarily. */
typedef unsigned char bool;
typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */
enum __cxa_type_match_result
{
ctm_failed = 0,
ctm_succeeded = 1,
ctm_succeeded_with_ptr_to_base = 2
};
void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
enum __cxa_type_match_result __attribute__((weak)) __cxa_type_match
(_Unwind_Control_Block *ucbp, const type_info *rttip,
bool is_reference, void **matched_object);
_Unwind_Ptr __attribute__((weak))
__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);
/* Misc constants. */
#define R_IP 12
#define R_SP 13
#define R_LR 14
#define R_PC 15
#define EXIDX_CANTUNWIND 1
#define uint32_highbit (((_uw) 1) << 31)
#define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
#define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)
/* Snapshot of the 16 ARM core registers r0-r15.  */
struct core_regs
{
  _uw r[16];
};

/* We use normal integer types here to avoid the compiler generating
   coprocessor instructions. */
struct vfp_regs
{
  _uw64 d[16];	/* VFP registers d0-d15.  */
  _uw pad;	/* Holds the format word written by FSTMX.  */
};

struct vfpv3_regs
{
  /* Always populated via VSTM, so no need for the "pad" field from
     vfp_regs (which is used to store the format word for FSTMX). */
  _uw64 d[16];	/* VFPv3 registers d16-d31.  */
};

/* One FPA register, stored as three words.  */
struct fpa_reg
{
  _uw w[3];
};

/* The eight FPA registers f0-f7.  */
struct fpa_regs
{
  struct fpa_reg f[8];
};

/* iWMMXt data registers.  */
struct wmmxd_regs
{
  _uw64 wd[16];
};

/* iWMMXt control registers.  */
struct wmmxc_regs
{
  _uw wc[4];
};
/* Unwind descriptors. */

/* 16-bit scope descriptor header (length/offset packed into halfwords).  */
typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

/* 32-bit scope descriptor header (used by personality routine 2,
   see __gnu_unwind_pr_common).  */
typedef struct
{
  _uw length;
  _uw offset;
} EHT32;
/* The ABI specifies that the unwind routines may only use core registers,
   except when actually manipulating coprocessor state.  This allows
   us to write one implementation that works on all platforms by
   demand-saving coprocessor registers.

   During unwinding we hold the coprocessor state in the actual hardware
   registers and allocate demand-save areas for use during phase1
   unwinding.  */

typedef struct
{
  /* The first fields must be the same as a phase2_vrs.  */
  _uw demand_save_flags;
  struct core_regs core;
  _uw prev_sp; /* Only valid during forced unwinding.  */
  struct vfp_regs vfp;
  struct vfpv3_regs vfp_regs_16_to_31;
  struct fpa_regs fpa;
  struct wmmxd_regs wmmxd;
  struct wmmxc_regs wmmxc;
} phase1_vrs;

/* Bits in demand_save_flags.  A SET bit means the corresponding state
   has NOT yet been demand-saved (it is still live in hardware).  */
#define DEMAND_SAVE_VFP 1	/* VFP state has been saved if not set */
#define DEMAND_SAVE_VFP_D 2	/* VFP state is for FLDMD/FSTMD if set */
#define DEMAND_SAVE_VFP_V3 4    /* VFPv3 state for regs 16 .. 31 has
                                   been saved if not set */
#define DEMAND_SAVE_WMMXD 8	/* iWMMXt data registers have been
				   saved if not set.  */
#define DEMAND_SAVE_WMMXC 16	/* iWMMXt control registers have been
				   saved if not set.  */

/* This must match the structure created by the assembly wrappers.  */
typedef struct
{
  _uw demand_save_flags;
  struct core_regs core;
} phase2_vrs;
/* An exception index table entry.  */
typedef struct __EIT_entry
{
  _uw fnoffset;	/* Self-relative offset to the function start (decoded
		   via selfrel_offset31).  */
  _uw content;	/* EXIDX_CANTUNWIND, inline unwind data (high bit set),
		   or a self-relative offset to the unwind table.  */
} __EIT_entry;
/* Assembly helper functions. */
/* Restore core register state. Never returns. */
void __attribute__((noreturn)) restore_core_regs (struct core_regs *);
/* Coprocessor register state manipulation functions. */
/* Routines for FLDMX/FSTMX format... */
void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);
void __gnu_Unwind_Save_WMMXD (struct wmmxd_regs * p);
void __gnu_Unwind_Restore_WMMXD (struct wmmxd_regs * p);
void __gnu_Unwind_Save_WMMXC (struct wmmxc_regs * p);
void __gnu_Unwind_Restore_WMMXC (struct wmmxc_regs * p);
/* ...and those for FLDMD/FSTMD format... */
void __gnu_Unwind_Save_VFP_D (struct vfp_regs * p);
void __gnu_Unwind_Restore_VFP_D (struct vfp_regs * p);
/* ...and those for VLDM/VSTM format, saving/restoring only registers
16 through 31. */
void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs * p);
void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs * p);
/* Restore coprocessor state after phase1 unwinding.  Each block of
   state is reloaded only if it was demand-saved (flag bit clear).  */
static void
restore_non_core_regs (phase1_vrs * vrs)
{
  _uw flags = vrs->demand_save_flags;

  if (!(flags & DEMAND_SAVE_VFP))
    {
      /* Restore in whichever format the state was saved in.  */
      if (flags & DEMAND_SAVE_VFP_D)
	__gnu_Unwind_Restore_VFP_D (&vrs->vfp);
      else
	__gnu_Unwind_Restore_VFP (&vrs->vfp);
    }
  if (!(flags & DEMAND_SAVE_VFP_V3))
    __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
  if (!(flags & DEMAND_SAVE_WMMXD))
    __gnu_Unwind_Restore_WMMXD (&vrs->wmmxd);
  if (!(flags & DEMAND_SAVE_WMMXC))
    __gnu_Unwind_Restore_WMMXC (&vrs->wmmxc);
}
/* A better way to do this would probably be to compare the absolute address
with a segment relative relocation of the same symbol. */
extern int __text_start;
extern int __data_start;
/* The exception index table location. */
extern __EIT_entry __exidx_start;
extern __EIT_entry __exidx_end;
/* ABI defined personality routines. */
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State,
_Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State,
_Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State,
_Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
/* ABI defined routine to store a virtual register to memory.
   Only 32-bit reads of the core registers r0-r15 are supported;
   coprocessor register classes report not-implemented.  */
_Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context,
				    _Unwind_VRS_RegClass regclass,
				    _uw regno,
				    _Unwind_VRS_DataRepresentation representation,
				    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  if (regclass == _UVRSC_CORE)
    {
      if (representation != _UVRSD_UINT32 || regno > 15)
	return _UVRSR_FAILED;
      *(_uw *) valuep = vrs->core.r[regno];
      return _UVRSR_OK;
    }

  if (regclass == _UVRSC_VFP
      || regclass == _UVRSC_FPA
      || regclass == _UVRSC_WMMXD
      || regclass == _UVRSC_WMMXC)
    return _UVRSR_NOT_IMPLEMENTED;

  return _UVRSR_FAILED;
}
/* ABI defined function to load a virtual register from memory.
   Only 32-bit writes of the core registers r0-r15 are supported;
   coprocessor register classes report not-implemented.  */
_Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context,
				    _Unwind_VRS_RegClass regclass,
				    _uw regno,
				    _Unwind_VRS_DataRepresentation representation,
				    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  if (regclass == _UVRSC_CORE)
    {
      if (representation != _UVRSD_UINT32 || regno > 15)
	return _UVRSR_FAILED;
      vrs->core.r[regno] = *(_uw *) valuep;
      return _UVRSR_OK;
    }

  if (regclass == _UVRSC_VFP
      || regclass == _UVRSC_FPA
      || regclass == _UVRSC_WMMXD
      || regclass == _UVRSC_WMMXC)
    return _UVRSR_NOT_IMPLEMENTED;

  return _UVRSR_FAILED;
}
/* ABI defined function to pop registers off the stack.
   DISCRIMINATOR is a register bitmask for _UVRSC_CORE and _UVRSC_WMMXC,
   and (start << 16) | count for _UVRSC_VFP and _UVRSC_WMMXD.
   Coprocessor state is demand-saved on first touch so that phase1
   unwinding can be undone by restore_non_core_regs.  */
_Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
				    _Unwind_VRS_RegClass regclass,
				    _uw discriminator,
				    _Unwind_VRS_DataRepresentation representation)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      {
	_uw *ptr;
	_uw mask;
	int i;

	if (representation != _UVRSD_UINT32)
	  return _UVRSR_FAILED;

	mask = discriminator & 0xffff;
	ptr = (_uw *) vrs->core.r[R_SP];
	/* Pop the requested registers.  */
	for (i = 0; i < 16; i++)
	  {
	    if (mask & (1 << i))
	      vrs->core.r[i] = *(ptr++);
	  }
	/* Writeback the stack pointer value if it wasn't restored.  */
	if ((mask & (1 << R_SP)) == 0)
	  vrs->core.r[R_SP] = (_uw) ptr;
      }
      return _UVRSR_OK;

    case _UVRSC_VFP:
      {
	_uw start = discriminator >> 16;
	_uw count = discriminator & 0xffff;
	struct vfp_regs tmp;
	struct vfpv3_regs tmp_16_to_31;
	int tmp_count;
	_uw *sp;
	_uw *dest;
	int num_vfpv3_regs = 0;

	/* We use an approximation here by bounding _UVRSD_DOUBLE
	   register numbers at 32 always, since we can't detect if
	   VFPv3 isn't present (in such a case the upper limit is 16).  */
	if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
	    || start + count > (representation == _UVRSD_VFPX ? 16 : 32)
	    || (representation == _UVRSD_VFPX && start >= 16))
	  return _UVRSR_FAILED;

	/* Check if we're being asked to pop VFPv3-only registers
	   (numbers 16 through 31).  */
	if (start >= 16)
	  num_vfpv3_regs = count;
	else if (start + count > 16)
	  num_vfpv3_regs = start + count - 16;

	if (num_vfpv3_regs && representation != _UVRSD_DOUBLE)
	  return _UVRSR_FAILED;

	/* Demand-save coprocessor registers for stage1.  */
	if (start < 16 && (vrs->demand_save_flags & DEMAND_SAVE_VFP))
	  {
	    vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;

	    if (representation == _UVRSD_DOUBLE)
	      {
		/* Save in FLDMD/FSTMD format.  */
		vrs->demand_save_flags |= DEMAND_SAVE_VFP_D;
		__gnu_Unwind_Save_VFP_D (&vrs->vfp);
	      }
	    else
	      {
		/* Save in FLDMX/FSTMX format.  */
		vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_D;
		__gnu_Unwind_Save_VFP (&vrs->vfp);
	      }
	  }

	if (num_vfpv3_regs > 0
	    && (vrs->demand_save_flags & DEMAND_SAVE_VFP_V3))
	  {
	    vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_V3;
	    __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current VFP registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.
	   For _UVRSD_VFPX we assume FSTMX standard format 1.  */
	if (representation == _UVRSD_VFPX)
	  __gnu_Unwind_Save_VFP (&tmp);
	else
	  {
	    /* Save registers 0 .. 15 if required.  */
	    if (start < 16)
	      __gnu_Unwind_Save_VFP_D (&tmp);

	    /* Save VFPv3 registers 16 .. 31 if required.  */
	    if (num_vfpv3_regs)
	      __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31);
	  }

	/* Work out how many registers below register 16 need popping.  */
	tmp_count = num_vfpv3_regs > 0 ? 16 - start : count;

	/* Copy registers below 16, if needed.
	   The stack address is only guaranteed to be word aligned, so
	   we can't use doubleword copies.  */
	sp = (_uw *) vrs->core.r[R_SP];
	if (tmp_count > 0)
	  {
	    tmp_count *= 2;	/* Two words per double.  */
	    dest = (_uw *) &tmp.d[start];
	    while (tmp_count--)
	      *(dest++) = *(sp++);
	  }

	/* Copy VFPv3 registers numbered >= 16, if needed.  */
	if (num_vfpv3_regs > 0)
	  {
	    /* num_vfpv3_regs is needed below, so copy it.  */
	    int tmp_count_2 = num_vfpv3_regs * 2;
	    int vfpv3_start = start < 16 ? 16 : start;

	    dest = (_uw *) &tmp_16_to_31.d[vfpv3_start - 16];
	    while (tmp_count_2--)
	      *(dest++) = *(sp++);
	  }

	/* Skip the format word space if using FLDMX/FSTMX format.  */
	if (representation == _UVRSD_VFPX)
	  sp++;

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	if (representation == _UVRSD_VFPX)
	  __gnu_Unwind_Restore_VFP (&tmp);
	else
	  {
	    /* Restore registers 0 .. 15 if required.  */
	    if (start < 16)
	      __gnu_Unwind_Restore_VFP_D (&tmp);

	    /* Restore VFPv3 registers 16 .. 31 if required.  */
	    if (num_vfpv3_regs > 0)
	      __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31);
	  }
      }
      return _UVRSR_OK;

    case _UVRSC_FPA:
      return _UVRSR_NOT_IMPLEMENTED;

    case _UVRSC_WMMXD:
      {
	_uw start = discriminator >> 16;
	_uw count = discriminator & 0xffff;
	struct wmmxd_regs tmp;
	_uw *sp;
	_uw *dest;

	if ((representation != _UVRSD_UINT64) || start + count > 16)
	  return _UVRSR_FAILED;

	if (vrs->demand_save_flags & DEMAND_SAVE_WMMXD)
	  {
	    /* Demand-save registers for stage1.  */
	    vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXD;
	    __gnu_Unwind_Save_WMMXD (&vrs->wmmxd);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current WMMXD registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.  */
	__gnu_Unwind_Save_WMMXD (&tmp);

	/* The stack address is only guaranteed to be word aligned, so
	   we can't use doubleword copies.  */
	sp = (_uw *) vrs->core.r[R_SP];
	dest = (_uw *) &tmp.wd[start];
	count *= 2;	/* Two words per double.  */
	while (count--)
	  *(dest++) = *(sp++);

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	__gnu_Unwind_Restore_WMMXD (&tmp);
      }
      return _UVRSR_OK;

    case _UVRSC_WMMXC:
      {
	int i;
	struct wmmxc_regs tmp;
	_uw *sp;

	if ((representation != _UVRSD_UINT32) || discriminator > 16)
	  return _UVRSR_FAILED;

	if (vrs->demand_save_flags & DEMAND_SAVE_WMMXC)
	  {
	    /* Demand-save registers for stage1.  */
	    vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXC;
	    __gnu_Unwind_Save_WMMXC (&vrs->wmmxc);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current WMMXC registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.  */
	__gnu_Unwind_Save_WMMXC (&tmp);

	/* DISCRIMINATOR is a bitmask selecting which of the four
	   control registers to pop.  */
	sp = (_uw *) vrs->core.r[R_SP];
	for (i = 0; i < 4; i++)
	  if (discriminator & (1 << i))
	    tmp.wc[i] = *(sp++);

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	__gnu_Unwind_Restore_WMMXC (&tmp);
      }
      return _UVRSR_OK;

    default:
      return _UVRSR_FAILED;
    }
}
/* Core unwinding functions. */
/* Calculate the address encoded by a 31-bit self-relative offset at
   address P.  The stored value is a 31-bit signed offset; bit 30 is
   propagated into bit 31 to sign-extend it to 32 bits.  */
static inline _uw
selfrel_offset31 (const _uw *p)
{
  _uw offset = *p;

  /* Sign extend to 32 bits.  */
  if (offset & (1u << 30))
    offset |= 1u << 31;
  else
    offset &= ~(1u << 31);

  return offset + (_uw) p;
}
/* Perform a binary search for RETURN_ADDRESS in TABLE.  The table
   contains NREC entries, sorted by (self-relative) function start
   address.  Returns the matching entry, or NULL if the address lies
   before the first entry.  */
static const __EIT_entry *
search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
{
  int lo, hi;

  if (nrec == 0)
    return (__EIT_entry *) 0;

  lo = 0;
  hi = nrec - 1;
  for (;;)
    {
      int mid = (lo + hi) / 2;
      _uw first = selfrel_offset31 (&table[mid].fnoffset);
      _uw last;

      /* An entry covers addresses up to (but excluding) the start of
	 the next function; the final entry covers everything above.  */
      if (mid == nrec - 1)
	last = (_uw)0 - 1;
      else
	last = selfrel_offset31 (&table[mid + 1].fnoffset) - 1;

      if (return_address < first)
	{
	  if (mid == lo)
	    return (__EIT_entry *) 0;
	  hi = mid - 1;
	}
      else if (return_address <= last)
	return &table[mid];
      else
	lo = mid + 1;
    }
}
/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_FAILURE if an error occurred, _URC_OK on success.  */
static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  /* __gnu_Unwind_Find_exidx is a weak symbol; if it is provided, use
     it to find the index table for this address, otherwise use the
     statically-linked table bounded by __exidx_start/__exidx_end.  */
  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
							    &nrec);
      if (!eitp)
	{
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_END_OF_STACK;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */
  if (eitp->content & uint32_highbit)
    {
      /* It is immediate data.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
	 offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
	(_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines.  The index is held
	 in bits 24-27 of the header word.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      if (idx == 0)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
      else if (idx == 1)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
      else if (idx == 2)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
      else
	{ /* Failed */
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* Execute region offset to PR */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}
/* Perform phase2 unwinding.  VRS is the initial virtual register
   state.  Repeatedly invokes each frame's personality routine with
   _US_UNWIND_FRAME_STARTING until one installs a handler context;
   never returns (any failure aborts).  */
static void __attribute__((noreturn))
unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
{
  _Unwind_Reason_Code pr_result;

  for (;;)
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK)
	abort ();

      UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC];

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);

      if (pr_result != _URC_CONTINUE_UNWIND)
	break;
    }

  if (pr_result != _URC_INSTALL_CONTEXT)
    abort();

  restore_core_regs (&vrs->core);
}
/* Perform phase2 forced unwinding.  Single-pass unwind that calls the
   stop function registered in UCBP before unwinding each frame.
   RESUMING is nonzero when continuing after a cleanup has run.
   Returns only on failure; success ends in restore_core_regs, which
   does not return.  */
static _Unwind_Reason_Code
unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
		      int resuming)
{
  _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
  void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
  _Unwind_Reason_Code pr_result = 0;
  /* We use phase1_vrs here even though we do not demand save, for the
     prev_sp field.  */
  phase1_vrs saved_vrs, next_vrs;

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* We don't need to demand-save the non-core registers, because we
     unwind in a single pass.  */
  saved_vrs.demand_save_flags = 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      _Unwind_State action;
      _Unwind_Reason_Code entry_code;
      _Unwind_Reason_Code stop_code;

      /* Find the entry for this routine.  */
      entry_code = get_eit_entry (ucbp, saved_vrs.core.r[R_PC]);

      if (resuming)
	{
	  action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
	  resuming = 0;
	}
      else
	action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;

      if (entry_code == _URC_OK)
	{
	  UCB_SAVED_CALLSITE_ADDR (ucbp) = saved_vrs.core.r[R_PC];

	  /* Unwind into NEXT_VRS so that SAVED_VRS still describes the
	     current frame when the stop function runs below.  */
	  next_vrs = saved_vrs;

	  /* Call the pr to decide what to do.  */
	  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	    (action, ucbp, (void *) &next_vrs);

	  saved_vrs.prev_sp = next_vrs.core.r[R_SP];
	}
      else
	{
	  /* Treat any failure as the end of unwinding, to cope more
	     gracefully with missing EH information.  Mixed EH and
	     non-EH within one object will usually result in failure,
	     because the .ARM.exidx tables do not indicate the end
	     of the code to which they apply; but mixed EH and non-EH
	     shared objects should return an unwind failure at the
	     entry of a non-EH shared object.  */
	  action |= _US_END_OF_STACK;
	  saved_vrs.prev_sp = saved_vrs.core.r[R_SP];
	}

      stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
			   (void *)&saved_vrs, stop_arg);
      if (stop_code != _URC_NO_REASON)
	return _URC_FAILURE;

      if (entry_code != _URC_OK)
	return entry_code;

      /* Commit the unwound state for the next iteration.  */
      saved_vrs = next_vrs;
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  restore_core_regs (&saved_vrs.core);
}
/* This is a very limited implementation of _Unwind_GetCFA.  It returns
   the stack pointer as it is about to be unwound, and is only valid
   while calling the stop function during forced unwinding.  If the
   current personality routine result is going to run a cleanup, this
   will not be the CFA; but when the frame is really unwound, it will
   be.  */
_Unwind_Word
_Unwind_GetCFA (_Unwind_Context *context)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  /* prev_sp is maintained by unwind_phase2_forced.  */
  return vrs->prev_sp;
}
/* Perform phase1 unwinding.  UCBP is the exception being thrown, and
   entry_VRS is the register state on entry to _Unwind_RaiseException.
   Searches for a handler without modifying the real machine state; if
   one is found, phase2 unwinding runs and does not return.  */
_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
			     phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code pr_result;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags: all coprocessor state is still live and will
     be saved lazily by _Unwind_VRS_Pop if a frame touches it.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
	return _URC_FAILURE;

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  /* We've unwound as far as we want to go, so restore the original
     register state.  */
  restore_non_core_regs (&saved_vrs);

  if (pr_result != _URC_HANDLER_FOUND)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  /* Phase 2: actually unwind.  unwind_phase2 is noreturn.  */
  unwind_phase2 (ucbp, entry_vrs);
}
/* Begin a forced unwind.  Records the stop function and its argument
   in the UCB, rewinds the PC to the call site, and runs a single-pass
   (phase2) forced unwind.  */
_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
			   _Unwind_Stop_Fn, void *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
			   _Unwind_Stop_Fn stop_fn, void *stop_arg,
			   phase2_vrs *entry_vrs)
{
  /* Stash the stop function: its presence also marks this exception
     as a forced unwind for __gnu_Unwind_Resume.  */
  UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
  UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);

/* Resume unwinding after a cleanup has been run.  UCBP is the exception
   being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_Resume.  Does not return.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the saved address.  */
  entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);

  if (UCB_FORCED_STOP_FN (ucbp))
    {
      /* This is a forced unwind; continue it in a single pass.  */
      unwind_phase2_forced (ucbp, entry_vrs, 1);

      /* We can't return failure at this point.  */
      abort ();
    }

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
    (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.
	 restore_core_regs is declared noreturn, so the apparent
	 fall-through below is never taken.  */
      restore_core_regs (&entry_vrs->core);

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  unwind_phase2 is also
	 noreturn.  */
      unwind_phase2 (ucbp, entry_vrs);

    default:
      abort ();
    }
}
_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *);

/* Rethrow an exception.  Ordinary exceptions restart the normal
   two-phase propagation; a forced unwind in progress continues in a
   single pass from the next frame.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp,
				phase2_vrs * entry_vrs)
{
  if (UCB_FORCED_STOP_FN (ucbp))
    {
      /* Set the pc to the call site and continue unwinding the next
	 frame.  */
      entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
      return unwind_phase2_forced (ucbp, entry_vrs, 0);
    }

  return __gnu_Unwind_RaiseException (ucbp, entry_vrs);
}
/* Clean up an exception object when unwinding is complete.
   This implementation has nothing to do; the function exists so
   callers have a defined hook.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}
/* Get the _Unwind_Control_Block from an _Unwind_Context.  The UCB
   pointer is stashed in the virtual IP (r12) register (see
   __gnu_Unwind_Backtrace).  */
static inline _Unwind_Control_Block *
unwind_UCB_from_context (_Unwind_Context * context)
{
  _uw ucb_addr = _Unwind_GetGR (context, R_IP);

  return (_Unwind_Control_Block *) ucb_addr;
}
/* Free an exception by invoking its registered cleanup callback,
   if any.  */
void
_Unwind_DeleteException (_Unwind_Exception * exc)
{
  if (exc->exception_cleanup != 0)
    (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
}
/* Perform stack backtrace through unwind data.  TRACE is called once
   per frame; unwinding stops when TRACE returns something other than
   _URC_NO_REASON or the personality routine reports end-of-stack or
   failure.  */
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs);

_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code code;

  /* A local UCB used purely as scratch state for the personality
     routines; no exception is actually thrown.  */
  _Unwind_Control_Block ucb;
  _Unwind_Control_Block *ucbp = &ucb;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* The dwarf unwinder assumes the context structure holds things
	 like the function and LSDA pointers.  The ARM implementation
	 caches these in the exception header (UCB).  To avoid
	 rewriting everything we make the virtual IP register point at
	 the UCB.  */
      _Unwind_SetGR((_Unwind_Context *)&saved_vrs, 12, (_Unwind_Ptr) ucbp);

      /* Call trace function.  */
      if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument)
	  != _URC_NO_REASON)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* Call the pr to decide what to do.  */
      code = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND,
	 ucbp, (void *) &saved_vrs);
    }
  while (code != _URC_END_OF_STACK
	 && code != _URC_FAILURE);

  /* Undo any phase1 demand-saving before returning.  */
  restore_non_core_regs (&saved_vrs);
  return code;
}
/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.

   Walks the exception-handling table (EHT) entry for the current frame:
   first the optional descriptor list (cleanups, catch handlers, exception
   specifications), then the frame-unwinding instructions.  Returns one of
   the ARM EHABI _URC_* result codes.  */
static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context,
			int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;
  int forced_unwind = state & _US_FORCE_UNWIND;

  /* Strip the force-unwind bit (saved above) so STATE can be compared
     directly against the _US_* action values below.  */
  state &= _US_ACTION_MASK;

  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      /* pr0: the first EHT word holds the PR index in its top byte and
	 three unwind-instruction bytes below it.  */
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else
    {
      /* pr1/pr2: bits 16-23 of the first word give the number of
	 additional 4-byte unwind-instruction words; two instruction
	 bytes remain in this word.  Advance DATA past the extra words
	 so it points at the descriptor list.  */
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }

  /* Restore the saved pointer.  When resuming after a cleanup ran in a
     previous invocation, continue from the descriptor recorded in the
     cleanup cache rather than rescanning from the start.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors.  The list is terminated by a zero word.  */
      while (*data)
	{
	  _uw addr;
	  _uw fnstart;

	  if (id == 2)
	    {
	      /* pr2 uses 32-bit length and offset fields.  */
	      len = ((EHT32 *) data)->length;
	      offset = ((EHT32 *) data)->offset;
	      data += 2;
	    }
	  else
	    {
	      /* pr0/pr1 pack 16-bit length and offset into one word.  */
	      len = ((EHT16 *) data)->length;
	      offset = ((EHT16 *) data)->offset;
	      data++;
	    }

	  /* The low bit of OFFSET and of LEN together encode the
	     descriptor kind (see the switch below); the remaining bits
	     describe the guarded code region.  Check whether the saved
	     PC for this frame falls inside that region.  */
	  fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
	  addr = _Unwind_GetGR (context, R_PC);
	  in_range = (fnstart <= addr && addr < fnstart + (len & ~1));

	  switch (((offset & 1) << 1) | (len & 1))
	    {
	    case 0:
	      /* Cleanup.  */
	      if (state != _US_VIRTUAL_UNWIND_FRAME
		  && in_range)
		{
		  /* Cleanup in range, and we are running cleanups.  */
		  _uw lp;

		  /* Landing pad address is 31-bit pc-relative offset.  */
		  lp = selfrel_offset31 (data);
		  data++;
		  /* Save the exception data pointer.  The cleanup's
		     _Unwind_Resume re-enters this routine with
		     _US_UNWIND_FRAME_RESUME, which restores DATA from
		     this slot (see above).  */
		  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
		  if (!__cxa_begin_cleanup (ucbp))
		    return _URC_FAILURE;
		  /* Setup the VRS to enter the landing pad.  */
		  _Unwind_SetGR (context, R_PC, lp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Cleanup not in range, or we are in stage 1.  */
	      data++;
	      break;

	    case 1:
	      /* Catch handler.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  /* Phase 1 (virtual unwind): look for a matching
		     handler and record a propagation barrier.  */
		  if (in_range)
		    {
		      /* Check for a barrier.  */
		      _uw rtti;
		      bool is_reference = (data[0] & uint32_highbit) != 0;
		      void *matched;
		      enum __cxa_type_match_result match_type;

		      /* Check for no-throw areas.  data[1] == -2 marks
			 a region the exception must not escape.  */
		      if (data[1] == (_uw) -2)
			return _URC_FAILURE;

		      /* The thrown object immediately follows the ECB.  */
		      matched = (void *)(ucbp + 1);
		      if (data[1] != (_uw) -1)
			{
			  /* Match a catch specification.  */
			  rtti = _Unwind_decode_target2 ((_uw) &data[1]);
			  match_type = __cxa_type_match (ucbp,
							 (type_info *) rtti,
							 is_reference,
							 &matched);
			}
		      else
			/* data[1] == -1 denotes a catch-all handler.  */
			match_type = ctm_succeeded;

		      if (match_type)
			{
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  // ctm_succeeded_with_ptr_to_base really
			  // means _c_t_m indirected the pointer
			  // object.  We have to reconstruct the
			  // additional pointer layer by using a temporary.
			  if (match_type == ctm_succeeded_with_ptr_to_base)
			    {
			      ucbp->barrier_cache.bitpattern[2]
				= (_uw) matched;
			      ucbp->barrier_cache.bitpattern[0]
				= (_uw) &ucbp->barrier_cache.bitpattern[2];
			    }
			  else
			    ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  /* bitpattern[1] identifies this descriptor so
			     phase 2 can recognize the barrier.  */
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or not matched.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;

		  /* Setup for entry to the handler.  */
		  lp = selfrel_offset31 (data);
		  _Unwind_SetGR (context, R_PC, lp);
		  /* R0 carries the control block into the handler.  */
		  _Unwind_SetGR (context, 0, (_uw) ucbp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Catch handler not matched.  Advance to the next descriptor.  */
	      data += 2;
	      break;

	    case 2:
	      /* rtti_count is the number of type entries; the high bit
		 of data[0] flags an explicit landing pad (see below).  */
	      rtti_count = data[0] & 0x7fffffff;
	      /* Exception specification.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  /* During a forced unwind, only an empty (nothrow)
		     specification is considered.  */
		  if (in_range && (!forced_unwind || !rtti_count))
		    {
		      /* Match against the exception specification.  */
		      _uw i;
		      _uw rtti;
		      void *matched;

		      for (i = 0; i < rtti_count; i++)
			{
			  matched = (void *)(ucbp + 1);
			  rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
			  if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
						&matched))
			    break;
			}

		      if (i == rtti_count)
			{
			  /* Exception does not match the spec.  */
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or exception is permitted.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;

		  /* Record the RTTI list for __cxa_call_unexpected.  */
		  ucbp->barrier_cache.bitpattern[1] = rtti_count;
		  ucbp->barrier_cache.bitpattern[2] = 0;
		  ucbp->barrier_cache.bitpattern[3] = 4;
		  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

		  if (data[0] & uint32_highbit)
		    {
		      /* High bit set: an explicit landing pad follows
			 the RTTI list; enter it directly.  */
		      data += rtti_count + 1;
		      /* Setup for entry to the handler.  */
		      lp = selfrel_offset31 (data);
		      data++;
		      _Unwind_SetGR (context, R_PC, lp);
		      _Unwind_SetGR (context, 0, (_uw) ucbp);
		      return _URC_INSTALL_CONTEXT;
		    }
		  else
		    /* No landing pad: unwind this frame first, then
		       enter __cxa_call_unexpected (handled after the
		       unwind instructions run, below).  */
		    phase2_call_unexpected_after_unwind = 1;
		}
	      /* Skip the optional landing-pad word plus the count word
		 and RTTI entries to reach the next descriptor.  */
	      if (data[0] & uint32_highbit)
		data++;
	      data += rtti_count + 1;
	      break;

	    default:
	      /* Should never happen.  */
	      return _URC_FAILURE;
	    }
	  /* Finished processing this descriptor.  */
	}
    }

  /* Execute this frame's unwind instructions to pop it from the
     virtual register set.  */
  if (__gnu_unwind_execute (context, &uws) != _URC_OK)
    return _URC_FAILURE;

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}
/* ABI defined personality routine entry points.  */

/* Personality routine index 0.  Delegates to the common
   implementation, which decodes the pr0 EHT entry layout.  */
_Unwind_Reason_Code
__aeabi_unwind_cpp_pr0 (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context)
{
  const int pr_index = 0;

  return __gnu_unwind_pr_common (state, ucbp, context, pr_index);
}
/* Personality routine index 1.  Delegates to the common
   implementation, which decodes the pr1 EHT entry layout.  */
_Unwind_Reason_Code
__aeabi_unwind_cpp_pr1 (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context)
{
  const int pr_index = 1;

  return __gnu_unwind_pr_common (state, ucbp, context, pr_index);
}
/* Personality routine index 2.  Delegates to the common
   implementation, which decodes the pr2 EHT entry layout.  */
_Unwind_Reason_Code
__aeabi_unwind_cpp_pr2 (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context)
{
  const int pr_index = 2;

  return __gnu_unwind_pr_common (state, ucbp, context, pr_index);
}
|