1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
|
/* Unaligned memory access functionality.
Copyright (C) 2000-2014, 2018 Red Hat, Inc.
This file is part of elfutils.
This file is free software; you can redistribute it and/or modify
it under the terms of either
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at
your option) any later version
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at
your option) any later version
or both in parallel, as here.
elfutils is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see <http://www.gnu.org/licenses/>. */
#ifndef _MEMORY_ACCESS_H
#define _MEMORY_ACCESS_H 1
#include <byteswap.h>
#include <endian.h>
#include <limits.h>
#include <stdint.h>
/* Number decoding macros. See 7.6 Variable Length Data. */
/* Maximum number of bytes an (u)LEB128 encoding of a value of type
   VAR can occupy: one byte per 7 bits, rounded up.  */
#define len_leb128(var) ((8 * sizeof (var) + 6) / 7)
/* Clamp TYPE_LEN (the encoding's maximum byte count) to the number of
   bytes actually available in [ADDR, END).  Returns 0 when ADDR is not
   below END.  */
static inline size_t
__libdw_max_len_leb128 (const size_t type_len,
			const unsigned char *addr, const unsigned char *end)
{
  size_t avail = 0;
  if (likely (addr < end))
    avail = end - addr;
  if (likely (type_len <= avail))
    return type_len;
  return avail;
}
/* Maximum number of uleb128 bytes that may be consumed between ADDR
   and END when decoding into a uint64_t.  */
static inline size_t
__libdw_max_len_uleb128 (const unsigned char *addr, const unsigned char *end)
{
  return __libdw_max_len_leb128 (len_leb128 (uint64_t), addr, end);
}
/* Maximum number of sleb128 bytes that may be consumed between ADDR
   and END when decoding into an int64_t.  One 7-bit group fewer than
   the unsigned case, so the loop's shifts never reach the sign bit.  */
static inline size_t
__libdw_max_len_sleb128 (const unsigned char *addr, const unsigned char *end)
{
  return __libdw_max_len_leb128 (len_leb128 (int64_t) - 1, addr, end);
}
/* Consume one LEB128 byte from ADDR (post-incrementing it) and merge
   its low 7 bits into VAR at bit position NTH * 7.  A clear high bit
   marks the final byte, in which case this RETURNS VAR from the
   enclosing function — so this macro is only usable inside the
   __libdw_get_*leb128* decoders below.  */
#define get_uleb128_step(var, addr, nth) \
do { \
unsigned char __b = *(addr)++; \
(var) |= (typeof (var)) (__b & 0x7f) << ((nth) * 7); \
if (likely ((__b & 0x80) == 0)) \
return (var); \
} while (0)
/* Decode an unsigned LEB128 number starting at *ADDRP, never reading
   at or past END.  Advances *ADDRP past the consumed bytes.  Returns
   UINT64_MAX if the encoding is truncated or too long.  */
static inline uint64_t
__libdw_get_uleb128 (const unsigned char **addrp, const unsigned char *end)
{
  uint64_t result = 0;
  /* The first byte is handled outside the loop; single-byte values
     are the common case and this helps the compiler.  */
  get_uleb128_step (result, *addrp, 0);
  /* Bound further steps by both the type's capacity and END.  */
  size_t steps = __libdw_max_len_uleb128 (*addrp - 1, end);
  for (size_t n = 1; n < steps; ++n)
    get_uleb128_step (result, *addrp, n);
  /* No terminating byte found in range.  Other implementations set
     VALUE to UINT_MAX in this case, so we do the same.  */
  return UINT64_MAX;
}
/* Like __libdw_get_uleb128, but without an END bound — the caller
   guarantees the buffer holds a complete encoding.  Still limited to
   the number of groups a uint64_t can hold; returns UINT64_MAX when
   that limit is exceeded without a terminating byte.  */
static inline uint64_t
__libdw_get_uleb128_unchecked (const unsigned char **addrp)
{
  uint64_t result = 0;
  /* First byte unrolled for the common single-byte case.  */
  get_uleb128_step (result, *addrp, 0);
  for (size_t n = 1; n < len_leb128 (uint64_t); ++n)
    get_uleb128_step (result, *addrp, n);
  /* Other implementations set VALUE to UINT_MAX in this case, so we
     do the same.  */
  return UINT64_MAX;
}
/* Note, addr needs to be smaller than end.  Both macros store the
   decoded value into VAR and advance ADDR past it.  */
#define get_uleb128(var, addr, end) ((var) = __libdw_get_uleb128 (&(addr), end))
/* Unbounded variant; only for buffers already known to be valid.  */
#define get_uleb128_unchecked(var, addr) ((var) = __libdw_get_uleb128_unchecked (&(addr)))
/* The signed case is similar, but we sign-extend the result. */
/* Consume one LEB128 byte from ADDR into VAR at position NTH * 7.
   On the final byte (high bit clear), bit 0x40 is the sign: when set,
   fill all bits above the consumed groups with ones.  Like
   get_uleb128_step this RETURNS from the enclosing function.  */
#define get_sleb128_step(var, addr, nth) \
do { \
unsigned char __b = *(addr)++; \
(var) |= (typeof (var)) (__b & 0x7f) << ((nth) * 7); \
if (likely ((__b & 0x80) == 0)) \
{ \
if ((__b & 0x40) != 0) \
(var) |= - ((typeof (var)) 1 << (((nth) + 1) * 7)); \
return (var); \
} \
} while (0)
/* Decode a signed LEB128 number starting at *ADDRP, never reading at
   or past END.  Advances *ADDRP; returns INT64_MAX on truncated or
   over-long input.  */
static inline int64_t
__libdw_get_sleb128 (const unsigned char **addrp, const unsigned char *end)
{
/* Do the work in an unsigned type, but use implementation-defined
behavior to cast to signed on return. This avoids some undefined
behavior when shifting. */
uint64_t acc = 0;
/* Unroll the first step to help the compiler optimize
for the common single-byte case. */
get_sleb128_step (acc, *addrp, 0);
/* MAX is one 7-bit group short of a full int64_t, so the loop's
   shifts stay clear of the sign position.  */
const size_t max = __libdw_max_len_sleb128 (*addrp - 1, end);
for (size_t i = 1; i < max; ++i)
get_sleb128_step (acc, *addrp, i);
/* Truncated: no terminating byte before END.  */
if (*addrp == end)
return INT64_MAX;
/* There might be one extra byte. */
unsigned char b = **addrp;
++*addrp;
if (likely ((b & 0x80) == 0))
{
/* We only need the low bit of the final byte, and as it is the
sign bit, we don't need to do anything else here. */
acc |= ((typeof (acc)) b) << 7 * max;
return acc;
}
/* Other implementations set VALUE to INT_MAX in this
case. So we better do this as well. */
return INT64_MAX;
}
/* Like __libdw_get_sleb128, but without an END bound — the caller
   guarantees a complete encoding.  Returns INT64_MAX on over-long
   input.  */
static inline int64_t
__libdw_get_sleb128_unchecked (const unsigned char **addrp)
{
/* Do the work in an unsigned type, but use implementation-defined
behavior to cast to signed on return. This avoids some undefined
behavior when shifting. */
uint64_t acc = 0;
/* Unroll the first step to help the compiler optimize
for the common single-byte case. */
get_sleb128_step (acc, *addrp, 0);
/* Subtract one step, so we don't shift into sign bit. */
const size_t max = len_leb128 (int64_t) - 1;
for (size_t i = 1; i < max; ++i)
get_sleb128_step (acc, *addrp, i);
/* There might be one extra byte. */
unsigned char b = **addrp;
++*addrp;
if (likely ((b & 0x80) == 0))
{
/* We only need the low bit of the final byte, and as it is the
sign bit, we don't need to do anything else here. */
acc |= ((typeof (acc)) b) << 7 * max;
return acc;
}
/* Other implementations set VALUE to INT_MAX in this
case. So we better do this as well. */
return INT64_MAX;
}
#define get_sleb128(var, addr, end) ((var) = __libdw_get_sleb128 (&(addr), end))
#define get_sleb128_unchecked(var, addr) ((var) = __libdw_get_sleb128_unchecked (&(addr)))
/* We use simple memory access functions in case the hardware allows it.
The caller has to make sure we don't have alias problems. */
#if ALLOW_UNALIGNED
/* Direct (possibly unaligned) loads.  Each macro byte-swaps when the
   Dwarf handle's other_byte_order flag says the file's byte order
   differs from the host's; the _noncvt variants never swap.
   NOTE(review): these cast Addr to a wider pointer type, which relies
   on the platform tolerating such accesses — only compiled in when
   ALLOW_UNALIGNED says so.  */
# define read_2ubyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? bswap_16 (*((const uint16_t *) (Addr))) \
: *((const uint16_t *) (Addr)))
# define read_2sbyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? (int16_t) bswap_16 (*((const int16_t *) (Addr))) \
: *((const int16_t *) (Addr)))
# define read_4ubyte_unaligned_noncvt(Addr) \
*((const uint32_t *) (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? bswap_32 (*((const uint32_t *) (Addr))) \
: *((const uint32_t *) (Addr)))
# define read_4sbyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? (int32_t) bswap_32 (*((const int32_t *) (Addr))) \
: *((const int32_t *) (Addr)))
# define read_8ubyte_unaligned_noncvt(Addr) \
*((const uint64_t *) (Addr))
# define read_8ubyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? bswap_64 (*((const uint64_t *) (Addr))) \
: *((const uint64_t *) (Addr)))
# define read_8sbyte_unaligned(Dbg, Addr) \
(unlikely ((Dbg)->other_byte_order) \
? (int64_t) bswap_64 (*((const int64_t *) (Addr))) \
: *((const int64_t *) (Addr)))
#else
/* Fallback path for hosts that cannot load unaligned words directly:
   access memory through a packed union so the compiler generates
   byte-wise loads.  attribute_packed comes from the project's config
   header.  */
union unaligned
{
void *p;
uint16_t u2;
uint32_t u4;
uint64_t u8;
int16_t s2;
int32_t s4;
int64_t s8;
} attribute_packed;
/* Map the public read macros onto the _1 helper functions below,
   passing only the byte-order flag from the Dwarf handle.  */
# define read_2ubyte_unaligned(Dbg, Addr) \
read_2ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_2sbyte_unaligned(Dbg, Addr) \
read_2sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
read_4ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4sbyte_unaligned(Dbg, Addr) \
read_4sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8ubyte_unaligned(Dbg, Addr) \
read_8ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8sbyte_unaligned(Dbg, Addr) \
read_8sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
/* Load an unaligned 16-bit unsigned value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline uint16_t
read_2ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  uint16_t val = ((const union unaligned *) p)->u2;
  return unlikely (other_byte_order) ? bswap_16 (val) : val;
}
/* Load an unaligned 16-bit signed value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline int16_t
read_2sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *u = p;
  return unlikely (other_byte_order) ? (int16_t) bswap_16 (u->u2) : u->s2;
}
/* Load an unaligned 32-bit unsigned value without any byte-order
   conversion.  */
static inline uint32_t
read_4ubyte_unaligned_noncvt (const void *p)
{
  return ((const union unaligned *) p)->u4;
}
/* Load an unaligned 32-bit unsigned value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline uint32_t
read_4ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  uint32_t val = ((const union unaligned *) p)->u4;
  return unlikely (other_byte_order) ? bswap_32 (val) : val;
}
/* Load an unaligned 32-bit signed value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline int32_t
read_4sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *u = p;
  return unlikely (other_byte_order) ? (int32_t) bswap_32 (u->u4) : u->s4;
}
/* Load an unaligned 64-bit unsigned value without any byte-order
   conversion.  */
static inline uint64_t
read_8ubyte_unaligned_noncvt (const void *p)
{
  return ((const union unaligned *) p)->u8;
}
/* Load an unaligned 64-bit unsigned value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline uint64_t
read_8ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  uint64_t val = ((const union unaligned *) p)->u8;
  return unlikely (other_byte_order) ? bswap_64 (val) : val;
}
/* Load an unaligned 64-bit signed value, swapping bytes when the
   file's byte order differs from the host's.  */
static inline int64_t
read_8sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *u = p;
  return unlikely (other_byte_order) ? (int64_t) bswap_64 (u->u8) : u->s8;
}
#endif /* allow unaligned */
/* Read a value and advance Addr past it.  GCC statement expressions
   keep each macro usable as a single expression; the uintptr_t round
   trip avoids pointer-arithmetic type issues with Addr's type.  */
#define read_2ubyte_unaligned_inc(Dbg, Addr) \
({ uint16_t t_ = read_2ubyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2); \
t_; })
#define read_2sbyte_unaligned_inc(Dbg, Addr) \
({ int16_t t_ = read_2sbyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2); \
t_; })
#define read_4ubyte_unaligned_inc(Dbg, Addr) \
({ uint32_t t_ = read_4ubyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4); \
t_; })
#define read_4sbyte_unaligned_inc(Dbg, Addr) \
({ int32_t t_ = read_4sbyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4); \
t_; })
#define read_8ubyte_unaligned_inc(Dbg, Addr) \
({ uint64_t t_ = read_8ubyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8); \
t_; })
#define read_8sbyte_unaligned_inc(Dbg, Addr) \
({ int64_t t_ = read_8sbyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8); \
t_; })
/* 3ubyte reads are only used for DW_FORM_addrx3 and DW_FORM_strx3.
And are probably very rare. They are not optimized. They are
handled as if reading a 4byte value with the first (for big endian)
or last (for little endian) byte zero. */
/* Return the byte order (__LITTLE_ENDIAN or __BIG_ENDIAN) of the file
   being read: the host's order, or the opposite one when
   OTHER_BYTE_ORDER is set.  */
static inline int
file_byte_order (bool other_byte_order)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
  const int native = __LITTLE_ENDIAN, swapped = __BIG_ENDIAN;
#else
  const int native = __BIG_ENDIAN, swapped = __LITTLE_ENDIAN;
#endif
  return other_byte_order ? swapped : native;
}
/* Read a 3-byte unsigned value at P in the file's byte order and
   zero-extend it to a host-order uint32_t.  Bytes are assembled
   directly with shifts, so no unaligned load is involved; the file's
   byte order just decides which byte is most significant.  */
static inline uint32_t
read_3ubyte_unaligned (Dwarf *dbg, const unsigned char *p)
{
  if (file_byte_order (dbg->other_byte_order) == __BIG_ENDIAN)
    return (((uint32_t) p[0] << 16)
	    | ((uint32_t) p[1] << 8)
	    | (uint32_t) p[2]);
  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16));
}
/* Read a 3-byte value and advance Addr past it.  */
#define read_3ubyte_unaligned_inc(Dbg, Addr) \
({ uint32_t t_ = read_3ubyte_unaligned (Dbg, Addr); \
Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 3); \
t_; })
/* Read an address of Nbytes (4 or 8 only, asserted) and advance Addr.  */
#define read_addr_unaligned_inc(Nbytes, Dbg, Addr) \
(assert ((Nbytes) == 4 || (Nbytes) == 8), \
((Nbytes) == 4 ? read_4ubyte_unaligned_inc (Dbg, Addr) \
: read_8ubyte_unaligned_inc (Dbg, Addr)))
#endif /* memory-access.h */
|