//===-- xray_trampoline_x86_64.S --------------------------------*- ASM -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"



.macro SAVE_REGISTERS
	pushfq
	subq $240, %rsp
	CFI_DEF_CFA_OFFSET(256)
	movq	%rbp, 232(%rsp)
	movupd	%xmm0, 216(%rsp)
	movupd	%xmm1, 200(%rsp)
	movupd	%xmm2, 184(%rsp)
	movupd	%xmm3, 168(%rsp)
	movupd	%xmm4, 152(%rsp)
	movupd	%xmm5, 136(%rsp)
	movupd	%xmm6, 120(%rsp)
	movupd	%xmm7, 104(%rsp)
	movq	%rdi, 96(%rsp)
	movq	%rax, 88(%rsp)
	movq	%rdx, 80(%rsp)
	movq	%rsi, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%r8, 56(%rsp)
	movq	%r9, 48(%rsp)
	movq	%r10, 40(%rsp)
	movq	%r11, 32(%rsp)
	movq	%r12, 24(%rsp)
	movq	%r13, 16(%rsp)
	movq	%r14, 8(%rsp)
	movq	%r15, 0(%rsp)
.endm
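
// The spill area laid out by SAVE_REGISTERS and read back by
// RESTORE_REGISTERS, as offsets from the adjusted %rsp: %r15 through %rdi
// occupy thirteen 8-byte slots (0 through 96), %rbp sits at 232, and
// %xmm0..%xmm7 occupy eight 16-byte slots (104 through 216), for a total
// of 14*8 + 8*16 = 240 bytes. The CFA offset of 256 is that frame plus
// the 8-byte flags word pushed by pushfq and the 8-byte return address.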

.macro RESTORE_REGISTERS
	movq	232(%rsp), %rbp
	movupd	216(%rsp), %xmm0
	movupd	200(%rsp), %xmm1
	movupd	184(%rsp), %xmm2
	movupd	168(%rsp), %xmm3
	movupd	152(%rsp), %xmm4
	movupd	136(%rsp), %xmm5
	movupd	120(%rsp), %xmm6
	movupd	104(%rsp), %xmm7
	movq	96(%rsp), %rdi
	movq	88(%rsp), %rax
	movq	80(%rsp), %rdx
	movq	72(%rsp), %rsi
	movq	64(%rsp), %rcx
	movq	56(%rsp), %r8
	movq	48(%rsp), %r9
	movq	40(%rsp), %r10
	movq	32(%rsp), %r11
	movq	24(%rsp), %r12
	movq	16(%rsp), %r13
	movq	8(%rsp), %r14
	movq	0(%rsp), %r15
	addq	$240, %rsp
	popfq
	CFI_DEF_CFA_OFFSET(8)
.endm

.macro ALIGNED_CALL_RAX
	// Call the logging handler, after aligning the stack to a 16-byte boundary.
	// We stash the current stack pointer twice before rounding %rsp down to a
	// 16-byte boundary. If the stack was only 8-byte aligned, the rounding
	// consumes one of the two stashed slots; either way, the slot 8 bytes
	// above the aligned stack pointer holds the original %rsp, which is what
	// the restore below reads back.
	pushq %rsp
	pushq (%rsp)
	andq $-0x10, %rsp
	callq *%rax
	movq 8(%rsp), %rsp
.endm
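
// A worked example of the alignment dance above, using a hypothetical
// incoming %rsp of 0x1008 (8-byte aligned only): the two pushes leave
// %rsp at 0x0ff8 with the value 0x1008 stored at both 0x0ff8 and 0x1000;
// andq $-0x10 rounds %rsp down to 0x0ff0, and 8(%rsp) then reads the
// copy at 0x0ff8. With an already 16-byte-aligned %rsp of 0x1010, the
// pushes leave %rsp at 0x1000, the andq is a no-op, and 8(%rsp) reads
// the copy at 0x1008. Both cases recover the original stack pointer.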

	.text
#if !defined(__APPLE__)
	.section .text
	.file "xray_trampoline_x86_64.S"
#else
	.section __TEXT,__text
#endif

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionEntry)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
	CFI_STARTPROC
	SAVE_REGISTERS

	// This load has to be atomic, it's concurrent with __xray_patch().
	// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp0

	// The patched function prologue puts its xray_instr_map index into %r10d.
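	// A patched entry sled looks roughly like this (the XRay patching code
	// rewrites the sled at runtime):
	//   mov $<function_id>, %r10d
	//   call __xray_FunctionEntry
	// The handler invoked below is assumed to match the declaration in
	// include/xray/xray_interface.h:
	//   void handler(int32_t FuncId, XRayEntryType Type);
	// with %edi = FuncId and %esi = Type; this trampoline passes 0 (ENTRY),
	// and the exit and tail-exit trampolines below pass 1 and 2.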
	movl	%r10d, %edi
	xorl	%esi, %esi
	ALIGNED_CALL_RAX

.Ltmp0:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionEntry)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionExit)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
	CFI_STARTPROC
	// Save the important registers first. Since we're assuming that this
	// function is only jumped into from a function's return path, the only
	// registers we must preserve are the ones used to return values in the
	// SysV ABI: %rax, %rdx, %xmm0, and %xmm1 (plus %rbp).
	subq	$56, %rsp
	CFI_DEF_CFA_OFFSET(64)
	movq	%rbp, 48(%rsp)
	movupd	%xmm0, 32(%rsp)
	movupd	%xmm1, 16(%rsp)
	movq	%rax, 8(%rsp)
	movq	%rdx, 0(%rsp)
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp2

	movl	%r10d, %edi
	movl	$1, %esi
	ALIGNED_CALL_RAX

.Ltmp2:
	// Restore the important registers.
	movq	48(%rsp), %rbp
	movupd	32(%rsp), %xmm0
	movupd	16(%rsp), %xmm1
	movq	8(%rsp), %rax
	movq	0(%rsp), %rdx
	addq	$56, %rsp
	CFI_DEF_CFA_OFFSET(8)
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionExit)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_FunctionTailExit)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
	CFI_STARTPROC
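	// A tail exit fires immediately before the tail call it instruments, so
	// the argument registers for the outgoing call are still live. Save the
	// full register set, not just the return registers, so the callee sees
	// its arguments intact.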
	SAVE_REGISTERS

	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp4

	movl	%r10d, %edi
	movl	$2, %esi
	ALIGNED_CALL_RAX

.Ltmp4:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_FunctionTailExit)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_ArgLoggerEntry)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
	CFI_STARTPROC
	SAVE_REGISTERS

	// Again, these function pointer loads must be atomic; MOV is fine.
	movq	ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
	testq	%rax, %rax
	jne	.Larg1entryLog

	// If the arg1 logging handler is not set, fall back to no-arg logging.
	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
	testq	%rax, %rax
	je	.Larg1entryFail

.Larg1entryLog:
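	// Shuffle the arguments into place, assuming the arg1 handler signature
	// from include/xray/xray_interface.h:
	//   void handler(int32_t FuncId, XRayEntryType Type, uint64_t Arg1);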

	// The first argument will become the third.
	movq	%rdi, %rdx

	// XRayEntryType::LOG_ARGS_ENTRY (0x3) goes in the second.
	movl	$0x3, %esi

	// The 32-bit function ID becomes the first.
	movl	%r10d, %edi
	ALIGNED_CALL_RAX

.Larg1entryFail:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_ArgLoggerEntry)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_CustomEvent)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
	CFI_STARTPROC
	SAVE_REGISTERS

	// This trampoline receives two arguments, which should already be in
	// %rdi and %rsi.
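	// The call below assumes the custom-event handler signature from
	// include/xray/xray_interface.h:
	//   void handler(void *Event, size_t EventSize);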
	movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
	testq	%rax, %rax
	je	.LcustomEventCleanup

	ALIGNED_CALL_RAX

.LcustomEventCleanup:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_CustomEvent)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

	.globl ASM_SYMBOL(__xray_TypedEvent)
	.align 16, 0x90
	ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
	CFI_STARTPROC
	SAVE_REGISTERS

	// This trampoline receives three arguments, which should already be in
	// %rdi, %rsi, and %rdx.
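	// The call below assumes the typed-event handler signature from
	// include/xray/xray_interface.h:
	//   void handler(uint16_t EventType, const void *Event, size_t EventSize);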
	movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
	testq	%rax, %rax
	je	.LtypedEventCleanup

	ALIGNED_CALL_RAX

.LtypedEventCleanup:
	RESTORE_REGISTERS
	retq
# LLVM-MCA-END
	ASM_SIZE(__xray_TypedEvent)
	CFI_ENDPROC

//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE