summaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
blob: c6eecdcdf99cc52a6ffd030981d7b44fce0f896a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown   -mattr=sse4.2 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=i386-unknown-unknown   -mattr=ssse3   | FileCheck %s --check-prefix=SLOW_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=ssse3   | FileCheck %s --check-prefix=SLOW_64

define void @bork(ptr nocapture align 4 %dst) nounwind {
; FAST-LABEL: bork:
; FAST:       # %bb.0:
; FAST-NEXT:    movl {{[0-9]+}}(%esp), %eax
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, 64(%eax)
; FAST-NEXT:    movups %xmm0, 48(%eax)
; FAST-NEXT:    movups %xmm0, 32(%eax)
; FAST-NEXT:    movups %xmm0, 16(%eax)
; FAST-NEXT:    movups %xmm0, (%eax)
; FAST-NEXT:    retl
;
; SLOW_32-LABEL: bork:
; SLOW_32:       # %bb.0:
; SLOW_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; SLOW_32-NEXT:    movl $0, 4(%eax)
; SLOW_32-NEXT:    movl $0, (%eax)
; SLOW_32-NEXT:    movl $0, 12(%eax)
; SLOW_32-NEXT:    movl $0, 8(%eax)
; SLOW_32-NEXT:    movl $0, 20(%eax)
; SLOW_32-NEXT:    movl $0, 16(%eax)
; SLOW_32-NEXT:    movl $0, 28(%eax)
; SLOW_32-NEXT:    movl $0, 24(%eax)
; SLOW_32-NEXT:    movl $0, 36(%eax)
; SLOW_32-NEXT:    movl $0, 32(%eax)
; SLOW_32-NEXT:    movl $0, 44(%eax)
; SLOW_32-NEXT:    movl $0, 40(%eax)
; SLOW_32-NEXT:    movl $0, 52(%eax)
; SLOW_32-NEXT:    movl $0, 48(%eax)
; SLOW_32-NEXT:    movl $0, 60(%eax)
; SLOW_32-NEXT:    movl $0, 56(%eax)
; SLOW_32-NEXT:    movl $0, 68(%eax)
; SLOW_32-NEXT:    movl $0, 64(%eax)
; SLOW_32-NEXT:    movl $0, 76(%eax)
; SLOW_32-NEXT:    movl $0, 72(%eax)
; SLOW_32-NEXT:    retl
;
; SLOW_64-LABEL: bork:
; SLOW_64:       # %bb.0:
; SLOW_64-NEXT:    movq $0, 72(%rdi)
; SLOW_64-NEXT:    movq $0, 64(%rdi)
; SLOW_64-NEXT:    movq $0, 56(%rdi)
; SLOW_64-NEXT:    movq $0, 48(%rdi)
; SLOW_64-NEXT:    movq $0, 40(%rdi)
; SLOW_64-NEXT:    movq $0, 32(%rdi)
; SLOW_64-NEXT:    movq $0, 24(%rdi)
; SLOW_64-NEXT:    movq $0, 16(%rdi)
; SLOW_64-NEXT:    movq $0, 8(%rdi)
; SLOW_64-NEXT:    movq $0, (%rdi)
; SLOW_64-NEXT:    retq
; Lowering of an 80-byte zeroing memset with only 4-byte alignment: per the
; checks above, the sse4.2 (FAST) target emits five unaligned 16-byte movups
; stores, while the ssse3 (SLOW_32/SLOW_64) targets fall back to scalar
; 4-byte movl / 8-byte movq stores respectively.
  call void @llvm.memset.p0.i64(ptr align 4 %dst, i8 0, i64 80, i1 false)
  ret void
}

; Declaration of the memset intrinsic (64-bit length variant) exercised above.
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind