summaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll
blob: 0de0ac7b77a777e8ab01c9091d60e041be99147f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s

; Make sure we don't crash when trying to create a bitcast between
; address spaces
; Base pointer is an addrspacecast of a generic (addrspace 0) null to the
; constant address space. Per the CHECK lines, the i8 load at offset 6 is
; rewritten as a naturally-aligned i32 load at offset 4, with the wanted byte
; extracted via lshr 16 + trunc — and, per the file-level note above, the
; rewrite must not crash while handling the cross-address-space cast.
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; CHECK-LABEL: @constant_from_offset_cast_generic_null(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

; Same shape as the generic-null test above, but the base pointer is an
; addrspacecast of a global (addrspace 1) null. The CHECK lines show the same
; widening: i8 load at offset 6 becomes an i32 load at offset 4 followed by
; lshr 16 + trunc to recover the byte.
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; CHECK-LABEL: @constant_from_offset_cast_global_null(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4

; Variant where the base pointer is an addrspacecast of a real global variable
; (@gv, addrspace 1) rather than a null. The CHECK lines show the identical
; widening pattern: i32 load at offset 4, then lshr 16 + trunc to i8.
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; CHECK-LABEL: @constant_from_offset_cast_global_gv(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

; Variant where the base pointer is an addrspacecast of a generic inttoptr
; constant (i64 128). The CHECK lines show the load is still widened the same
; way: i32 load at offset 4, then lshr 16 + trunc to i8.
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; CHECK-LABEL: @constant_from_offset_cast_generic_inttoptr(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

; Direct inttoptr into addrspace(4) with no offset GEP. Unlike the tests
; above, the CHECK lines show the i8 load is NOT widened to i32 here; the only
; change is the alignment annotation going from 1 to 4.
define amdgpu_kernel void @constant_from_inttoptr() {
; CHECK-LABEL: @constant_from_inttoptr(
; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; CHECK-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}