; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"

;int test(double *G) {
;  G[0] = 1+G[5]*4;
;  G[1] = 6+G[6]*3;
;  G[2] = 7+G[5]*4;
;  G[3] = 8+G[6]*4;
;}

define i32 @test(ptr nocapture %G) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 5
; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x double> [[TMP0]],
; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP1]],
; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[G]], align 8
; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, ptr [[G]], i64 2
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
; CHECK-NEXT:    [[MUL11:%.*]] = fmul double [[TMP3]], 4.000000e+00
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP1]], double [[MUL11]], i32 1
; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP4]],
; CHECK-NEXT:    store <2 x double> [[TMP5]], ptr [[ARRAYIDX9]], align 8
; CHECK-NEXT:    ret i32 undef
;
entry:
  %arrayidx = getelementptr inbounds double, ptr %G, i64 5
  %0 = load double, ptr %arrayidx, align 8
  %mul = fmul double %0, 4.000000e+00
  %add = fadd double %mul, 1.000000e+00
  store double %add, ptr %G, align 8
  %arrayidx2 = getelementptr inbounds double, ptr %G, i64 6
  %1 = load double, ptr %arrayidx2, align 8
  %mul3 = fmul double %1, 3.000000e+00
  %add4 = fadd double %mul3, 6.000000e+00
  %arrayidx5 = getelementptr inbounds double, ptr %G, i64 1
  store double %add4, ptr %arrayidx5, align 8
  %add8 = fadd double %mul, 7.000000e+00
  %arrayidx9 = getelementptr inbounds double, ptr %G, i64 2
  store double %add8, ptr %arrayidx9, align 8
  %mul11 = fmul double %1, 4.000000e+00
  %add12 = fadd double %mul11, 8.000000e+00
  %arrayidx13 = getelementptr inbounds double, ptr %G, i64 3
  store double %add12, ptr %arrayidx13, align 8
  ret i32 undef
}

;int foo(double *A, int n) {
;  A[0] = A[0] * 7.9 * n + 6.0;
;  A[1] = A[1] * 7.7 * n + 2.0;
;  A[2] = A[2] * 7.6 * n + 3.0;
;  A[3] = A[3] * 7.4 * n + 4.0;
;}

define i32 @foo(ptr nocapture %A, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = fmul <4 x double> [[TMP0]],
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x double> poison, double [[CONV]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <4 x double> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP5:%.*]] = fadd <4 x double> [[TMP4]],
; CHECK-NEXT:    store <4 x double> [[TMP5]], ptr [[A]], align 8
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load double, ptr %A, align 8
  %mul = fmul double %0, 7.900000e+00
  %conv = sitofp i32 %n to double
  %mul1 = fmul double %conv, %mul
  %add = fadd double %mul1, 6.000000e+00
  store double %add, ptr %A, align 8
  %arrayidx3 = getelementptr inbounds double, ptr %A, i64 1
  %1 = load double, ptr %arrayidx3, align 8
  %mul4 = fmul double %1, 7.700000e+00
  %mul6 = fmul double %conv, %mul4
  %add7 = fadd double %mul6, 2.000000e+00
  store double %add7, ptr %arrayidx3, align 8
  %arrayidx9 = getelementptr inbounds double, ptr %A, i64 2
  %2 = load double, ptr %arrayidx9, align 8
  %mul10 = fmul double %2, 7.600000e+00
  %mul12 = fmul double %conv, %mul10
  %add13 = fadd double %mul12, 3.000000e+00
  store double %add13, ptr %arrayidx9, align 8
  %arrayidx15 = getelementptr inbounds double, ptr %A, i64 3
  %3 = load double, ptr %arrayidx15, align 8
  %mul16 = fmul double %3, 7.400000e+00
  %mul18 = fmul double %conv, %mul16
  %add19 = fadd double %mul18, 4.000000e+00
  store double %add19, ptr %arrayidx15, align 8
  ret i32 undef
}

; int test2(double *G, int k) {
;   if (k) {
;     G[0] = 1+G[5]*4;
;     G[1] = 6+G[6]*3;
;   } else {
;     G[2] = 7+G[5]*4;
;     G[3] = 8+G[6]*3;
;   }
; }

; We can't merge the gather sequences because one does not dominate the other.

define i32 @test2(ptr nocapture %G, i32 %k) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[K:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 5
; CHECK-NEXT:    [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul double [[TMP3]], 4.000000e+00
; CHECK-NEXT:    br i1 [[TMP1]], label [[TMP12:%.*]], label [[TMP5:%.*]]
; CHECK:       5:
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds double, ptr [[G]], i64 6
; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[TMP6]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = fmul double [[TMP7]], 3.000000e+00
; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x double> poison, double [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x double> [[TMP9]], double [[TMP8]], i32 1
; CHECK-NEXT:    [[TMP11:%.*]] = fadd <2 x double> [[TMP10]],
; CHECK-NEXT:    store <2 x double> [[TMP11]], ptr [[G]], align 8
; CHECK-NEXT:    br label [[TMP20:%.*]]
; CHECK:       12:
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds double, ptr [[G]], i64 2
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds double, ptr [[G]], i64 6
; CHECK-NEXT:    [[TMP15:%.*]] = load double, ptr [[TMP14]], align 8
; CHECK-NEXT:    [[TMP16:%.*]] = fmul double [[TMP15]], 3.000000e+00
; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x double> poison, double [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x double> [[TMP17]], double [[TMP16]], i32 1
; CHECK-NEXT:    [[TMP19:%.*]] = fadd <2 x double> [[TMP18]],
; CHECK-NEXT:    store <2 x double> [[TMP19]], ptr [[TMP13]], align 8
; CHECK-NEXT:    br label [[TMP20]]
; CHECK:       20:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp eq i32 %k, 0
  %2 = getelementptr inbounds double, ptr %G, i64 5
  %3 = load double, ptr %2, align 8
  %4 = fmul double %3, 4.000000e+00
  br i1 %1, label %12, label %5
;