Diffstat (limited to 'gcc/testsuite/gfortran.dg')
 gcc/testsuite/gfortran.dg/gomp/declare-simd-1.f90 |   9 +
 gcc/testsuite/gfortran.dg/gomp/depend-1.f90       |  13 +
 gcc/testsuite/gfortran.dg/gomp/target1.f90        | 520 ++++++++++++++++++++
 gcc/testsuite/gfortran.dg/gomp/target2.f90        |  74 +++
 gcc/testsuite/gfortran.dg/gomp/target3.f90        |  12 +
 gcc/testsuite/gfortran.dg/gomp/udr4.f90           |   2 +-
 gcc/testsuite/gfortran.dg/openmp-define-3.f90     |   2 +-
 7 files changed, 630 insertions(+), 2 deletions(-)
diff --git a/gcc/testsuite/gfortran.dg/gomp/declare-simd-1.f90 b/gcc/testsuite/gfortran.dg/gomp/declare-simd-1.f90
new file mode 100644
index 00000000000..d6ae7c9c812
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/declare-simd-1.f90
@@ -0,0 +1,9 @@
+! { dg-do compile }
+
+subroutine fn1 (x)
+ integer :: x
+!$omp declare simd (fn1) inbranch notinbranch uniform (x) ! { dg-error "Unclassifiable OpenMP directive" }
+end subroutine fn1
+subroutine fn2 (x)
+!$omp declare simd (fn100) ! { dg-error "should refer to containing procedure" }
+end subroutine fn2
diff --git a/gcc/testsuite/gfortran.dg/gomp/depend-1.f90 b/gcc/testsuite/gfortran.dg/gomp/depend-1.f90
new file mode 100644
index 00000000000..bd6d26a3830
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/depend-1.f90
@@ -0,0 +1,13 @@
+! { dg-do compile }
+
+subroutine foo (x)
+ integer :: x(5, *)
+!$omp parallel
+!$omp single
+!$omp task depend(in:x(:,5))
+!$omp end task
+!$omp task depend(in:x(5,:)) ! { dg-error "Rightmost upper bound of assumed size array section|proper array section" }
+!$omp end task
+!$omp end single
+!$omp end parallel
+end
diff --git a/gcc/testsuite/gfortran.dg/gomp/target1.f90 b/gcc/testsuite/gfortran.dg/gomp/target1.f90
new file mode 100644
index 00000000000..14db4970bdc
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/target1.f90
@@ -0,0 +1,520 @@
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+module target1
+ interface
+ subroutine dosomething (a, n, m)
+ integer :: a (:), n, m
+ !$omp declare target
+ end subroutine dosomething
+ end interface
+contains
+ subroutine foo (n, o, p, q, r, pp)
+ integer :: n, o, p, q, r, s, i, j
+ integer :: a (2:o)
+ integer, pointer :: pp
+ !$omp target data device (n + 1) if (n .ne. 6) map (tofrom: n, r)
+ !$omp target device (n + 1) if (n .ne. 6) map (from: n) map (alloc: a(2:o))
+ call dosomething (a, n, 0)
+ !$omp end target
+ !$omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r)
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp end target teams
+ !$omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp target teams distribute device (n + 1) num_teams (n + 4) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end target teams distribute
+ !$omp target teams distribute parallel do device (n + 1) num_teams (n + 4) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s) &
+ !$omp & ordered schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10 + j
+ end do
+ end do
+ !$omp target teams distribute parallel do device (n + 1) num_teams (n + 4) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10
+ end do
+ !$omp end target teams distribute parallel do
+ !$omp target teams distribute parallel do simd device (n + 1) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s) &
+ !$omp & schedule (static, 8) num_teams (n + 4) safelen(8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp target teams distribute parallel do simd device (n + 1) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) schedule (static, 8) &
+ !$omp & num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end target teams distribute parallel do simd
+ !$omp target teams distribute simd device (n + 1) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & lastprivate (s) num_teams (n + 4) safelen(8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp target teams distribute simd device (n + 1) &
+ !$omp & if (n .ne. 6)map (from: n) map (alloc: a(2:o)) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) &
+ !$omp & num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end target teams distribute simd
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r)
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp end teams
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute num_teams (n + 4) collapse (2) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute num_teams (n + 4) default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end teams distribute
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute parallel do num_teams (n + 4) &
+ !$omp & if (n .ne. 6) default(shared) ordered schedule (static, 8) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute parallel do num_teams (n + 4)if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10
+ end do
+ !$omp end teams distribute parallel do
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute parallel do simd if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s) &
+ !$omp & schedule (static, 8) num_teams (n + 4) safelen(8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute parallel do simd if (n .ne. 6)default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) schedule (static, 8) &
+ !$omp & num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end teams distribute parallel do simd
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute simd default(shared) safelen(8) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) collapse (2) &
+ !$omp & lastprivate (s) num_teams (n + 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target
+ !$omp target device (n + 1) if (n .ne. 6)map (from: n) map (alloc: a(2:o))
+ !$omp teams distribute simd default(shared) aligned (pp:4) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end teams distribute simd
+ !$omp end target
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction ( + : r )
+ !$omp distribute collapse (2) firstprivate (q) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute firstprivate (q) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end distribute
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do if (n .ne. 6) default(shared) &
+ !$omp & ordered schedule (static, 8) private (p) firstprivate (q) &
+ !$omp & shared(n)reduction(+:r)dist_schedule(static,4)collapse(2)&
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10
+ end do
+ !$omp end distribute parallel do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do simd if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) collapse (2) safelen(8) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s) &
+ !$omp & schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do simd if (n .ne. 6)default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) schedule (static, 8) &
+ !$omp & safelen(16) linear(i:1) aligned (pp:4)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end distribute parallel do simd
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute simd safelen(8) lastprivate(s) &
+ !$omp & private (p) firstprivate (q) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) collapse (2)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute simd aligned (pp:4) &
+ !$omp & private (p) firstprivate (q) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) lastprivate (s)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end distribute simd
+ !$omp end target teams
+ !$omp end target data
+ end subroutine
+ subroutine bar (n, o, p, r, pp)
+ integer :: n, o, p, q, r, s, i, j
+ integer :: a (2:o)
+ integer, pointer :: pp
+ common /blk/ i, j, q
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction ( + : r )
+ !$omp distribute collapse (2) firstprivate (q) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute firstprivate (q) dist_schedule (static, 4)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ end do
+ !$omp end distribute
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do if (n .ne. 6) default(shared) &
+ !$omp & ordered schedule (static, 8) private (p) firstprivate (q) &
+ !$omp & shared(n)reduction(+:r)dist_schedule(static,4)collapse(2)&
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ call dosomething (a, n, p + q)
+ end do
+ !$omp ordered
+ p = q
+ !$omp end ordered
+ s = i * 10
+ end do
+ !$omp end distribute parallel do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do simd if(n.ne.6)default(shared)&
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) collapse (2) safelen(8) &
+ !$omp & num_threads (n + 4) proc_bind (spread) lastprivate (s) &
+ !$omp & schedule (static, 8)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute parallel do simd if (n .ne. 6)default(shared) &
+ !$omp & private (p) firstprivate (q) shared (n) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) num_threads (n + 4) &
+ !$omp & proc_bind (master) lastprivate (s) schedule (static, 8) &
+ !$omp & safelen(16) linear(i:1) aligned (pp:4)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end distribute parallel do simd
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute simd safelen(8) lastprivate(s) &
+ !$omp & private (p) firstprivate (q) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) collapse (2)
+ do i = 1, 10
+ do j = 1, 10
+ r = r + 1
+ p = q
+ a(2+i*10+j) = p + q
+ s = i * 10 + j
+ end do
+ end do
+ !$omp end target teams
+ !$omp target teams device (n + 1) if (n .ne. 6)map (from: n) &
+ !$omp & map (alloc: a(2:o)) num_teams (n + 4) thread_limit (n * 2) &
+ !$omp & default(shared) shared(n) private (p) reduction(+:r)
+ !$omp distribute simd aligned (pp:4) &
+ !$omp & private (p) firstprivate (q) reduction (+: r) &
+ !$omp & dist_schedule (static, 4) lastprivate (s)
+ do i = 1, 10
+ r = r + 1
+ p = q
+ a(1+i) = p + q
+ s = i * 10
+ end do
+ !$omp end distribute simd
+ !$omp end target teams
+ end subroutine
+end module
diff --git a/gcc/testsuite/gfortran.dg/gomp/target2.f90 b/gcc/testsuite/gfortran.dg/gomp/target2.f90
new file mode 100644
index 00000000000..7521331fcb1
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/target2.f90
@@ -0,0 +1,74 @@
+! { dg-do compile }
+! { dg-options "-fopenmp -ffree-line-length-160" }
+
+subroutine foo (n, s, t, u, v, w)
+ integer :: n, i, s, t, u, v, w
+ common /bar/ i
+ !$omp simd safelen(s + 1)
+ do i = 1, n
+ end do
+ !$omp do schedule (static, t * 2)
+ do i = 1, n
+ end do
+ !$omp do simd safelen(s + 1) schedule (static, t * 2)
+ do i = 1, n
+ end do
+ !$omp parallel do schedule (static, t * 2) num_threads (u - 1)
+ do i = 1, n
+ end do
+ !$omp parallel do simd safelen(s + 1) schedule (static, t * 2) num_threads (u - 1)
+ do i = 1, n
+ end do
+ !$omp distribute dist_schedule (static, v + 8)
+ do i = 1, n
+ end do
+ !$omp distribute simd dist_schedule (static, v + 8) safelen(s + 1)
+ do i = 1, n
+ end do
+ !$omp distribute parallel do simd dist_schedule (static, v + 8) safelen(s + 1) &
+ !$omp & schedule (static, t * 2) num_threads (u - 1)
+ do i = 1, n
+ end do
+ !$omp distribute parallel do dist_schedule (static, v + 8) num_threads (u - 1) &
+ !$omp & schedule (static, t * 2)
+ do i = 1, n
+ end do
+ !$omp target
+ !$omp teams distribute dist_schedule (static, v + 8) num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp end target
+ !$omp target
+ !$omp teams distribute simd dist_schedule (static, v + 8) safelen(s + 1) &
+ !$omp & num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp end target
+ !$omp target
+ !$omp teams distribute parallel do simd dist_schedule (static, v + 8) safelen(s + 1) &
+ !$omp & schedule (static, t * 2) num_threads (u - 1) num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp end target
+ !$omp target
+ !$omp teams distribute parallel do dist_schedule (static, v + 8) num_threads (u - 1) &
+ !$omp & schedule (static, t * 2) num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp end target
+ !$omp target teams distribute dist_schedule (static, v + 8) num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp target teams distribute simd dist_schedule (static, v + 8) safelen(s + 1) &
+ !$omp & num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp target teams distribute parallel do simd dist_schedule (static, v + 8) safelen(s + 1) &
+ !$omp & schedule (static, t * 2) num_threads (u - 1) num_teams (w + 8)
+ do i = 1, n
+ end do
+ !$omp target teams distribute parallel do dist_schedule (static, v + 8) num_threads (u - 1) &
+ !$omp & schedule (static, t * 2) num_teams (w + 8)
+ do i = 1, n
+ end do
+end subroutine
diff --git a/gcc/testsuite/gfortran.dg/gomp/target3.f90 b/gcc/testsuite/gfortran.dg/gomp/target3.f90
new file mode 100644
index 00000000000..53a9682bf96
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/target3.f90
@@ -0,0 +1,12 @@
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+subroutine foo (r)
+ integer :: i, r
+ !$omp target
+ !$omp target teams distribute parallel do reduction (+: r) ! { dg-warning "target construct inside of target region" }
+ do i = 1, 10
+ r = r + 1
+ end do
+ !$omp end target
+end subroutine
diff --git a/gcc/testsuite/gfortran.dg/gomp/udr4.f90 b/gcc/testsuite/gfortran.dg/gomp/udr4.f90
index 223dfd04cd2..7e86a757214 100644
--- a/gcc/testsuite/gfortran.dg/gomp/udr4.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/udr4.f90
@@ -6,7 +6,7 @@ subroutine f3
 !$omp declare reduction (foo) ! { dg-error "Unclassifiable OpenMP directive" }
 !$omp declare reduction (foo:integer) ! { dg-error "Unclassifiable OpenMP directive" }
 !$omp declare reduction (foo:integer:omp_out=omp_out+omp_in) &
-!$omp & initializer(omp_priv=0) initializer(omp_priv=0) ! { dg-error "Unclassifiable statement" }
+!$omp & initializer(omp_priv=0) initializer(omp_priv=0) ! { dg-error "Unexpected junk after" }
 end subroutine f3
 subroutine f4
 implicit integer (o)
diff --git a/gcc/testsuite/gfortran.dg/openmp-define-3.f90 b/gcc/testsuite/gfortran.dg/openmp-define-3.f90
index 3d559864faf..44d5c9de49b 100644
--- a/gcc/testsuite/gfortran.dg/openmp-define-3.f90
+++ b/gcc/testsuite/gfortran.dg/openmp-define-3.f90
@@ -6,6 +6,6 @@
 # error _OPENMP not defined
 #endif

-#if _OPENMP != 201107
+#if _OPENMP != 201307
 # error _OPENMP defined to wrong value
 #endif