From 604bebc64149d253c732ee5c9077f1826110f32a Mon Sep 17 00:00:00 2001 From: Brian Sumner Date: Tue, 29 Nov 2022 11:57:15 -0800 Subject: [PATCH 01/22] Implement some non-default-rounded functions Change-Id: Ic2a0806095178505e455b78d66c6d119cdc1445a --- cmake/OCL.cmake | 3 +- ocml/src/addD.cl | 22 ++++++++---- ocml/src/addF.cl | 22 ++++++++---- ocml/src/addH.cl | 22 ++++++++---- ocml/src/builtins.h | 87 +++++---------------------------------------- ocml/src/fmaD.cl | 22 ++++++++---- ocml/src/fmaF.cl | 22 ++++++++---- ocml/src/fmaH.cl | 22 ++++++++---- ocml/src/mulD.cl | 22 ++++++++---- ocml/src/mulF.cl | 22 ++++++++---- ocml/src/mulH.cl | 22 ++++++++---- ocml/src/subD.cl | 22 ++++++++---- ocml/src/subF.cl | 22 ++++++++---- ocml/src/subH.cl | 22 ++++++++---- 14 files changed, 203 insertions(+), 151 deletions(-) diff --git a/cmake/OCL.cmake b/cmake/OCL.cmake index cc533bee..30790a94 100644 --- a/cmake/OCL.cmake +++ b/cmake/OCL.cmake @@ -21,7 +21,8 @@ endif() # potential mis-aligned atomic ops detected by clang set(CLANG_OCL_FLAGS -fcolor-diagnostics -Werror -Wno-error=atomic-alignment -x cl -Xclang -cl-std=CL2.0 -target "${AMDGPU_TARGET_TRIPLE}" -fvisibility=protected -fomit-frame-pointer - -Xclang -finclude-default-header -nogpulib -cl-no-stdinc "${CLANG_OPTIONS_APPEND}") + -Xclang -finclude-default-header -Xclang -fexperimental-strict-floating-point + -nogpulib -cl-no-stdinc "${CLANG_OPTIONS_APPEND}") # For compatibility with the MSVC headers we use a 32-bit wchar. Users linking # against us must also use a short wchar. 
diff --git a/ocml/src/addD.cl b/ocml/src/addD.cl index 57a5f772..9fe2747c 100644 --- a/ocml/src/addD.cl +++ b/ocml/src/addD.cl @@ -7,15 +7,25 @@ #include "mathD.h" -#define GEN(LN,UN) \ +CONSTATTR double +MATH_MANGLE(add_rte)(double x, double y) +{ + return x + y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR double \ MATH_MANGLE(LN)(double x, double y) \ { \ - return BUILTIN_##UN##_F64(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + double ret = x + y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(add_rte,ADD_RTE) -// GEN(add_rtn,ADD_RTN) -// GEN(add_rtp,ADD_RTP) -// GEN(add_rtz,ADD_RTZ) +GEN(add_rtn, ROUND_RTN) +GEN(add_rtp, ROUND_RTP) +GEN(add_rtz, ROUND_RTZ) diff --git a/ocml/src/addF.cl b/ocml/src/addF.cl index 1d73b84e..1e8d9696 100644 --- a/ocml/src/addF.cl +++ b/ocml/src/addF.cl @@ -7,15 +7,25 @@ #include "mathF.h" -#define GEN(LN,UN) \ +CONSTATTR float +MATH_MANGLE(add_rte)(float x, float y) +{ + return x + y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR float \ MATH_MANGLE(LN)(float x, float y) \ { \ - return BUILTIN_##UN##_F32(x, y); \ + BUILTIN_SETROUND_F32(RM); \ + float ret = x + y; \ + BUILTIN_SETROUND_F32(ROUND_RTE); \ + return ret; \ } -// GEN(add_rte,ADD_RTE) -// GEN(add_rtn,ADD_RTN) -// GEN(add_rtp,ADD_RTP) -// GEN(add_rtz,ADD_RTZ) +GEN(add_rtn, ROUND_RTN) +GEN(add_rtp, ROUND_RTP) +GEN(add_rtz, ROUND_RTZ) diff --git a/ocml/src/addH.cl b/ocml/src/addH.cl index d464b49f..4ff04df4 100644 --- a/ocml/src/addH.cl +++ b/ocml/src/addH.cl @@ -7,15 +7,25 @@ #include "mathH.h" -#define GEN(LN,UN) \ +CONSTATTR half +MATH_MANGLE(add_rte)(half x, half y) +{ + return x + y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR half \ MATH_MANGLE(LN)(half x, half y) \ { \ - return BUILTIN_##UN##_F16(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + half ret = x + y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(add_rte,ADD_RTE) -// GEN(add_rtn,ADD_RTN) -// 
GEN(add_rtp,ADD_RTP) -// GEN(add_rtz,ADD_RTZ) +GEN(add_rtn, ROUND_RTN) +GEN(add_rtp, ROUND_RTP) +GEN(add_rtz, ROUND_RTZ) diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index 546efb89..a952cf43 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -232,81 +232,12 @@ #define BUILTIN_CLAMP_F32(X,L,H) __builtin_amdgcn_fmed3f(X,L,H) #define BUILTIN_CLAMP_F16(X,L,H) __llvm_amdgcn_fmed3_f16(X,L,H) -#define BUILTIN_ADD_RTE_F32 __llvm_add_rte_f32 -#define BUILTIN_ADD_RTE_F64 __llvm_add_rte_f64 -#define BUILTIN_ADD_RTE_F16 __llvm_add_rte_f16 -#define BUILTIN_ADD_RTN_F32 __llvm_add_rtn_f32 -#define BUILTIN_ADD_RTN_F64 __llvm_add_rtn_f64 -#define BUILTIN_ADD_RTN_F16 __llvm_add_rtn_f16 -#define BUILTIN_ADD_RTP_F32 __llvm_add_rtp_f32 -#define BUILTIN_ADD_RTP_F64 __llvm_add_rtp_f64 -#define BUILTIN_ADD_RTP_F16 __llvm_add_rtp_f16 -#define BUILTIN_ADD_RTZ_F32 __llvm_add_rtz_f32 -#define BUILTIN_ADD_RTZ_F64 __llvm_add_rtz_f64 -#define BUILTIN_ADD_RTZ_F16 __llvm_add_rtz_f16 - -#define BUILTIN_SUB_RTE_F32 __llvm_sub_rte_f32 -#define BUILTIN_SUB_RTE_F64 __llvm_sub_rte_f64 -#define BUILTIN_SUB_RTE_F16 __llvm_sub_rte_f16 -#define BUILTIN_SUB_RTN_F32 __llvm_sub_rtn_f32 -#define BUILTIN_SUB_RTN_F64 __llvm_sub_rtn_f64 -#define BUILTIN_SUB_RTN_F16 __llvm_sub_rtn_f16 -#define BUILTIN_SUB_RTP_F32 __llvm_sub_rtp_f32 -#define BUILTIN_SUB_RTP_F64 __llvm_sub_rtp_f64 -#define BUILTIN_SUB_RTP_F16 __llvm_sub_rtp_f16 -#define BUILTIN_SUB_RTZ_F32 __llvm_sub_rtz_f32 -#define BUILTIN_SUB_RTZ_F64 __llvm_sub_rtz_f64 -#define BUILTIN_SUB_RTZ_F16 __llvm_sub_rtz_f16 - -#define BUILTIN_MUL_RTE_F32 __llvm_mul_rte_f32 -#define BUILTIN_MUL_RTE_F64 __llvm_mul_rte_f64 -#define BUILTIN_MUL_RTE_F16 __llvm_mul_rte_f16 -#define BUILTIN_MUL_RTN_F32 __llvm_mul_rtn_f32 -#define BUILTIN_MUL_RTN_F64 __llvm_mul_rtn_f64 -#define BUILTIN_MUL_RTN_F16 __llvm_mul_rtn_f16 -#define BUILTIN_MUL_RTP_F32 __llvm_mul_rtp_f32 -#define BUILTIN_MUL_RTP_F64 __llvm_mul_rtp_f64 -#define BUILTIN_MUL_RTP_F16 __llvm_mul_rtp_f16 
-#define BUILTIN_MUL_RTZ_F32 __llvm_mul_rtz_f32 -#define BUILTIN_MUL_RTZ_F64 __llvm_mul_rtz_f64 -#define BUILTIN_MUL_RTZ_F16 __llvm_mul_rtz_f16 - -#define BUILTIN_DIV_RTE_F32 __llvm_div_rte_f32 -#define BUILTIN_DIV_RTE_F64 __llvm_div_rte_f64 -#define BUILTIN_DIV_RTE_F16 __llvm_div_rte_f16 -#define BUILTIN_DIV_RTN_F32 __llvm_div_rtn_f32 -#define BUILTIN_DIV_RTN_F64 __llvm_div_rtn_f64 -#define BUILTIN_DIV_RTN_F16 __llvm_div_rtn_f16 -#define BUILTIN_DIV_RTP_F32 __llvm_div_rtp_f32 -#define BUILTIN_DIV_RTP_F64 __llvm_div_rtp_f64 -#define BUILTIN_DIV_RTP_F16 __llvm_div_rtp_f16 -#define BUILTIN_DIV_RTZ_F32 __llvm_div_rtz_f32 -#define BUILTIN_DIV_RTZ_F64 __llvm_div_rtz_f64 -#define BUILTIN_DIV_RTZ_F16 __llvm_div_rtz_f16 - -#define BUILTIN_SQRT_RTE_F32 __llvm_sqrt_rte_f32 -#define BUILTIN_SQRT_RTE_F64 __llvm_sqrt_rte_f64 -#define BUILTIN_SQRT_RTE_F16 __llvm_sqrt_rte_f16 -#define BUILTIN_SQRT_RTN_F32 __llvm_sqrt_rtn_f32 -#define BUILTIN_SQRT_RTN_F64 __llvm_sqrt_rtn_f64 -#define BUILTIN_SQRT_RTN_F16 __llvm_sqrt_rtn_f16 -#define BUILTIN_SQRT_RTP_F32 __llvm_sqrt_rtp_f32 -#define BUILTIN_SQRT_RTP_F64 __llvm_sqrt_rtp_f64 -#define BUILTIN_SQRT_RTP_F16 __llvm_sqrt_rtp_f16 -#define BUILTIN_SQRT_RTZ_F32 __llvm_sqrt_rtz_f32 -#define BUILTIN_SQRT_RTZ_F64 __llvm_sqrt_rtz_f64 -#define BUILTIN_SQRT_RTZ_F16 __llvm_sqrt_rtz_f16 - -#define BUILTIN_FMA_RTE_F32 __llvm_fma_rte_f32 -#define BUILTIN_FMA_RTE_F64 __llvm_fma_rte_f64 -#define BUILTIN_FMA_RTE_F16 __llvm_fma_rte_f16 -#define BUILTIN_FMA_RTN_F32 __llvm_fma_rtn_f32 -#define BUILTIN_FMA_RTN_F64 __llvm_fma_rtn_f64 -#define BUILTIN_FMA_RTN_F16 __llvm_fma_rtn_f16 -#define BUILTIN_FMA_RTP_F32 __llvm_fma_rtp_f32 -#define BUILTIN_FMA_RTP_F64 __llvm_fma_rtp_f64 -#define BUILTIN_FMA_RTP_F16 __llvm_fma_rtp_f16 -#define BUILTIN_FMA_RTZ_F32 __llvm_fma_rtz_f32 -#define BUILTIN_FMA_RTZ_F64 __llvm_fma_rtz_f64 -#define BUILTIN_FMA_RTZ_F16 __llvm_fma_rtz_f16 - +#define ROUND_RTE 0 +#define ROUND_RTP 1 +#define ROUND_RTN 2 +#define ROUND_RTZ 3 + +#define 
BUILTIN_GETROUND_F32() __builtin_amdgcn_s_getreg((1 << 0) | (0 << 6) | ((2-1) << 11)) +#define BUILTIN_SETROUND_F32(X) __builtin_amdgcn_s_setreg((1 << 0) | (0 << 6) | ((2-1) << 11), X) +#define BUILTIN_GETROUND_F16F64() __builtin_amdgcn_s_getreg((1 << 0) | (2 << 6) | ((2-1) << 11)) +#define BUILTIN_SETROUND_F16F64(X) __builtin_amdgcn_s_setreg((1 << 0) | (2 << 6) | ((2-1) << 11), X) diff --git a/ocml/src/fmaD.cl b/ocml/src/fmaD.cl index b0797613..cf841761 100644 --- a/ocml/src/fmaD.cl +++ b/ocml/src/fmaD.cl @@ -13,15 +13,25 @@ MATH_MANGLE(fma)(double a, double b, double c) return BUILTIN_FMA_F64(a, b, c); } -#define GEN(LN,UN) \ +CONSTATTR double +MATH_MANGLE(fma_rte)(double a, double b, double c) +{ + return BUILTIN_FMA_F64(a, b, c); +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR double \ MATH_MANGLE(LN)(double a, double b, double c) \ { \ - return BUILTIN_##UN##_F64(a, b, c); \ + BUILTIN_SETROUND_F16F64(RM); \ + double ret = BUILTIN_FMA_F64(a, b, c); \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(fma_rte,FMA_RTE) -// GEN(fma_rtn,FMA_RTN) -// GEN(fma_rtp,FMA_RTP) -// GEN(fma_rtz,FMA_RTZ) +GEN(fma_rtn, ROUND_RTN) +GEN(fma_rtp, ROUND_RTP) +GEN(fma_rtz, ROUND_RTZ) diff --git a/ocml/src/fmaF.cl b/ocml/src/fmaF.cl index 2d262304..3192447c 100644 --- a/ocml/src/fmaF.cl +++ b/ocml/src/fmaF.cl @@ -19,15 +19,25 @@ MATH_MANGLE(fma)(float a, float b, float c) return BUILTIN_FMA_F32(a, b, c); } -#define GEN(LN,UN) \ +CONSTATTR float +MATH_MANGLE(fma_rte)(float a, float b, float c) +{ + return BUILTIN_FMA_F32(a, b, c); +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR float \ MATH_MANGLE(LN)(float a, float b, float c) \ { \ - return BUILTIN_##UN##_F32(a, b, c); \ + BUILTIN_SETROUND_F32(RM); \ + float ret = BUILTIN_FMA_F32(a, b, c); \ + BUILTIN_SETROUND_F32(ROUND_RTE); \ + return ret; \ } -// GEN(fma_rte,FMA_RTE) -// GEN(fma_rtn,FMA_RTN) -// GEN(fma_rtp,FMA_RTP) -// GEN(fma_rtz,FMA_RTZ) +GEN(fma_rtn, ROUND_RTN) 
+GEN(fma_rtp, ROUND_RTP) +GEN(fma_rtz, ROUND_RTZ) diff --git a/ocml/src/fmaH.cl b/ocml/src/fmaH.cl index 792e683e..012a2665 100644 --- a/ocml/src/fmaH.cl +++ b/ocml/src/fmaH.cl @@ -19,15 +19,25 @@ MATH_MANGLE(fma)(half a, half b, half c) return BUILTIN_FMA_F16(a, b, c); } -#define GEN(LN,UN) \ +CONSTATTR half +MATH_MANGLE(fma_rte)(half a, half b, half c) +{ + return BUILTIN_FMA_F16(a, b, c); +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR half \ MATH_MANGLE(LN)(half a, half b, half c) \ { \ - return BUILTIN_##UN##_F16(a, b, c); \ + BUILTIN_SETROUND_F16F64(RM); \ + half ret = BUILTIN_FMA_F16(a, b, c); \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(fma_rte,FMA_RTE) -// GEN(fma_rtn,FMA_RTN) -// GEN(fma_rtp,FMA_RTP) -// GEN(fma_rtz,FMA_RTZ) +GEN(fma_rtn, ROUND_RTN) +GEN(fma_rtp, ROUND_RTP) +GEN(fma_rtz, ROUND_RTZ) diff --git a/ocml/src/mulD.cl b/ocml/src/mulD.cl index 3ef05f05..13080148 100644 --- a/ocml/src/mulD.cl +++ b/ocml/src/mulD.cl @@ -7,15 +7,25 @@ #include "mathD.h" -#define GEN(LN,UN) \ +CONSTATTR double +MATH_MANGLE(mul_rte)(double x, double y) +{ + return x * y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR double \ MATH_MANGLE(LN)(double x, double y) \ { \ - return BUILTIN_##UN##_F64(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + double ret = x * y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(mul_rte,MUL_RTE) -// GEN(mul_rtn,MUL_RTN) -// GEN(mul_rtp,MUL_RTP) -// GEN(mul_rtz,MUL_RTZ) +GEN(mul_rtn, ROUND_RTN) +GEN(mul_rtp, ROUND_RTP) +GEN(mul_rtz, ROUND_RTZ) diff --git a/ocml/src/mulF.cl b/ocml/src/mulF.cl index fa4cc032..6f724ff9 100644 --- a/ocml/src/mulF.cl +++ b/ocml/src/mulF.cl @@ -7,15 +7,25 @@ #include "mathF.h" -#define GEN(LN,UN) \ +CONSTATTR float +MATH_MANGLE(mul_rte)(float x, float y) +{ + return x * y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR float \ MATH_MANGLE(LN)(float x, float y) \ { \ - return BUILTIN_##UN##_F32(x, y); \ + 
BUILTIN_SETROUND_F32(RM); \ + float ret = x * y; \ + BUILTIN_SETROUND_F32(ROUND_RTE); \ + return ret; \ } -// GEN(mul_rte,MUL_RTE) -// GEN(mul_rtn,MUL_RTN) -// GEN(mul_rtp,MUL_RTP) -// GEN(mul_rtz,MUL_RTZ) +GEN(mul_rtn, ROUND_RTN) +GEN(mul_rtp, ROUND_RTP) +GEN(mul_rtz, ROUND_RTZ) diff --git a/ocml/src/mulH.cl b/ocml/src/mulH.cl index 422cfb07..c753e1dc 100644 --- a/ocml/src/mulH.cl +++ b/ocml/src/mulH.cl @@ -7,15 +7,25 @@ #include "mathH.h" -#define GEN(LN,UN) \ +CONSTATTR half +MATH_MANGLE(mul_rte)(half x, half y) +{ + return x * y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR half \ MATH_MANGLE(LN)(half x, half y) \ { \ - return BUILTIN_##UN##_F16(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + half ret = x * y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(mul_rte,MUL_RTE) -// GEN(mul_rtn,MUL_RTN) -// GEN(mul_rtp,MUL_RTP) -// GEN(mul_rtz,MUL_RTZ) +GEN(mul_rtn, ROUND_RTN) +GEN(mul_rtp, ROUND_RTP) +GEN(mul_rtz, ROUND_RTZ) diff --git a/ocml/src/subD.cl b/ocml/src/subD.cl index 947649a2..9efd9e44 100644 --- a/ocml/src/subD.cl +++ b/ocml/src/subD.cl @@ -7,15 +7,25 @@ #include "mathD.h" -#define GEN(LN,UN) \ +CONSTATTR double +MATH_MANGLE(sub_rte)(double x, double y) +{ + return x - y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR double \ MATH_MANGLE(LN)(double x, double y) \ { \ - return BUILTIN_##UN##_F64(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + double ret = x - y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(sub_rte,SUB_RTE) -// GEN(sub_rtn,SUB_RTN) -// GEN(sub_rtp,SUB_RTP) -// GEN(sub_rtz,SUB_RTZ) +GEN(sub_rtn, ROUND_RTN) +GEN(sub_rtp, ROUND_RTP) +GEN(sub_rtz, ROUND_RTZ) diff --git a/ocml/src/subF.cl b/ocml/src/subF.cl index be847055..148b8c39 100644 --- a/ocml/src/subF.cl +++ b/ocml/src/subF.cl @@ -7,15 +7,25 @@ #include "mathF.h" -#define GEN(LN,UN) \ +CONSTATTR float +MATH_MANGLE(sub_rte)(float x, float y) +{ + return x - y; +} + +#pragma STDC FENV_ACCESS ON + +#define 
GEN(LN,RM) \ CONSTATTR float \ MATH_MANGLE(LN)(float x, float y) \ { \ - return BUILTIN_##UN##_F32(x, y); \ + BUILTIN_SETROUND_F32(RM); \ + float ret = x - y; \ + BUILTIN_SETROUND_F32(ROUND_RTE); \ + return ret; \ } -// GEN(sub_rte,SUB_RTE) -// GEN(sub_rtn,SUB_RTN) -// GEN(sub_rtp,SUB_RTP) -// GEN(sub_rtz,SUB_RTZ) +GEN(sub_rtn, ROUND_RTN) +GEN(sub_rtp, ROUND_RTP) +GEN(sub_rtz, ROUND_RTZ) diff --git a/ocml/src/subH.cl b/ocml/src/subH.cl index 327e65c1..35963af6 100644 --- a/ocml/src/subH.cl +++ b/ocml/src/subH.cl @@ -7,15 +7,25 @@ #include "mathH.h" -#define GEN(LN,UN) \ +CONSTATTR half +MATH_MANGLE(sub_rte)(half x, half y) +{ + return x - y; +} + +#pragma STDC FENV_ACCESS ON + +#define GEN(LN,RM) \ CONSTATTR half \ MATH_MANGLE(LN)(half x, half y) \ { \ - return BUILTIN_##UN##_F16(x, y); \ + BUILTIN_SETROUND_F16F64(RM); \ + half ret = x - y; \ + BUILTIN_SETROUND_F16F64(ROUND_RTE); \ + return ret; \ } -// GEN(sub_rte,SUB_RTE) -// GEN(sub_rtn,SUB_RTN) -// GEN(sub_rtp,SUB_RTP) -// GEN(sub_rtz,SUB_RTZ) +GEN(sub_rtn, ROUND_RTN) +GEN(sub_rtp, ROUND_RTP) +GEN(sub_rtz, ROUND_RTZ) From ca0451ca2dbb5aabbc7e18ba249adb2819762d7b Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 10:29:20 -0500 Subject: [PATCH 02/22] Remove unnecessary REQUIRES_16BIT_INSTS No special intrinsics are used in this function. This is a first sample to use in testing new end to end compile tests. Change-Id: I8b480fe7fc3c36e8cc98a00a9b92b019dd0e1138 --- ocml/src/asinH.cl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocml/src/asinH.cl b/ocml/src/asinH.cl index def285f4..fbb21961 100644 --- a/ocml/src/asinH.cl +++ b/ocml/src/asinH.cl @@ -9,7 +9,7 @@ CONSTATTR UGEN(asin) -REQUIRES_16BIT_INSTS CONSTATTR half +CONSTATTR half MATH_MANGLE(asin)(half x) { // Computes arcsin(x). 
From 888c0f2ebce60bdc933952b9697fa1c3b044f1ab Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Mon, 28 Nov 2022 18:34:37 -0500 Subject: [PATCH 03/22] Expand testing of compiler builds Support testing the ISA result for functions. Unfortunately I don't think anything is regularly running these. Change-Id: If445564ce3364a07276e8202fd63cd2a5639add2 --- CMakeLists.txt | 2 +- test/compile/CMakeLists.txt | 59 ++++++++++ test/compile/RunCompileTest.cmake | 37 +++++++ .../RunConstantFoldTest.cmake | 1 + test/compile/asin.cl | 21 ++++ test/compile/lgamma_r.cl | 103 ++++++++++++++++++ test/constant_folding/CMakeLists.txt | 37 ------- test/constant_folding/lgamma_r.cl | 102 ----------------- 8 files changed, 222 insertions(+), 140 deletions(-) create mode 100644 test/compile/CMakeLists.txt create mode 100644 test/compile/RunCompileTest.cmake rename test/{constant_folding => compile}/RunConstantFoldTest.cmake (97%) create mode 100644 test/compile/asin.cl create mode 100644 test/compile/lgamma_r.cl delete mode 100644 test/constant_folding/CMakeLists.txt delete mode 100644 test/constant_folding/lgamma_r.cl diff --git a/CMakeLists.txt b/CMakeLists.txt index 030d6331..b65f1b56 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -70,7 +70,7 @@ add_subdirectory(hip) add_subdirectory(asanrtl) enable_testing() -add_subdirectory(test/constant_folding) +add_subdirectory(test/compile) include(Packages) diff --git a/test/compile/CMakeLists.txt b/test/compile/CMakeLists.txt new file mode 100644 index 00000000..2a863db6 --- /dev/null +++ b/test/compile/CMakeLists.txt @@ -0,0 +1,59 @@ +##===-------------------------------------------------------------------------- +## ROCm Device Libraries +## +## This file is distributed under the University of Illinois Open Source +## License. See LICENSE.TXT for details. 
+##===-------------------------------------------------------------------------- + +if(TARGET FileCheck) + set(FILECHECK_BIN $) +else() + # FIXME: Is there a better way to get the binary directory? + # FileCheck is also not normally installed, so it only really works + # well with build directories by default. + find_program(FILECHECK_BIN FileCheck + HINTS ${LLVM_DIR}/../../../bin) +endif() + +if(NOT FILECHECK_BIN) + message(STATUS "FileCheck not found, not adding constant fold tests") + return() +endif() + +message(STATUS "Running constant fold tests") + +function(add_compile_test test_name func_name script test_cpu extra_check_prefixes) + add_test(NAME ${test_name}__${test_cpu} + COMMAND ${CMAKE_COMMAND} + -DCLANG_BIN=$ + -DBINARY_DIR=${PROJECT_BINARY_DIR} + -DFILECHECK_BIN=${FILECHECK_BIN} + -DOUTPUT_FILE=output.${name}.${test_cpu}.s + -DINPUT_FILE=${CMAKE_CURRENT_SOURCE_DIR}/${func_name}.cl + -DTEST_CPU=${test_cpu} + -DEXTRA_CHECK_PREFIX=${extra_check_prefixes} + -P ${script}) +endfunction() + + +# Add constant folding tests +function(add_constant_fold_test name test_cpu) + add_compile_test(constant_fold_${name} ${name} + ${CMAKE_CURRENT_SOURCE_DIR}/RunConstantFoldTest.cmake ${test_cpu} CHECK) +endfunction() + +# Add full to ISA compile tests +function(add_isa_test name test_cpu) + string(TOUPPER ${test_cpu} check_prefix) + add_compile_test(compile_${name} ${name} + ${CMAKE_CURRENT_SOURCE_DIR}/RunCompileTest.cmake ${test_cpu} "${check_prefix},GCN") +endfunction() + + +foreach(gpu gfx900 gfx1030) + add_constant_fold_test(lgamma_r ${gpu}) +endforeach() + +foreach(gpu gfx700 gfx803) + add_isa_test(asin ${gpu}) +endforeach() diff --git a/test/compile/RunCompileTest.cmake b/test/compile/RunCompileTest.cmake new file mode 100644 index 00000000..bf871985 --- /dev/null +++ b/test/compile/RunCompileTest.cmake @@ -0,0 +1,37 @@ +##===-------------------------------------------------------------------------- +## ROCm Device Libraries +## +## This file is distributed under 
the University of Illinois Open Source +## License. See LICENSE.TXT for details. +##===-------------------------------------------------------------------------- + +# Test execution is wrapped here because add_test only allows running +# one command at a time. + +# FIXME: It would be better to use llvm-lit and parse RUN lines from +# individual tests. + +execute_process(COMMAND + ${CLANG_BIN} -O3 -S -cl-std=CL2.0 + -target amdgcn-amd-amdhsa -mcpu=${TEST_CPU} + -Xclang -finclude-default-header + --rocm-path=${BINARY_DIR} + -mllvm -amdgpu-simplify-libcall=0 + -o ${OUTPUT_FILE} ${INPUT_FILE} + RESULT_VARIABLE CLANG_RESULT + ERROR_VARIABLE CLANG_ERR) +if(CLANG_RESULT) + message(FATAL_ERROR "Error compiling test: ${CLANG_ERR}") +endif() + +execute_process(COMMAND ${FILECHECK_BIN} -v --enable-var-scope + --allow-unused-prefixes + --dump-input=fail + --dump-input-filter=all + ${INPUT_FILE} --input-file ${OUTPUT_FILE} + --check-prefixes=CHECK,${EXTRA_CHECK_PREFIX} + RESULT_VARIABLE FILECHECK_RESULT + ERROR_VARIABLE FILECHECK_ERROR) +if(FILECHECK_RESULT) + message(FATAL_ERROR "Error in test output: ${FILECHECK_ERROR}") +endif() diff --git a/test/constant_folding/RunConstantFoldTest.cmake b/test/compile/RunConstantFoldTest.cmake similarity index 97% rename from test/constant_folding/RunConstantFoldTest.cmake rename to test/compile/RunConstantFoldTest.cmake index 9045bbdd..54246900 100644 --- a/test/constant_folding/RunConstantFoldTest.cmake +++ b/test/compile/RunConstantFoldTest.cmake @@ -26,6 +26,7 @@ endif() execute_process(COMMAND ${FILECHECK_BIN} -v --enable-var-scope ${INPUT_FILE} --input-file ${OUTPUT_FILE} + --check-prefix=CONSTANTFOLD RESULT_VARIABLE FILECHECK_RESULT ERROR_VARIABLE FILECHECK_ERROR) if(FILECHECK_RESULT) diff --git a/test/compile/asin.cl b/test/compile/asin.cl new file mode 100644 index 00000000..3bce2274 --- /dev/null +++ b/test/compile/asin.cl @@ -0,0 +1,21 @@ + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// GCN: {{^}}test_asin_f16: +// GFX700: 
v_cvt_f32_f16{{(_e32)?}} [[CVT:v[0-9]+]] +// GFX700: v_cmp_le_f32{{(_e64)?}} s{{\[[0-9]+:[0-9]+\]}}, |[[CVT]]|, 0.5 +// GFX700: v_mul_f32 +// GFX700: v_mad_f32 +// GFX700: v_sqrt_f32 +// GFX700: v_bfi_b32 +// GFX700: v_cvt_f16_f32 + + +// GFX803: v_cmp_le_f16{{(_e64)?}} s{{\[[0-9]+:[0-9]+\]}}, |{{v[0-9]+}}|, 0.5 +// GFX803: v_mad_f32 +// GFX803: v_sqrt_f32 +// GFX803: v_bfi_b32 +kernel void test_asin_f16(global half* restrict out, global half* restrict in) { + int id = get_local_id(0); + out[id] = asin(in[id]); +} diff --git a/test/compile/lgamma_r.cl b/test/compile/lgamma_r.cl new file mode 100644 index 00000000..1e198422 --- /dev/null +++ b/test/compile/lgamma_r.cl @@ -0,0 +1,103 @@ +// Verify lgamma_r function constant folds to correct values. +// Run with filecheck from test cmake + +__attribute__((always_inline)) +static float test_lgamma_r(float val, volatile global int* sign_out) { + int tmp; + float result = lgamma_r(val, &tmp); + *sign_out = tmp; + return result; +} + +// CHECK-LABEL: {{^}}constant_fold_lgamma_r_f32: +// CONSTANTFOLD-LABEL: @constant_fold_lgamma_r_f32( +kernel void constant_fold_lgamma_r_f32(volatile global float* out, + volatile global int* sign_out) { + // CONSTANTFOLD: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000 + out[0] = test_lgamma_r(0.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000 + out[0] = test_lgamma_r(-0.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF8000000000000, + out[0] = test_lgamma_r(__builtin_nanf(""), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF4000000000000, + out[0] = test_lgamma_r(__builtin_nansf(""), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(__builtin_inff(), 
sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(-__builtin_inff(), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x419DE28020000000, + out[0] = test_lgamma_r(0x1.0p+23f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(-0x1.0p+23f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0.000000e+00, + out[0] = test_lgamma_r(1.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0.000000e+00, + out[0] = test_lgamma_r(2.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x3FE62E4300000000, + out[0] = test_lgamma_r(3.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x3FE250D040000000, + out[0] = test_lgamma_r(0.5f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x405601E680000000, + out[0] = test_lgamma_r(0x1.0p-127f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x419DE28040000000, + out[0] = test_lgamma_r(nextafter(0x1.0p+23f, __builtin_inff()), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0x419DE28000000000, + out[0] = test_lgamma_r(nextafter(0x1.0p+23f, -__builtin_inff()), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0xC19DE28040000000, + out[0] = test_lgamma_r(nextafter(-0x1.0p+23f, __builtin_inff()), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = 
test_lgamma_r(nextafter(-0x1.0p+23f, -__builtin_inff()), sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(-1.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(-2.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 0, + // CONSTANTFOLD-NEXT: store volatile float 0x7FF0000000000000, + out[0] = test_lgamma_r(-3.0f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0xBFF4F1B100000000, + out[0] = test_lgamma_r(-3.5f, sign_out); + + // CONSTANTFOLD-NEXT: store volatile i32 1, + // CONSTANTFOLD-NEXT: store volatile float 0xC19DE28040000000, + out[0] = test_lgamma_r(as_float(0xcaffffff), sign_out); +} diff --git a/test/constant_folding/CMakeLists.txt b/test/constant_folding/CMakeLists.txt deleted file mode 100644 index 317bc8d0..00000000 --- a/test/constant_folding/CMakeLists.txt +++ /dev/null @@ -1,37 +0,0 @@ -##===-------------------------------------------------------------------------- -## ROCm Device Libraries -## -## This file is distributed under the University of Illinois Open Source -## License. See LICENSE.TXT for details. -##===-------------------------------------------------------------------------- - -if(TARGET FileCheck) - set(FILECHECK_BIN $) -else() - # FIXME: Is there a better way to get the binary directory? - # FileCheck is also not normally installed, so it only really works - # well with build directories by default. 
- find_program(FILECHECK_BIN FileCheck - HINTS ${LLVM_DIR}/../../../bin) -endif() - -if(NOT FILECHECK_BIN) - message(STATUS "FileCheck not found, not adding constant fold tests") - return() -endif() - -message(STATUS "Running constant fold tests") - -function(add_constant_fold_test name) - add_test(NAME constant_fold_${name} - COMMAND ${CMAKE_COMMAND} - -DCLANG_BIN=$ - -DBINARY_DIR=${PROJECT_BINARY_DIR} - -DFILECHECK_BIN=${FILECHECK_BIN} - -DOUTPUT_FILE=output.${name}.ll - -DINPUT_FILE=${CMAKE_CURRENT_SOURCE_DIR}/${name}.cl - -DTEST_CPU=gfx900 - -P ${CMAKE_CURRENT_SOURCE_DIR}/RunConstantFoldTest.cmake) -endfunction() - -add_constant_fold_test(lgamma_r) diff --git a/test/constant_folding/lgamma_r.cl b/test/constant_folding/lgamma_r.cl deleted file mode 100644 index b8489b5a..00000000 --- a/test/constant_folding/lgamma_r.cl +++ /dev/null @@ -1,102 +0,0 @@ -// Verify lgamma_r function constant folds to correct values. -// Run with filecheck from test cmake - -__attribute__((always_inline)) -static float test_lgamma_r(float val, volatile global int* sign_out) { - int tmp; - float result = lgamma_r(val, &tmp); - *sign_out = tmp; - return result; -} - -// CHECK-LABEL: @constant_fold_lgamma_r_f32( -kernel void constant_fold_lgamma_r_f32(volatile global float* out, - volatile global int* sign_out) { - // CHECK: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000 - out[0] = test_lgamma_r(0.0f, sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000 - out[0] = test_lgamma_r(-0.0f, sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF8000000000000, - out[0] = test_lgamma_r(__builtin_nanf(""), sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF4000000000000, - out[0] = test_lgamma_r(__builtin_nansf(""), sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - 
out[0] = test_lgamma_r(__builtin_inff(), sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - out[0] = test_lgamma_r(-__builtin_inff(), sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x419DE28020000000, - out[0] = test_lgamma_r(0x1.0p+23f, sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - out[0] = test_lgamma_r(-0x1.0p+23f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0.000000e+00, - out[0] = test_lgamma_r(1.0f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0.000000e+00, - out[0] = test_lgamma_r(2.0f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x3FE62E4300000000, - out[0] = test_lgamma_r(3.0f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x3FE250D040000000, - out[0] = test_lgamma_r(0.5f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x405601E680000000, - out[0] = test_lgamma_r(0x1.0p-127f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x419DE28040000000, - out[0] = test_lgamma_r(nextafter(0x1.0p+23f, __builtin_inff()), sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0x419DE28000000000, - out[0] = test_lgamma_r(nextafter(0x1.0p+23f, -__builtin_inff()), sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0xC19DE28040000000, - out[0] = test_lgamma_r(nextafter(-0x1.0p+23f, __builtin_inff()), sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - out[0] = test_lgamma_r(nextafter(-0x1.0p+23f, -__builtin_inff()), sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 
0x7FF0000000000000, - out[0] = test_lgamma_r(-1.0f, sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - out[0] = test_lgamma_r(-2.0f, sign_out); - - // CHECK-NEXT: store volatile i32 0, - // CHECK-NEXT: store volatile float 0x7FF0000000000000, - out[0] = test_lgamma_r(-3.0f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0xBFF4F1B100000000, - out[0] = test_lgamma_r(-3.5f, sign_out); - - // CHECK-NEXT: store volatile i32 1, - // CHECK-NEXT: store volatile float 0xC19DE28040000000, - out[0] = test_lgamma_r(as_float(0xcaffffff), sign_out); -} From 18c44b021d069830ab9c64f59b694145aadf2080 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Tue, 22 Nov 2022 00:39:24 -0500 Subject: [PATCH 04/22] Only apply inf/nan check in frexp to gfx6. The instructions have correct inf/nan behavior as-is on everything since gfx7. Users should probably just directly call these builtins. Change-Id: I7a3a8217f7e10e2d1c1b87b55dbf2290b02a13ef --- ocml/src/frexpD.cl | 12 ++++++-- ocml/src/frexpF.cl | 12 ++++++-- ocml/src/frexpH.cl | 12 ++++++-- ocml/src/opts.h | 6 ++++ test/compile/CMakeLists.txt | 4 +++ test/compile/frexp.cl | 57 +++++++++++++++++++++++++++++++++++++ 6 files changed, 94 insertions(+), 9 deletions(-) create mode 100644 test/compile/frexp.cl diff --git a/ocml/src/frexpD.cl b/ocml/src/frexpD.cl index 4f9d252d..ecf80a0b 100644 --- a/ocml/src/frexpD.cl +++ b/ocml/src/frexpD.cl @@ -12,8 +12,14 @@ MATH_MANGLE(frexp)(double x, __private int *ep) { int e = BUILTIN_FREXP_EXP_F64(x); double r = BUILTIN_FREXP_MANT_F64(x); - bool c = BUILTIN_CLASS_F64(x, CLASS_PINF|CLASS_NINF|CLASS_SNAN|CLASS_QNAN); - *ep = c ? 0 : e; - return c ? x : r; + + if (HAVE_BUGGY_FREXP_INSTRUCTIONS()) { + bool isfinite = BUILTIN_ISFINITE_F64(x); + *ep = isfinite ? e : 0; + return isfinite ? 
r : x; + } + + *ep = e; + return r; } diff --git a/ocml/src/frexpF.cl b/ocml/src/frexpF.cl index c5b0b84b..6a3c18d8 100644 --- a/ocml/src/frexpF.cl +++ b/ocml/src/frexpF.cl @@ -12,8 +12,14 @@ MATH_MANGLE(frexp)(float x, __private int *ep) { int e = BUILTIN_FREXP_EXP_F32(x); float r = BUILTIN_FREXP_MANT_F32(x); - bool c = BUILTIN_CLASS_F32(x, CLASS_PINF|CLASS_NINF|CLASS_SNAN|CLASS_QNAN); - *ep = c ? 0 : e; - return c ? x : r; + + if (HAVE_BUGGY_FREXP_INSTRUCTIONS()) { + bool isfinite = BUILTIN_ISFINITE_F32(x); + *ep = isfinite ? e : 0; + return isfinite ? r : x; + } + + *ep = e; + return r; } diff --git a/ocml/src/frexpH.cl b/ocml/src/frexpH.cl index 0468f509..66b267c3 100644 --- a/ocml/src/frexpH.cl +++ b/ocml/src/frexpH.cl @@ -23,8 +23,14 @@ MATH_MANGLE(frexp)(half x, __private int *ep) { int e = (int)BUILTIN_FREXP_EXP_F16(x); half r = BUILTIN_FREXP_MANT_F16(x); - bool c = BUILTIN_CLASS_F16(x, CLASS_PINF|CLASS_NINF|CLASS_SNAN|CLASS_QNAN); - *ep = c ? 0 : e; - return c ? x : r; + + if (HAVE_BUGGY_FREXP_INSTRUCTIONS()) { + bool isfinite = BUILTIN_ISFINITE_F16(x); + *ep = isfinite ? e : 0; + return isfinite ? r : x; + } + + *ep = e; + return r; } diff --git a/ocml/src/opts.h b/ocml/src/opts.h index 9b82930e..f2694895 100644 --- a/ocml/src/opts.h +++ b/ocml/src/opts.h @@ -13,3 +13,9 @@ #define DAZ_OPT() __oclc_daz_opt #define CORRECTLY_ROUNDED_SQRT32() __oclc_correctly_rounded_sqrt32 +// GFX6 had unhelpful handling of infinities in v_frexp_* +// instructions. +// +// TODO: Really there should be a generic frexp intrinsic and the +// backend could handle the hardware workarounds. 
+#define HAVE_BUGGY_FREXP_INSTRUCTIONS() (__oclc_ISA_version < 7000) diff --git a/test/compile/CMakeLists.txt b/test/compile/CMakeLists.txt index 2a863db6..e7d5109d 100644 --- a/test/compile/CMakeLists.txt +++ b/test/compile/CMakeLists.txt @@ -57,3 +57,7 @@ endforeach() foreach(gpu gfx700 gfx803) add_isa_test(asin ${gpu}) endforeach() + +foreach(gpu gfx600 gfx700) + add_isa_test(frexp ${gpu}) +endforeach() diff --git a/test/compile/frexp.cl b/test/compile/frexp.cl new file mode 100644 index 00000000..780c5415 --- /dev/null +++ b/test/compile/frexp.cl @@ -0,0 +1,57 @@ + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// Test that a hardware bug is worked around for gfx6, not applied +// later. + +// GCN-LABEL: {{^}}test_frexp_f32: +// GFX600-DAG: s_movk_i32 [[CLASS_MASK:s[0-9]+]], 0x1f8 +// GFX600-DAG: v_frexp_mant_f32{{(_e32)?}} [[MANT:v[0-9]+]], [[SRC:v[0-9]+]] +// GFX600-DAG: v_frexp_exp_i32_f32{{(_e32)?}} [[EXP:v[0-9]+]], [[SRC:v[0-9]+]] + +// GFX600-DAG: v_cmp_class_f32{{(_e64)?}} [[CMP:(vcc|s{{\[[0-9]+:[0-9]+\]}})]], [[SRC]], [[CLASS_MASK]] + +// GFX600-DAG: v_cndmask_b32{{(_e32)?|(e64)?}} v{{[0-9]+}}, [[SRC]], [[MANT]], [[CMP]] +// GFX600-DAG: v_cndmask_b32{{(_e32)?|(e64)?}} v{{[0-9]+}}, 0, [[EXP]], [[CMP]] + + +// GFX700-NOT: v_cmp_class +// GFX700-DAG: v_frexp_mant_f32{{(_e32)?}} [[MANT:v[0-9]+]], [[SRC:v[0-9]+]] +// GFX700-DAG: v_frexp_exp_i32_f32{{(_e32)?}} [[EXP:v[0-9]+]], [[SRC:v[0-9]+]] +// GFX700-NOT: v_cmp_class +kernel void test_frexp_f32(global float* restrict out0, + global int* restrict out1, + global float* restrict in) { + int id = get_local_id(0); + + int exponent; + out0[id] = frexp(in[id], &exponent); + out1[id] = exponent; +} + +// GCN-LABEL: {{^}}test_frexp_f64: + +// GFX600-DAG: s_movk_i32 [[CLASS_MASK:s[0-9]+]], 0x1f8 +// GFX600-DAG: v_frexp_mant_f64{{(_e32)?}} v{{\[}}[[MANT_LO:[0-9]+]]:[[MANT_HI:[0-9]+]]{{\]}}, [[SRC:v\[[0-9]+:[0-9]+\]]] +// GFX600-DAG: v_frexp_exp_i32_f64{{(_e32)?}} [[EXP:v[0-9]+]], [[SRC:v\[[0-9]+:[0-9]+\]]] + +// 
GFX600-DAG: v_cmp_class_f64{{(_e64)?}} [[CMP:(vcc|s{{\[[0-9]+:[0-9]+\]}})]], [[SRC]], [[CLASS_MASK]] + +// GFX600-DAG: v_cndmask_b32{{(_e32)?|(e64)?}} v{{[0-9]+}}, v{{[0-9]+}}, v[[MANT_LO]], [[CMP]] +// GFX600-DAG: v_cndmask_b32{{(_e32)?|(e64)?}} v{{[0-9]+}}, v{{[0-9]+}}, v[[MANT_HI]], [[CMP]] +// GFX600-DAG: v_cndmask_b32{{(_e32)?|(e64)?}} v{{[0-9]+}}, 0, [[EXP]], [[CMP]] + + +// GFX700-NOT: v_cmp_class +// GFX700-DAG: v_frexp_mant_f64 +// GFX700-DAG: v_frexp_exp_i32_f64 +// GFX700-NOT: v_cmp_class +kernel void test_frexp_f64(global double* restrict out0, + global int* restrict out1, + global double* restrict in) { + int id = get_local_id(0); + + int exponent; + out0[id] = frexp(in[id], &exponent); + out1[id] = exponent; +} From 3f32a8a4c80b1eed688d1ca31dfd810fc107f5dc Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 08:40:45 -0500 Subject: [PATCH 05/22] Use BUILTIN_ISFINITE_* in place of class ninf|pinf|qnan|snan Change-Id: Iec520b96a6a4540eaca643d3ffecd7f4bc518de8 --- ocml/src/asinhH.cl | 2 +- ocml/src/cexpD.cl | 8 ++++---- ocml/src/cexpF.cl | 8 ++++---- ocml/src/csinhD.cl | 8 ++++---- ocml/src/csinhF.cl | 8 ++++---- ocml/src/ctanhD.cl | 2 +- ocml/src/ctanhF.cl | 2 +- ocml/src/logF_base.h | 4 ++-- ocml/src/sincospiD.cl | 6 +++--- ocml/src/sincospiH.cl | 6 +++--- ocml/src/sinpiD.cl | 2 +- ocml/src/sinpiH.cl | 2 +- ocml/src/tanpiD.cl | 2 +- ocml/src/tanpiF.cl | 2 +- ocml/src/tanpiH.cl | 2 +- 15 files changed, 32 insertions(+), 32 deletions(-) diff --git a/ocml/src/asinhH.cl b/ocml/src/asinhH.cl index a9590606..27976e32 100644 --- a/ocml/src/asinhH.cl +++ b/ocml/src/asinhH.cl @@ -18,7 +18,7 @@ MATH_MANGLE(asinh)(half hx) ret = BUILTIN_COPYSIGN_F16((half)(BUILTIN_LOG2_F32(t) * 0x1.62e430p-1f), hx); if (!FINITE_ONLY_OPT()) { - ret = BUILTIN_CLASS_F16(hx, CLASS_NINF|CLASS_PINF|CLASS_QNAN|CLASS_SNAN) ? hx : ret; + ret = BUILTIN_ISFINITE_F16(hx) ? 
ret : hx; } return ret; diff --git a/ocml/src/cexpD.cl b/ocml/src/cexpD.cl index 450f97f0..89ff0189 100644 --- a/ocml/src/cexpD.cl +++ b/ocml/src/cexpD.cl @@ -23,14 +23,14 @@ MATH_MANGLE(cexp)(double2 z) double ri = ex * sy; if (!FINITE_ONLY_OPT()) { - bool b = BUILTIN_CLASS_F64(y, CLASS_NINF|CLASS_PINF|CLASS_QNAN|CLASS_SNAN); + bool isfinite = BUILTIN_ISFINITE_F64(y); if (BUILTIN_CLASS_F64(x, CLASS_NINF)) { rr = 0.0; - ri = b ? 0.0 : ri; + ri = isfinite ? ri : 0.0; } if (BUILTIN_CLASS_F64(x, CLASS_PINF)) { - rr = b ? AS_DOUBLE(PINFBITPATT_DP64) : rr; - ri = b ? AS_DOUBLE(QNANBITPATT_DP64) : ri; + rr = isfinite ? rr : AS_DOUBLE(PINFBITPATT_DP64); + ri = isfinite ? ri : AS_DOUBLE(QNANBITPATT_DP64); ri = y == 0.0 ? y : ri; } ri = (BUILTIN_ISNAN_F64(x) & (y == 0.0)) ? y : ri; diff --git a/ocml/src/cexpF.cl b/ocml/src/cexpF.cl index 38c9d391..f254ee17 100644 --- a/ocml/src/cexpF.cl +++ b/ocml/src/cexpF.cl @@ -23,14 +23,14 @@ MATH_MANGLE(cexp)(float2 z) float ri = ex * sy; if (!FINITE_ONLY_OPT()) { - bool b = BUILTIN_CLASS_F32(y, CLASS_NINF|CLASS_PINF|CLASS_QNAN|CLASS_SNAN); + bool finite = BUILTIN_ISFINITE_F32(y); if (BUILTIN_CLASS_F32(x, CLASS_NINF)) { rr = 0.0f; - ri = b ? 0.0f : ri; + ri = finite ? ri : 0.0f; } if (BUILTIN_CLASS_F32(x, CLASS_PINF)) { - rr = b ? AS_FLOAT(PINFBITPATT_SP32) : rr; - ri = b ? AS_FLOAT(QNANBITPATT_SP32) : ri; + rr = finite ? rr : AS_FLOAT(PINFBITPATT_SP32); + ri = finite ? ri : AS_FLOAT(QNANBITPATT_SP32); ri = y == 0.0f ? y : ri; } ri = (BUILTIN_ISNAN_F32(x) & (y == 0.0f)) ? y : ri; diff --git a/ocml/src/csinhD.cl b/ocml/src/csinhD.cl index 7b0e4a12..ea0005c0 100644 --- a/ocml/src/csinhD.cl +++ b/ocml/src/csinhD.cl @@ -37,11 +37,11 @@ MATH_MANGLE(csinh)(double2 z) bool s = x >= 0x1.0p-27; double rr = BUILTIN_FLDEXP_F64(BUILTIN_COPYSIGN_F64(s ? 
sxhi : x, z.x) * cy, s); double ri = BUILTIN_FLDEXP_F64(cxhi * sy, 1); - + if (!FINITE_ONLY_OPT()) { - rr = (BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_NZER|CLASS_PINF|CLASS_NINF) & - BUILTIN_CLASS_F64(z.y, CLASS_PINF|CLASS_NINF|CLASS_QNAN|CLASS_SNAN)) ? z.x : rr; - ri = (BUILTIN_CLASS_F64(x, CLASS_PINF|CLASS_NINF|CLASS_QNAN|CLASS_SNAN) & (z.y == 0.0)) ? z.y : ri; + rr = (!BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_NZER|CLASS_PINF|CLASS_NINF) | + BUILTIN_ISFINITE_F64(z.y)) ? rr : z.x; + ri = (BUILTIN_ISFINITE_F64(x) | (z.y != 0.0)) ? ri : z.y; } return (double2)(rr, ri); diff --git a/ocml/src/csinhF.cl b/ocml/src/csinhF.cl index f0599fe5..d099794b 100644 --- a/ocml/src/csinhF.cl +++ b/ocml/src/csinhF.cl @@ -37,11 +37,11 @@ MATH_MANGLE(csinh)(float2 z) bool s = x >= 0x1.0p-12f; float rr = BUILTIN_FLDEXP_F32(BUILTIN_COPYSIGN_F32(s ? sxhi : x, z.x) * cy, s); float ri = BUILTIN_FLDEXP_F32(cxhi * sy, 1); - + if (!FINITE_ONLY_OPT()) { - rr = (BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_NZER|CLASS_PINF|CLASS_NINF) & - BUILTIN_CLASS_F32(z.y, CLASS_PINF|CLASS_NINF|CLASS_QNAN|CLASS_SNAN)) ? z.x : rr; - ri = (BUILTIN_CLASS_F32(x, CLASS_PINF|CLASS_NINF|CLASS_QNAN|CLASS_SNAN) & (z.y == 0.0f)) ? z.y : ri; + rr = (!BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_NZER|CLASS_PINF|CLASS_NINF) | + BUILTIN_ISFINITE_F32(z.y)) ? rr : z.x; + ri = (BUILTIN_ISFINITE_F32(x) | (z.y != 0.0f)) ? ri : z.y; } return (float2)(rr, ri); diff --git a/ocml/src/ctanhD.cl b/ocml/src/ctanhD.cl index 89669777..120c40d1 100644 --- a/ocml/src/ctanhD.cl +++ b/ocml/src/ctanhD.cl @@ -41,7 +41,7 @@ MATH_MANGLE(ctanh)(double2 z) if (!FINITE_ONLY_OPT()) { bool xn = BUILTIN_ISNAN_F64(x); - bool yin = BUILTIN_CLASS_F64(z.y, CLASS_NINF|CLASS_PINF|CLASS_QNAN|CLASS_SNAN); + bool yin = !BUILTIN_ISFINITE_F64(z.y); bool ni = BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_PSUB|CLASS_PNOR) & yin; rr = (ni | xn) ? AS_DOUBLE(QNANBITPATT_DP64) : rr; ri = ni ? 
AS_DOUBLE(QNANBITPATT_DP64) : ri; diff --git a/ocml/src/ctanhF.cl b/ocml/src/ctanhF.cl index 53fd8188..eb5c07ce 100644 --- a/ocml/src/ctanhF.cl +++ b/ocml/src/ctanhF.cl @@ -41,7 +41,7 @@ MATH_MANGLE(ctanh)(float2 z) if (!FINITE_ONLY_OPT()) { bool xn = BUILTIN_ISNAN_F32(x); - bool yin = BUILTIN_CLASS_F32(z.y, CLASS_NINF|CLASS_PINF|CLASS_QNAN|CLASS_SNAN); + bool yin = !BUILTIN_ISFINITE_F32(z.y); bool ni = BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_PSUB|CLASS_PNOR) & yin; rr = (ni | xn) ? AS_FLOAT(QNANBITPATT_SP32) : rr; ri = ni ? AS_FLOAT(QNANBITPATT_SP32) : ri; diff --git a/ocml/src/logF_base.h b/ocml/src/logF_base.h index 763623ab..f6756d04 100644 --- a/ocml/src/logF_base.h +++ b/ocml/src/logF_base.h @@ -55,7 +55,7 @@ MATH_MANGLE(log)(float x) r = MATH_MAD(yh, ch, MATH_MAD(yt, ch, MATH_MAD(yh, ct, yt*ct))); } - r = BUILTIN_CLASS_F32(y, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) != 0 ? y : r; + r = BUILTIN_ISFINITE_F32(y) ? r : y; return r; #endif } @@ -103,7 +103,7 @@ MATH_MANGLE(log)(float x) r = MATH_MAD(yh, ch, MATH_MAD(yt, ch, MATH_MAD(yh, ct, yt*ct))); } - r = BUILTIN_CLASS_F32(y, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) != 0 ? y : r; + r = BUILTIN_ISFINITE_F32(y) ? r : y; #if defined COMPILING_LOG10 r = r - (s ? 0x1.344136p+3f : 0.0f); diff --git a/ocml/src/sincospiD.cl b/ocml/src/sincospiD.cl index 4ede0cc7..4bb2db0a 100644 --- a/ocml/src/sincospiD.cl +++ b/ocml/src/sincospiD.cl @@ -24,9 +24,9 @@ MATH_MANGLE(sincospi)(double x, __private double * cp) c.hi ^= flip; if (!FINITE_ONLY_OPT()) { - bool nori = BUILTIN_CLASS_F64(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF); - s = nori ? AS_INT2(QNANBITPATT_DP64) : s; - c = nori ? AS_INT2(QNANBITPATT_DP64) : c; + bool finite = BUILTIN_ISFINITE_F64(x); + s = finite ? s : AS_INT2(QNANBITPATT_DP64); + c = finite ? 
c : AS_INT2(QNANBITPATT_DP64); } *cp = AS_DOUBLE(c); diff --git a/ocml/src/sincospiH.cl b/ocml/src/sincospiH.cl index a1ba325d..4e4b6332 100644 --- a/ocml/src/sincospiH.cl +++ b/ocml/src/sincospiH.cl @@ -35,9 +35,9 @@ MATH_MANGLE(sincospi)(half x, __private half *cp) c ^= flip; if (!FINITE_ONLY_OPT()) { - bool nori = BUILTIN_CLASS_F16(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF); - c = nori ? (short)QNANBITPATT_HP16 : c; - s = nori ? (short)QNANBITPATT_HP16 : s; + bool finite = BUILTIN_ISFINITE_F16(x); + c = finite ? c : (short)QNANBITPATT_HP16; + s = finite ? s : (short)QNANBITPATT_HP16; } *cp = AS_HALF(c); diff --git a/ocml/src/sinpiD.cl b/ocml/src/sinpiD.cl index ab208901..9fd4445c 100644 --- a/ocml/src/sinpiD.cl +++ b/ocml/src/sinpiD.cl @@ -18,7 +18,7 @@ MATH_MANGLE(sinpi)(double x) s.hi ^= (r.i > 1 ? 0x80000000 : 0) ^ (AS_INT2(x).hi & 0x80000000); if (!FINITE_ONLY_OPT()) { - s = BUILTIN_CLASS_F64(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) ? AS_INT2(QNANBITPATT_DP64) : s; + s = BUILTIN_ISFINITE_F64(x) ? s : AS_INT2(QNANBITPATT_DP64); } return AS_DOUBLE(s); diff --git a/ocml/src/sinpiH.cl b/ocml/src/sinpiH.cl index a5587913..51a95e26 100644 --- a/ocml/src/sinpiH.cl +++ b/ocml/src/sinpiH.cl @@ -20,7 +20,7 @@ MATH_MANGLE(sinpi)(half x) s ^= (r.i > (short)1 ? (short)0x8000 : (short)0) ^ (AS_SHORT(x) & (short)0x8000); if (!FINITE_ONLY_OPT()) { - s = BUILTIN_CLASS_F16(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) ? (short)QNANBITPATT_HP16 : s; + s = BUILTIN_ISFINITE_F16(x) ? s : (short)QNANBITPATT_HP16; } return AS_HALF(s); diff --git a/ocml/src/tanpiD.cl b/ocml/src/tanpiD.cl index 90c746ef..ab58aa90 100644 --- a/ocml/src/tanpiD.cl +++ b/ocml/src/tanpiD.cl @@ -17,7 +17,7 @@ MATH_MANGLE(tanpi)(double x) t.hi ^= AS_INT2(x).hi & (int)0x80000000; if (!FINITE_ONLY_OPT()) { - t = BUILTIN_CLASS_F64(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) ? AS_INT2(QNANBITPATT_DP64) : t; + t = BUILTIN_ISFINITE_F64(x) ? 
t : AS_INT2(QNANBITPATT_DP64); } return AS_DOUBLE(t); diff --git a/ocml/src/tanpiF.cl b/ocml/src/tanpiF.cl index a13b9143..49cb478e 100644 --- a/ocml/src/tanpiF.cl +++ b/ocml/src/tanpiF.cl @@ -17,7 +17,7 @@ MATH_MANGLE(tanpi)(float x) t ^= AS_INT(x) & (int)0x80000000; if (!FINITE_ONLY_OPT()) { - t = BUILTIN_CLASS_F32(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) ? QNANBITPATT_SP32 : t; + t = BUILTIN_ISFINITE_F32(x) ? t : QNANBITPATT_SP32; } return AS_FLOAT(t); diff --git a/ocml/src/tanpiH.cl b/ocml/src/tanpiH.cl index 67f16635..eceed041 100644 --- a/ocml/src/tanpiH.cl +++ b/ocml/src/tanpiH.cl @@ -19,7 +19,7 @@ MATH_MANGLE(tanpi)(half x) t ^= AS_SHORT(x) & (short)0x8000; if (!FINITE_ONLY_OPT()) { - t = BUILTIN_CLASS_F16(x, CLASS_SNAN|CLASS_QNAN|CLASS_NINF|CLASS_PINF) ? (short)QNANBITPATT_HP16 : t; + t = BUILTIN_ISFINITE_F16(x) ? t : (short)QNANBITPATT_HP16; } return AS_HALF(t); From fb9884b38c78fcdc0a70202dee22226705bc476b Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 12:24:03 -0500 Subject: [PATCH 06/22] Code simplification for csinh Not having code in the finite_only_opt case is easier to follow. Change-Id: Ie45f06366c712c3c5a4058b79fe7599e93ef51fb --- ocml/src/csinhD.cl | 13 ++++++------- ocml/src/csinhF.cl | 13 ++++++------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/ocml/src/csinhD.cl b/ocml/src/csinhD.cl index ea0005c0..83eb30ba 100644 --- a/ocml/src/csinhD.cl +++ b/ocml/src/csinhD.cl @@ -24,14 +24,13 @@ MATH_MANGLE(csinh)(double2 z) double cy; double sy = MATH_MANGLE(sincos)(z.y, &cy); - double cxhi, sxhi; - if (FINITE_ONLY_OPT()) { - cxhi = cx.hi; - sxhi = sx.hi; - } else { + double cxhi = cx.hi; + double sxhi = sx.hi; + + if (!FINITE_ONLY_OPT()) { bool b = x >= 0x1.6395a2079b70cp+9; - cxhi = b ? AS_DOUBLE(PINFBITPATT_DP64) : cx.hi; - sxhi = b ? AS_DOUBLE(PINFBITPATT_DP64) : sx.hi; + cxhi = b ? AS_DOUBLE(PINFBITPATT_DP64) : cxhi; + sxhi = b ? 
AS_DOUBLE(PINFBITPATT_DP64) : sxhi; } bool s = x >= 0x1.0p-27; diff --git a/ocml/src/csinhF.cl b/ocml/src/csinhF.cl index d099794b..06c92bd8 100644 --- a/ocml/src/csinhF.cl +++ b/ocml/src/csinhF.cl @@ -24,14 +24,13 @@ MATH_MANGLE(csinh)(float2 z) float cy; float sy = MATH_MANGLE(sincos)(z.y, &cy); - float cxhi, sxhi; - if (FINITE_ONLY_OPT()) { - cxhi = cx.hi; - sxhi = sx.hi; - } else { + float cxhi = cx.hi; + float sxhi = sx.hi; + + if (!FINITE_ONLY_OPT()) { bool b = x >= 0x1.686fc0p+6f; - cxhi = b ? AS_FLOAT(PINFBITPATT_SP32) : cx.hi; - sxhi = b ? AS_FLOAT(PINFBITPATT_SP32) : sx.hi; + cxhi = b ? AS_FLOAT(PINFBITPATT_SP32) : cxhi; + sxhi = b ? AS_FLOAT(PINFBITPATT_SP32) : sxhi; } bool s = x >= 0x1.0p-12f; From 4365e7b9d8d87da20fec1a9c48fecad9104d30e1 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 20:42:57 -0500 Subject: [PATCH 07/22] Eliminate irif wrappers around llvm.ctlz __builtin_clz has a bizarre design where the behavior for 0 is target defined in clang, to set the second operand to whether a 0 result is defined. InstCombine handles folding checks for 0 around these, so just rely on that to get the defined 0 behavior. Produces identical IR after instcombine for all the types. The AMDGPU instruction really returns -1 for 0, so we end up splitting this out again later anyway. 
Change-Id: I7a7d867275f9d098429837dc1f3bf24811713eac --- irif/inc/irif.h | 10 +++++----- irif/src/cz.ll | 24 ------------------------ ockl/src/clz.cl | 8 ++++---- ocml/src/builtins.h | 3 --- 4 files changed, 9 insertions(+), 36 deletions(-) diff --git a/irif/inc/irif.h b/irif/inc/irif.h index 48b1fe31..ba3c71a1 100644 --- a/irif/inc/irif.h +++ b/irif/inc/irif.h @@ -28,12 +28,12 @@ extern __attribute__((const)) half2 __llvm_round_2f16(half2) __asm("llvm.round.v extern __attribute__((const)) half2 __llvm_rint_2f16(half2) __asm("llvm.rint.v2f16"); extern __attribute__((const)) half2 __llvm_canonicalize_2f16(half2) __asm("llvm.canonicalize.v2f16"); -// Intrinsics requiring wrapping -extern __attribute__((const)) uchar __llvm_ctlz_i8(uchar); -extern __attribute__((const)) ushort __llvm_ctlz_i16(ushort); -extern __attribute__((const)) uint __llvm_ctlz_i32(uint); -extern __attribute__((const)) ulong __llvm_ctlz_i64(ulong); +#define BUILTIN_CLZ_U8(x) (uchar)(x == 0u ? 8 : __builtin_clz(x) - 24) +#define BUILTIN_CLZ_U16(x) (ushort)(x == 0u ? 16 : __builtin_clzs(x)) +#define BUILTIN_CLZ_U32(x) (uint)(x == 0u ? 32 : __builtin_clz(x)) +#define BUILTIN_CLZ_U64(x) (ulong)(x == 0u ? 
64 : __builtin_clzl(x)) +// Intrinsics requiring wrapping extern __attribute__((const)) uchar __llvm_cttz_i8(uchar); extern __attribute__((const)) ushort __llvm_cttz_i16(ushort); extern __attribute__((const)) uint __llvm_cttz_i32(uint); diff --git a/irif/src/cz.ll b/irif/src/cz.ll index d5bf3d71..a6d047d1 100644 --- a/irif/src/cz.ll +++ b/irif/src/cz.ll @@ -8,35 +8,11 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" target triple = "amdgcn-amd-amdhsa" -declare i8 @llvm.ctlz.i8(i8, i1) #0 -declare i16 @llvm.ctlz.i16(i16, i1) #0 -declare i32 @llvm.ctlz.i32(i32, i1) #0 -declare i64 @llvm.ctlz.i64(i64, i1) #0 declare i8 @llvm.cttz.i8(i8, i1) #0 declare i16 @llvm.cttz.i16(i16, i1) #0 declare i32 @llvm.cttz.i32(i32, i1) #0 declare i64 @llvm.cttz.i64(i64, i1) #0 -define protected i8 @__llvm_ctlz_i8(i8) #1 { - %2 = call i8 @llvm.ctlz.i8(i8 %0, i1 false) - ret i8 %2 -} - -define protected i16 @__llvm_ctlz_i16(i16) #1 { - %2 = call i16 @llvm.ctlz.i16(i16 %0, i1 false) - ret i16 %2 -} - -define protected i32 @__llvm_ctlz_i32(i32) #1 { - %2 = call i32 @llvm.ctlz.i32(i32 %0, i1 false) - ret i32 %2 -} - -define protected i64 @__llvm_ctlz_i64(i64) #1 { - %2 = call i64 @llvm.ctlz.i64(i64 %0, i1 false) - ret i64 %2 -} - define protected i8 @__llvm_cttz_i8(i8) #1 { %2 = call i8 @llvm.cttz.i8(i8 %0, i1 false) ret i8 %2 diff --git a/ockl/src/clz.cl b/ockl/src/clz.cl index e93edbee..a3f5db17 100644 --- a/ockl/src/clz.cl +++ b/ockl/src/clz.cl @@ -11,24 +11,24 @@ __attribute__((always_inline, const)) uchar OCKL_MANGLE_T(clz,u8)(uchar i) { - return __llvm_ctlz_i8(i); + return BUILTIN_CLZ_U8(i); } __attribute__((always_inline, const)) ushort OCKL_MANGLE_T(clz,u16)(ushort i) { - return __llvm_ctlz_i16(i); + return BUILTIN_CLZ_U16(i); } __attribute__((always_inline, const)) uint OCKL_MANGLE_U32(clz)(uint i) { - return __llvm_ctlz_i32(i); + return 
BUILTIN_CLZ_U32(i); } __attribute__((always_inline, const)) ulong OCKL_MANGLE_U64(clz)(ulong i) { - return __llvm_ctlz_i64(i); + return BUILTIN_CLZ_U64(i); } diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index a952cf43..4c4de8d4 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -73,9 +73,6 @@ #define BUILTIN_COPYSIGN_F16 __builtin_copysignf16 #define BUILTIN_COPYSIGN_2F16 __llvm_copysign_2f16 -#define BUILTIN_CLZ_U32 __llvm_ctlz_i32 -#define BUILTIN_CLZ_U64 __llvm_ctlz_i64 - #define BUILTIN_FLOOR_F32 __builtin_floorf #define BUILTIN_FLOOR_F64 __builtin_floor #define BUILTIN_FLOOR_F16 __builtin_floorf16 From 70a0a51ca575581d454be033193fa93457b36c17 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 20:55:58 -0500 Subject: [PATCH 08/22] Eliminate irif cttz wrappers Like the ctlz cases, produces identical IR after instcombine. Change-Id: Id20be5b7ec93b70bbbc7872f4421d424862f3bf4 --- irif/inc/irif.h | 9 ++++----- ockl/src/ctz.cl | 8 ++++---- opencl/src/pipes/pipes.h | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/irif/inc/irif.h b/irif/inc/irif.h index ba3c71a1..f09a6aff 100644 --- a/irif/inc/irif.h +++ b/irif/inc/irif.h @@ -33,11 +33,10 @@ extern __attribute__((const)) half2 __llvm_canonicalize_2f16(half2) __asm("llvm. #define BUILTIN_CLZ_U32(x) (uint)(x == 0u ? 32 : __builtin_clz(x)) #define BUILTIN_CLZ_U64(x) (ulong)(x == 0u ? 64 : __builtin_clzl(x)) -// Intrinsics requiring wrapping -extern __attribute__((const)) uchar __llvm_cttz_i8(uchar); -extern __attribute__((const)) ushort __llvm_cttz_i16(ushort); -extern __attribute__((const)) uint __llvm_cttz_i32(uint); -extern __attribute__((const)) ulong __llvm_cttz_i64(ulong); +#define BUILTIN_CTZ_U8(x) (uchar)(x == 0u ? (uchar)8 : __builtin_ctz((uint)x)) +#define BUILTIN_CTZ_U16(x) (ushort)(x == 0u ? 16 : __builtin_ctzs(x)) +#define BUILTIN_CTZ_U32(x) (uint)(x == 0u ? 32 : __builtin_ctz(x)) +#define BUILTIN_CTZ_U64(x) (ulong)(x == 0u ? 
64 : __builtin_ctzl(x)) // Atomics extern uint __llvm_ld_atomic_a1_x_dev_i32(__global uint *); diff --git a/ockl/src/ctz.cl b/ockl/src/ctz.cl index a7ad76e9..22f05a8b 100644 --- a/ockl/src/ctz.cl +++ b/ockl/src/ctz.cl @@ -11,24 +11,24 @@ __attribute__((always_inline, const)) uchar OCKL_MANGLE_T(ctz,u8)(uchar i) { - return __llvm_cttz_i8(i); + return BUILTIN_CTZ_U8(i); } __attribute__((always_inline, const)) ushort OCKL_MANGLE_T(ctz,u16)(ushort i) { - return __llvm_cttz_i16(i); + return BUILTIN_CTZ_U16(i); } __attribute__((always_inline, const)) uint OCKL_MANGLE_U32(ctz)(uint i) { - return __llvm_cttz_i32(i); + return BUILTIN_CTZ_U32(i); } __attribute__((always_inline, const)) ulong OCKL_MANGLE_U64(ctz)(ulong i) { - return __llvm_cttz_i64(i); + return BUILTIN_CTZ_U64(i); } diff --git a/opencl/src/pipes/pipes.h b/opencl/src/pipes/pipes.h index 8a41177a..1008e692 100644 --- a/opencl/src/pipes/pipes.h +++ b/opencl/src/pipes/pipes.h @@ -73,7 +73,7 @@ wave_reserve_1(volatile __global atomic_size_t *pi, size_t lim) __builtin_amdgcn_wave_barrier(); // Broadcast the result; the ctz tells us which lane has active lane id 0 - uint k = (uint)__llvm_cttz_i64(__builtin_amdgcn_read_exec()); + uint k = (uint)BUILTIN_CTZ_U64(__builtin_amdgcn_read_exec()); i = ((size_t)__builtin_amdgcn_readlane((uint)(i >> 32), k) << 32) | (size_t)__builtin_amdgcn_readlane((uint)i, k); From 6bb703924a92dd236228b0a60416a5104f11f62a Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Mon, 5 Dec 2022 07:55:34 -0500 Subject: [PATCH 09/22] opencl: Don't go through ockl wrappers around ctlz/cttz Change-Id: I1f45bf137a3dd6ba51ad34229fa2c6c53f29296b --- ockl/src/dm.cl | 2 +- opencl/src/integer/clz.cl | 17 +++++++++-------- opencl/src/integer/ctz.cl | 17 +++++++++-------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/ockl/src/dm.cl b/ockl/src/dm.cl index a947b699..49bbaa0b 100644 --- a/ockl/src/dm.cl +++ b/ockl/src/dm.cl @@ -337,7 +337,7 @@ static uint size_to_kind(uint sz) { sz = sz < 16 ? 
16 : sz; - uint b = 31 - __ockl_clz_u32(sz); + uint b = 31 - BUILTIN_CLZ_U32(sz); uint v = 1 << b; return ((b - 4) << 1) + (sz > v) + (sz > (v | (v >> 1))); } diff --git a/opencl/src/integer/clz.cl b/opencl/src/integer/clz.cl index 0aee2b08..844fa1ea 100644 --- a/opencl/src/integer/clz.cl +++ b/opencl/src/integer/clz.cl @@ -6,6 +6,7 @@ *===------------------------------------------------------------------------*/ #include "int.h" +#include "irif.h" #define UEXPATTR __attribute__((overloadable, const)) UEXP(char,clz) @@ -20,48 +21,48 @@ UEXP(ulong,clz) UEXPATTR char clz(char x) { - return (char)__ockl_clz_u8((uchar)x); + return (char)BUILTIN_CLZ_U8((uchar)x); } UEXPATTR uchar clz(uchar x) { - return __ockl_clz_u8(x); + return BUILTIN_CLZ_U8(x); } UEXPATTR short clz(short x) { - return (short)__ockl_clz_u16((ushort)x); + return (short)BUILTIN_CLZ_U16((ushort)x); } UEXPATTR ushort clz(ushort x) { - return __ockl_clz_u16(x); + return BUILTIN_CLZ_U16(x); } UEXPATTR int clz(int x) { - return (int)__ockl_clz_u32((uint)x); + return (int)BUILTIN_CLZ_U32((uint)x); } UEXPATTR uint clz(uint x) { - return __ockl_clz_u32(x); + return BUILTIN_CLZ_U32(x); } UEXPATTR long clz(long x) { - return (long)__ockl_clz_u64((ulong)x); + return (long)BUILTIN_CLZ_U64((ulong)x); } UEXPATTR ulong clz(ulong x) { - return __ockl_clz_u64(x); + return BUILTIN_CLZ_U64(x); } diff --git a/opencl/src/integer/ctz.cl b/opencl/src/integer/ctz.cl index 3059b6bb..6cdde0a4 100644 --- a/opencl/src/integer/ctz.cl +++ b/opencl/src/integer/ctz.cl @@ -6,6 +6,7 @@ *===------------------------------------------------------------------------*/ #include "int.h" +#include "irif.h" #define UEXPATTR __attribute__((overloadable, const)) UEXP(char,ctz) @@ -20,48 +21,48 @@ UEXP(ulong,ctz) UEXPATTR char ctz(char x) { - return (char)__ockl_ctz_u8((uchar)x); + return (char)BUILTIN_CTZ_U8((uchar)x); } UEXPATTR uchar ctz(uchar x) { - return __ockl_ctz_u8(x); + return BUILTIN_CTZ_U8(x); } UEXPATTR short ctz(short x) { - return 
(short)__ockl_ctz_u16((ushort)x); + return (short)BUILTIN_CTZ_U16((ushort)x); } UEXPATTR ushort ctz(ushort x) { - return __ockl_ctz_u16(x); + return BUILTIN_CTZ_U16(x); } UEXPATTR int ctz(int x) { - return (int)__ockl_ctz_u32((uint)x); + return (int)BUILTIN_CTZ_U32((uint)x); } UEXPATTR uint ctz(uint x) { - return __ockl_ctz_u32(x); + return BUILTIN_CTZ_U32(x); } UEXPATTR long ctz(long x) { - return (long)__ockl_ctz_u64((ulong)x); + return (long)BUILTIN_CTZ_U64((ulong)x); } UEXPATTR ulong ctz(ulong x) { - return __ockl_ctz_u64(x); + return BUILTIN_CTZ_U64(x); } From 4cc318d575ef7dcd66805285f308c079f46d96b4 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Mon, 5 Dec 2022 08:02:15 -0500 Subject: [PATCH 10/22] Use -passes to silence warning from opt Change-Id: I88a8a728d317474873a2a8c52ee106eade52f4df --- cmake/OCL.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OCL.cmake b/cmake/OCL.cmake index 30790a94..c0cf83ae 100644 --- a/cmake/OCL.cmake +++ b/cmake/OCL.cmake @@ -136,7 +136,7 @@ macro(opencl_bc_lib) # Extra link step with internalize COMMAND $ -internalize -only-needed "${name}.link0${LIB_SUFFIX}" -o "${OUT_NAME}${LIB_SUFFIX}" ${internal_link_libs} - COMMAND $ -strip + COMMAND $ -passes=strip -o "${OUT_NAME}${STRIP_SUFFIX}" "${OUT_NAME}${LIB_SUFFIX}" COMMAND "${PREPARE_BUILTINS}" -o ${OUTPUT_BC_LIB} "${OUT_NAME}${STRIP_SUFFIX}" From 5ea07ceb59a35a8d8724b4911c6d4cab138b2f5b Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Mon, 21 Nov 2022 23:55:50 -0500 Subject: [PATCH 11/22] Remove DAZ_OPT check from fmin/fmax The attempt to optimize to compare and select doesn't make much sense. The middle end and backend tries to fold those into llvm.minnum/llvm.maxnum whenever possible. The backend would like to know if there could be a signaling nan to quiet, but we can get that from the calling function after inlining. 
Change-Id: I876766db2fe92f221d3efe64488dc9cb2b11fecc --- ocml/src/fmaxF.cl | 11 +---------- ocml/src/fminF.cl | 11 +---------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/ocml/src/fmaxF.cl b/ocml/src/fmaxF.cl index 5dd9e2de..da00090a 100644 --- a/ocml/src/fmaxF.cl +++ b/ocml/src/fmaxF.cl @@ -10,15 +10,6 @@ CONSTATTR float MATH_MANGLE(fmax)(float x, float y) { - float ret; - - if (DAZ_OPT() & !FINITE_ONLY_OPT()) { - // XXX revist this later - ret = BUILTIN_CMAX_F32(x, y); - } else { - ret = BUILTIN_MAX_F32(x, y); - } - - return ret; + return BUILTIN_MAX_F32(x, y); } diff --git a/ocml/src/fminF.cl b/ocml/src/fminF.cl index 6c50ef05..a0fc6d1b 100644 --- a/ocml/src/fminF.cl +++ b/ocml/src/fminF.cl @@ -10,15 +10,6 @@ CONSTATTR float MATH_MANGLE(fmin)(float x, float y) { - float ret; - - if (DAZ_OPT() & !FINITE_ONLY_OPT()) { - // XXX revisit this later - ret = BUILTIN_CMIN_F32(x, y); - } else { - ret = BUILTIN_MIN_F32(x, y); - } - - return ret; + return BUILTIN_MIN_F32(x, y); } From 342fc0dff1c91a5fe1dcb4be3f970d6f90d29130 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Wed, 7 Dec 2022 22:56:28 -0500 Subject: [PATCH 12/22] Remove unused irif declarations Change-Id: I1ddc20ea7d4ae7c18b4cec84acd020561df11b4f --- irif/inc/irif.h | 64 ------------- irif/src/atomic.ll | 221 --------------------------------------------- 2 files changed, 285 deletions(-) delete mode 100644 irif/src/atomic.ll diff --git a/irif/inc/irif.h b/irif/inc/irif.h index f09a6aff..7059e775 100644 --- a/irif/inc/irif.h +++ b/irif/inc/irif.h @@ -38,71 +38,7 @@ extern __attribute__((const)) half2 __llvm_canonicalize_2f16(half2) __asm("llvm. #define BUILTIN_CTZ_U32(x) (uint)(x == 0u ? 32 : __builtin_ctz(x)) #define BUILTIN_CTZ_U64(x) (ulong)(x == 0u ? 
64 : __builtin_ctzl(x)) -// Atomics -extern uint __llvm_ld_atomic_a1_x_dev_i32(__global uint *); -extern ulong __llvm_ld_atomic_a1_x_dev_i64(__global ulong *); -extern uint __llvm_ld_atomic_a3_x_wg_i32(__local uint *); -extern ulong __llvm_ld_atomic_a3_x_wg_i64(__local ulong *); - -extern void __llvm_st_atomic_a1_x_dev_i32(__global uint *, uint); -extern void __llvm_st_atomic_a1_x_dev_i64(__global ulong *, ulong); -extern void __llvm_st_atomic_a3_x_wg_i32(__local uint *, uint); -extern void __llvm_st_atomic_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_atomic_add_a1_x_dev_i32(__global uint *, uint); -extern ulong __llvm_atomic_add_a1_x_dev_i64(__global ulong *, ulong); -extern uint __llvm_atomic_add_a3_x_wg_i32(__local uint *, uint); -extern ulong __llvm_atomic_add_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_atomic_and_a1_x_dev_i32(__global uint *, uint); -extern ulong __llvm_atomic_and_a1_x_dev_i64(__global ulong *, ulong); -extern uint __llvm_atomic_and_a3_x_wg_i32(__local uint *, uint); -extern ulong __llvm_atomic_and_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_atomic_or_a1_x_dev_i32(__global uint *, uint); -extern ulong __llvm_atomic_or_a1_x_dev_i64(__global ulong *, ulong); -extern uint __llvm_atomic_or_a3_x_wg_i32(__local uint *, uint); -extern ulong __llvm_atomic_or_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_atomic_max_a1_x_dev_i32(__global int *, int); -extern uint __llvm_atomic_umax_a1_x_dev_i32(__global uint *, uint); -extern ulong __llvm_atomic_max_a1_x_dev_i64(__global long *, long); -extern ulong __llvm_atomic_umax_a1_x_dev_i64(__global ulong *, ulong); -extern uint __llvm_atomic_max_a3_x_wg_i32(__local int *, int); -extern uint __llvm_atomic_umax_a3_x_wg_i32(__local uint *, uint); -extern ulong __llvm_atomic_max_a3_x_wg_i64(__local long *, long); -extern ulong __llvm_atomic_umax_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_atomic_min_a1_x_dev_i32(__global int *, int); -extern 
uint __llvm_atomic_umin_a1_x_dev_i32(__global uint *, uint); -extern ulong __llvm_atomic_min_a1_x_dev_i64(__global long *, long); -extern ulong __llvm_atomic_umin_a1_x_dev_i64(__global ulong *, ulong); -extern uint __llvm_atomic_min_a3_x_wg_i32(__local int *, int); -extern uint __llvm_atomic_umin_a3_x_wg_i32(__local uint *, uint); -extern ulong __llvm_atomic_min_a3_x_wg_i64(__local long *, long); -extern ulong __llvm_atomic_umin_a3_x_wg_i64(__local ulong *, ulong); - -extern uint __llvm_cmpxchg_a1_x_x_dev_i32(__global uint *, uint, uint); -extern ulong __llvm_cmpxchg_a1_x_x_dev_i64(__global ulong *, ulong, ulong); -extern uint __llvm_cmpxchg_a3_x_x_wg_i32(__local uint *, uint, uint); -extern ulong __llvm_cmpxchg_a3_x_x_wg_i64(__local ulong *, ulong, ulong); - // AMDGPU intrinsics - -// llvm.amdgcn.mov.dpp.i32 - -// llvm.amdgcn.update.dpp.i32 -extern uint __llvm_amdgcn_update_dpp_i32(uint, uint, uint, uint, uint, bool) __asm("llvm.amdgcn.update.dpp.i32"); - -// llvm.amdgcn.mov.dpp8.i32 -extern uint __llvm_amdgcn_dpp8_i32(uint, uint) __asm("llvm.amdgcn.dpp8.i32"); - -// llvm.amdgcn.permlane16 -extern uint __llvm_amdgcn_permlane16(uint, uint, uint, uint, bool, bool) __asm("llvm.amdgcn.permlane16"); - -// llvm.amdgcn.permlanex16 -extern uint __llvm_amdgcn_permlanex16(uint, uint, uint, uint, bool, bool) __asm("llvm.amdgcn.permlanex16"); - extern __attribute__((const, convergent)) ulong __llvm_amdgcn_icmp_i64_i32(uint, uint, uint) __asm("llvm.amdgcn.icmp.i64.i32"); extern __attribute__((const, convergent)) ulong __llvm_amdgcn_icmp_i64_i64(ulong, ulong, uint) __asm("llvm.amdgcn.icmp.i64.i64"); extern __attribute__((const, convergent)) ulong __llvm_amdgcn_fcmp_i64_f32(float, float, uint) __asm("llvm.amdgcn.fcmp.i64.f32"); diff --git a/irif/src/atomic.ll b/irif/src/atomic.ll deleted file mode 100644 index 6b58bf2f..00000000 --- a/irif/src/atomic.ll +++ /dev/null @@ -1,221 +0,0 @@ -target datalayout = 
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" -target triple = "amdgcn-amd-amdhsa" - -;;; -;;; TODO add synchscope(N) -;;; - -;;;;; Load -define protected i32 @__llvm_ld_atomic_a1_x_dev_i32(i32 addrspace(1)* nocapture readonly) #0 { - %2 = load atomic volatile i32, i32 addrspace(1)* %0 monotonic, align 4 - ret i32 %2 -} - -define protected i64 @__llvm_ld_atomic_a1_x_dev_i64(i64 addrspace(1)* nocapture readonly) #0 { - %2 = load atomic volatile i64, i64 addrspace(1)* %0 monotonic, align 8 - ret i64 %2 -} - -define protected i32 @__llvm_ld_atomic_a3_x_wg_i32(i32 addrspace(3)* nocapture readonly) #0 { - %2 = load atomic volatile i32, i32 addrspace(3)* %0 monotonic, align 4 - ret i32 %2 -} - -define protected i64 @__llvm_ld_atomic_a3_x_wg_i64(i64 addrspace(3)* nocapture readonly) #0 { - %2 = load atomic volatile i64, i64 addrspace(3)* %0 monotonic, align 8 - ret i64 %2 -} - -;;;;; Store -define protected void @__llvm_st_atomic_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - store atomic volatile i32 %1, i32 addrspace(1)* %0 monotonic, align 4 - ret void -} - -define protected void @__llvm_st_atomic_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - store atomic volatile i64 %1, i64 addrspace(1)* %0 monotonic, align 8 - ret void -} - -define protected void @__llvm_st_atomic_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - store atomic volatile i32 %1, i32 addrspace(3)* %0 monotonic, align 4 - ret void -} - -define protected void @__llvm_st_atomic_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - store atomic volatile i64 %1, i64 addrspace(3)* %0 monotonic, align 8 - ret void -} - -;;;;; Add -define protected i32 @__llvm_atomic_add_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile add i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_add_a1_x_dev_i64(i64 
addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile add i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i32 @__llvm_atomic_add_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile add i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_add_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile add i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -;;;;; And -define protected i32 @__llvm_atomic_and_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile and i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_and_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile and i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i32 @__llvm_atomic_and_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile and i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_and_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile and i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -;;;;; Or -define protected i32 @__llvm_atomic_or_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile or i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_or_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile or i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i32 @__llvm_atomic_or_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile or i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_or_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile or i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -;;;;; Max -define protected i32 @__llvm_atomic_max_a1_x_dev_i32(i32 
addrspace(1)* nocapture, i32) #0 { - %3 = atomicrmw volatile max i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i32 @__llvm_atomic_umax_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile umax i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_max_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile max i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i64 @__llvm_atomic_umax_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile umax i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i32 @__llvm_atomic_max_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile max i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i32 @__llvm_atomic_umax_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile umax i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_max_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile max i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i64 @__llvm_atomic_umax_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile umax i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -;;;;; Min -define protected i32 @__llvm_atomic_min_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile min i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i32 @__llvm_atomic_umin_a1_x_dev_i32(i32 addrspace(1)* nocapture, i32) #1 { - %3 = atomicrmw volatile umin i32 addrspace(1)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_min_a1_x_dev_i64(i64 addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile min i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i64 @__llvm_atomic_umin_a1_x_dev_i64(i64 
addrspace(1)* nocapture, i64) #1 { - %3 = atomicrmw volatile umin i64 addrspace(1)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i32 @__llvm_atomic_min_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile min i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i32 @__llvm_atomic_umin_a3_x_wg_i32(i32 addrspace(3)* nocapture, i32) #1 { - %3 = atomicrmw volatile umin i32 addrspace(3)* %0, i32 %1 monotonic - ret i32 %3 -} - -define protected i64 @__llvm_atomic_min_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile min i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -define protected i64 @__llvm_atomic_umin_a3_x_wg_i64(i64 addrspace(3)* nocapture, i64) #1 { - %3 = atomicrmw volatile umin i64 addrspace(3)* %0, i64 %1 monotonic - ret i64 %3 -} - -;;;;; cmpxchg -define protected i32 @__llvm_cmpxchg_a1_x_x_dev_i32(i32 addrspace(1)* nocapture, i32, i32) #0 { - %4 = cmpxchg volatile i32 addrspace(1)* %0, i32 %1, i32 %2 monotonic monotonic - %5 = extractvalue { i32, i1 } %4, 0 - ret i32 %5 -} - -define protected i64 @__llvm_cmpxchg_a1_x_x_dev_i64(i64 addrspace(1)* nocapture, i64, i64) #1 { - %4 = cmpxchg volatile i64 addrspace(1)* %0, i64 %1, i64 %2 monotonic monotonic - %5 = extractvalue { i64, i1 } %4, 0 - ret i64 %5 -} - -define protected i32 @__llvm_cmpxchg_a3_x_x_wg_i32(i32 addrspace(3)* nocapture, i32, i32) #1 { - %4 = cmpxchg volatile i32 addrspace(3)* %0, i32 %1, i32 %2 monotonic monotonic - %5 = extractvalue { i32, i1 } %4, 0 - ret i32 %5 -} - -define protected i64 @__llvm_cmpxchg_a3_x_x_wg(i64 addrspace(3)* nocapture, i64, i64) #1 { - %4 = cmpxchg volatile i64 addrspace(3)* %0, i64 %1, i64 %2 monotonic monotonic - %5 = extractvalue { i64, i1 } %4, 0 - ret i64 %5 -} - -attributes #0 = { alwaysinline argmemonly norecurse nounwind readonly } -attributes #1 = { alwaysinline argmemonly norecurse nounwind } From 5e4356387d234620677bca8fbd665cec6c96a705 Mon Sep 17 00:00:00 2001 
From: Matt Arsenault Date: Thu, 8 Dec 2022 15:48:21 -0500 Subject: [PATCH 13/22] Mark deprecated functions with the attribute Change-Id: I7b52a79794442d9be9d4fadcbc1a61cef44b04c7 --- ockl/inc/ockl.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ockl/inc/ockl.h b/ockl/inc/ockl.h index 0477d7d6..606b95ad 100644 --- a/ockl/inc/ockl.h +++ b/ockl/inc/ockl.h @@ -9,9 +9,11 @@ #define OCKL_H // This C header declares the functions provided by the OCKL library -// Aspects of this library's behavior can be controlled via the +// Aspects of this library's behavior can be controlled via the // oclc library. See the oclc header for further information +#define OCKL_DEPRECATED __attribute__((deprecated)) + #define _MANGLE3x(P,N,S) P##_##N##S #define MANGLE3x(P,N,S) _MANGLE3x(P,N,S) #define _MANGLE3(P,N,S) P##_##N##_##S @@ -141,7 +143,10 @@ DECL_CONST_OCKL_BINARY_U32(mul24) DECL_OCKL_NULLARY_U32(lane) DECL_OCKL_NULLARY_U32(activelane) +OCKL_DEPRECATED DECL_OCKL_NULLARY_U64(memtime) + +OCKL_DEPRECATED DECL_OCKL_NULLARY_U64(memrealtime) DECL_OCKL_NULLARY_U64(cyclectr) DECL_OCKL_NULLARY_U64(steadyctr) From a36c27df85abdd954f702b7506b379cfdfd6b0b5 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 08:45:45 -0500 Subject: [PATCH 14/22] Use BUILTIN_ISINF_F32 in place of class pinf|ninf Change-Id: I37840f047f371c71aab9b93fa9ce1f995b437b83 --- ocml/src/ctanhD.cl | 2 +- ocml/src/ctanhF.cl | 2 +- ocml/src/len3D.cl | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ocml/src/ctanhD.cl b/ocml/src/ctanhD.cl index 120c40d1..f1ebc012 100644 --- a/ocml/src/ctanhD.cl +++ b/ocml/src/ctanhD.cl @@ -45,7 +45,7 @@ MATH_MANGLE(ctanh)(double2 z) bool ni = BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_PSUB|CLASS_PNOR) & yin; rr = (ni | xn) ? AS_DOUBLE(QNANBITPATT_DP64) : rr; ri = ni ? AS_DOUBLE(QNANBITPATT_DP64) : ri; - ri = (BUILTIN_CLASS_F64(x, CLASS_PINF|CLASS_NINF) & yin) ? 0.0 : ri; + ri = (BUILTIN_ISINF_F64(x) & yin) ? 
0.0 : ri; ri = (xn & (z.y == 0.0)) ? z.y : ri; } diff --git a/ocml/src/ctanhF.cl b/ocml/src/ctanhF.cl index eb5c07ce..55446477 100644 --- a/ocml/src/ctanhF.cl +++ b/ocml/src/ctanhF.cl @@ -45,7 +45,7 @@ MATH_MANGLE(ctanh)(float2 z) bool ni = BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_PSUB|CLASS_PNOR) & yin; rr = (ni | xn) ? AS_FLOAT(QNANBITPATT_SP32) : rr; ri = ni ? AS_FLOAT(QNANBITPATT_SP32) : ri; - ri = (BUILTIN_CLASS_F32(x, CLASS_PINF|CLASS_NINF) & yin) ? 0.0f : ri; + ri = (BUILTIN_ISINF_F32(x) & yin) ? 0.0f : ri; ri = (xn & (z.y == 0.0f)) ? z.y : ri; } diff --git a/ocml/src/len3D.cl b/ocml/src/len3D.cl index dbe747b8..94e65721 100644 --- a/ocml/src/len3D.cl +++ b/ocml/src/len3D.cl @@ -34,9 +34,9 @@ MATH_MANGLE(len3)(double x, double y, double z) ret = (BUILTIN_ISNAN_F64(x) | BUILTIN_ISNAN_F64(y) | BUILTIN_ISNAN_F64(z)) ? AS_DOUBLE(QNANBITPATT_DP64) : ret; - ret = (BUILTIN_CLASS_F64(x, CLASS_PINF|CLASS_NINF) | - BUILTIN_CLASS_F64(y, CLASS_PINF|CLASS_NINF) | - BUILTIN_CLASS_F64(z, CLASS_PINF|CLASS_NINF)) ? AS_DOUBLE(PINFBITPATT_DP64) : ret; + ret = (BUILTIN_ISINF_F64(x) | + BUILTIN_ISINF_F64(y) | + BUILTIN_ISINF_F64(z)) ? AS_DOUBLE(PINFBITPATT_DP64) : ret; } return ret; From 7ebd4cc15ca87f5f288bbe6a3b08b75b4db0fae6 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Thu, 1 Dec 2022 23:29:27 -0500 Subject: [PATCH 15/22] Introduce and use BUILTIN_ISSUBNORMAL_* macros Do this instead of directly calling the class builtins. This better expresses the intent. Additionally, there are open questions about what the behavior of class is with a denormal input under DAZ (e.g. currently we'll get a different answer than what clang's implementation of fpclassify does). Handling these all in one place will help treat them consistently if there needs to be a change. 
Change-Id: Ib958847cd8101b4c9609d44f946595fec9bbc582 --- ocml/src/builtins.h | 4 ++++ ocml/src/cbrtF.cl | 4 ++-- ocml/src/fpclassifyD.cl | 2 +- ocml/src/fpclassifyF.cl | 2 +- ocml/src/fpclassifyH.cl | 2 +- ocml/src/logF_base.h | 4 ++-- ocml/src/rcbrtF.cl | 6 +++--- 7 files changed, 14 insertions(+), 10 deletions(-) diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index 4c4de8d4..4bd949d5 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -68,6 +68,10 @@ #define BUILTIN_ISFINITE_F64(x) __builtin_amdgcn_class(x, CLASS_NNOR|CLASS_NSUB|CLASS_NZER|CLASS_PZER|CLASS_PSUB|CLASS_PNOR) #define BUILTIN_ISFINITE_F16(x) __builtin_amdgcn_classh(x, CLASS_NNOR|CLASS_NSUB|CLASS_NZER|CLASS_PZER|CLASS_PSUB|CLASS_PNOR) +#define BUILTIN_ISSUBNORMAL_F32(x) __builtin_amdgcn_classf(x, CLASS_NSUB|CLASS_PSUB) +#define BUILTIN_ISSUBNORMAL_F64(x) __builtin_amdgcn_class(x, CLASS_NSUB|CLASS_PSUB) +#define BUILTIN_ISSUBNORMAL_F16(x) __builtin_amdgcn_classh(x, CLASS_NSUB|CLASS_PSUB) + #define BUILTIN_COPYSIGN_F32 __builtin_copysignf #define BUILTIN_COPYSIGN_F64 __builtin_copysign #define BUILTIN_COPYSIGN_F16 __builtin_copysignf16 diff --git a/ocml/src/cbrtF.cl b/ocml/src/cbrtF.cl index 36086dc5..c45e8efc 100644 --- a/ocml/src/cbrtF.cl +++ b/ocml/src/cbrtF.cl @@ -17,7 +17,7 @@ MATH_MANGLE(cbrt)(float x) float ax = BUILTIN_ABS_F32(x); if (!DAZ_OPT()) { - ax = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB) ? + ax = BUILTIN_ISSUBNORMAL_F32(x) ? BUILTIN_FLDEXP_F32(ax, 24) : ax; } @@ -25,7 +25,7 @@ MATH_MANGLE(cbrt)(float x) z = MATH_MAD(MATH_MAD(MATH_FAST_RCP(z*z), -ax, z), -0x1.555556p-2f, z); if (!DAZ_OPT()) { - z = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB) ? + z = BUILTIN_ISSUBNORMAL_F32(x) ? BUILTIN_FLDEXP_F32(z, -8) : z; } diff --git a/ocml/src/fpclassifyD.cl b/ocml/src/fpclassifyD.cl index 84958543..94aa6f64 100644 --- a/ocml/src/fpclassifyD.cl +++ b/ocml/src/fpclassifyD.cl @@ -12,7 +12,7 @@ MATH_MANGLE(fpclassify)(double x) { int ret = BUILTIN_ISINF_F64(x) ? 
FP_INFINITE : FP_NAN; ret = BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; - ret = BUILTIN_CLASS_F64(x, CLASS_PSUB|CLASS_NSUB) ? FP_SUBNORMAL : ret; + ret = BUILTIN_ISSUBNORMAL_F64(x) ? FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F64(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; } diff --git a/ocml/src/fpclassifyF.cl b/ocml/src/fpclassifyF.cl index 232b6be2..cca55672 100644 --- a/ocml/src/fpclassifyF.cl +++ b/ocml/src/fpclassifyF.cl @@ -12,7 +12,7 @@ MATH_MANGLE(fpclassify)(float x) { int ret = BUILTIN_ISINF_F32(x) ? FP_INFINITE : FP_NAN; ret = BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; - ret = BUILTIN_CLASS_F32(x, CLASS_PSUB|CLASS_NSUB) ? FP_SUBNORMAL : ret; + ret = BUILTIN_ISSUBNORMAL_F32(x) ? FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F32(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; } diff --git a/ocml/src/fpclassifyH.cl b/ocml/src/fpclassifyH.cl index b9e09ffe..ab7d2e4d 100644 --- a/ocml/src/fpclassifyH.cl +++ b/ocml/src/fpclassifyH.cl @@ -12,7 +12,7 @@ MATH_MANGLE(fpclassify)(half x) { int ret = BUILTIN_ISINF_F16(x) ? FP_INFINITE : FP_NAN; ret = BUILTIN_CLASS_F16(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; - ret = BUILTIN_CLASS_F16(x, CLASS_PSUB|CLASS_NSUB) ? FP_SUBNORMAL : ret; + ret = BUILTIN_ISSUBNORMAL_F16(x) ? FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F16(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; } diff --git a/ocml/src/logF_base.h b/ocml/src/logF_base.h index f6756d04..ede48206 100644 --- a/ocml/src/logF_base.h +++ b/ocml/src/logF_base.h @@ -62,7 +62,7 @@ MATH_MANGLE(log)(float x) } else { // not DAZ if (UNSAFE_MATH_OPT()) { - bool s = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB); + bool s = BUILTIN_ISSUBNORMAL_F32(x); x *= s ? 0x1.0p+32f : 1.0f; #if defined COMPILING_LOG2 return BUILTIN_LOG2_F32(x) - (s ? 32.0f : 0.0f); @@ -72,7 +72,7 @@ MATH_MANGLE(log)(float x) return MATH_MAD(BUILTIN_LOG2_F32(x), 0x1.62e430p-1f, s ? 
-0x1.62e430p+4f : 0.0f); #endif } else { - bool s = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB); + bool s = BUILTIN_ISSUBNORMAL_F32(x); x *= s ? 0x1.0p+32f : 1.0f; #if defined COMPILING_LOG2 return BUILTIN_LOG2_F32(x) - (s ? 32.0f : 0.0f); diff --git a/ocml/src/rcbrtF.cl b/ocml/src/rcbrtF.cl index a8293efb..1d8669f7 100644 --- a/ocml/src/rcbrtF.cl +++ b/ocml/src/rcbrtF.cl @@ -15,9 +15,9 @@ MATH_MANGLE(rcbrt)(float x) } float ax = BUILTIN_ABS_F32(x); - + if (!DAZ_OPT()) { - ax = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB) ? + ax = BUILTIN_ISSUBNORMAL_F32(x) ? BUILTIN_FLDEXP_F32(ax, 24) : ax; } @@ -25,7 +25,7 @@ MATH_MANGLE(rcbrt)(float x) z = MATH_MAD(MATH_MAD(z*z, -z*ax, 1.0f), 0x1.555556p-2f*z, z); if (!DAZ_OPT()) { - z = BUILTIN_CLASS_F32(x, CLASS_NSUB|CLASS_PSUB) ? + z = BUILTIN_ISSUBNORMAL_F32(x) ? BUILTIN_FLDEXP_F32(z, 8) : z; } From 752480e5c7d7fa2c84e5517d8d96595c4c4c32be Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 08:27:58 -0500 Subject: [PATCH 16/22] Introduce and use BUILTIN_ISZERO_* macros Depending on what the final decision on the semantics of DAZ, this may need to expand to cover denormal inputs (i.e. 
be swapped to just a simple == 0.0) Change-Id: I388f629740080922f43e353bda45a595631c36fa --- ocml/src/builtins.h | 4 ++++ ocml/src/fpclassifyD.cl | 2 +- ocml/src/fpclassifyF.cl | 2 +- ocml/src/fpclassifyH.cl | 2 +- ocml/src/scalbD.cl | 2 +- ocml/src/scalbH.cl | 2 +- 6 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index 4bd949d5..dff5a0ee 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -72,6 +72,10 @@ #define BUILTIN_ISSUBNORMAL_F64(x) __builtin_amdgcn_class(x, CLASS_NSUB|CLASS_PSUB) #define BUILTIN_ISSUBNORMAL_F16(x) __builtin_amdgcn_classh(x, CLASS_NSUB|CLASS_PSUB) +#define BUILTIN_ISZERO_F32(x) __builtin_amdgcn_classf(x, CLASS_NZER|CLASS_PZER) +#define BUILTIN_ISZERO_F64(x) __builtin_amdgcn_class(x, CLASS_NZER|CLASS_PZER) +#define BUILTIN_ISZERO_F16(x) __builtin_amdgcn_classh(x, CLASS_NZER|CLASS_PZER) + #define BUILTIN_COPYSIGN_F32 __builtin_copysignf #define BUILTIN_COPYSIGN_F64 __builtin_copysign #define BUILTIN_COPYSIGN_F16 __builtin_copysignf16 diff --git a/ocml/src/fpclassifyD.cl b/ocml/src/fpclassifyD.cl index 94aa6f64..ee04cadf 100644 --- a/ocml/src/fpclassifyD.cl +++ b/ocml/src/fpclassifyD.cl @@ -11,7 +11,7 @@ CONSTATTR int MATH_MANGLE(fpclassify)(double x) { int ret = BUILTIN_ISINF_F64(x) ? FP_INFINITE : FP_NAN; - ret = BUILTIN_CLASS_F64(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; + ret = BUILTIN_ISZERO_F64(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F64(x) ? FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F64(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; diff --git a/ocml/src/fpclassifyF.cl b/ocml/src/fpclassifyF.cl index cca55672..97d588f4 100644 --- a/ocml/src/fpclassifyF.cl +++ b/ocml/src/fpclassifyF.cl @@ -11,7 +11,7 @@ CONSTATTR int MATH_MANGLE(fpclassify)(float x) { int ret = BUILTIN_ISINF_F32(x) ? FP_INFINITE : FP_NAN; - ret = BUILTIN_CLASS_F32(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; + ret = BUILTIN_ISZERO_F32(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F32(x) ? 
FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F32(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; diff --git a/ocml/src/fpclassifyH.cl b/ocml/src/fpclassifyH.cl index ab7d2e4d..ad5d9caa 100644 --- a/ocml/src/fpclassifyH.cl +++ b/ocml/src/fpclassifyH.cl @@ -11,7 +11,7 @@ REQUIRES_16BIT_INSTS CONSTATTR int MATH_MANGLE(fpclassify)(half x) { int ret = BUILTIN_ISINF_F16(x) ? FP_INFINITE : FP_NAN; - ret = BUILTIN_CLASS_F16(x, CLASS_PZER|CLASS_NZER) ? FP_ZERO : ret; + ret = BUILTIN_ISZERO_F16(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F16(x) ? FP_SUBNORMAL : ret; ret = BUILTIN_CLASS_F16(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; return ret; diff --git a/ocml/src/scalbD.cl b/ocml/src/scalbD.cl index 006d0a8d..ec1f9aca 100644 --- a/ocml/src/scalbD.cl +++ b/ocml/src/scalbD.cl @@ -15,7 +15,7 @@ MATH_MANGLE(scalb)(double x, double y) if (!FINITE_ONLY_OPT()) { ret = (BUILTIN_ISNAN_F64(x) | BUILTIN_ISNAN_F64(y)) ? AS_DOUBLE(QNANBITPATT_DP64) : ret; - ret = (BUILTIN_CLASS_F64(x, CLASS_NZER|CLASS_PZER) & BUILTIN_CLASS_F64(y, CLASS_PINF)) ? AS_DOUBLE(QNANBITPATT_DP64) : ret; + ret = (BUILTIN_ISZERO_F64(x) & BUILTIN_CLASS_F64(y, CLASS_PINF)) ? AS_DOUBLE(QNANBITPATT_DP64) : ret; ret = (BUILTIN_ISINF_F64(x) & BUILTIN_CLASS_F64(y, CLASS_NINF)) ? AS_DOUBLE(QNANBITPATT_DP64) : ret; } diff --git a/ocml/src/scalbH.cl b/ocml/src/scalbH.cl index 83b9a407..edbe14c7 100644 --- a/ocml/src/scalbH.cl +++ b/ocml/src/scalbH.cl @@ -17,7 +17,7 @@ MATH_MANGLE(scalb)(half x, half y) if (!FINITE_ONLY_OPT()) { ret = (BUILTIN_ISNAN_F16(x) | BUILTIN_ISNAN_F16(y)) ? AS_HALF((short)QNANBITPATT_HP16) : ret; - ret = (BUILTIN_CLASS_F16(x, CLASS_NZER|CLASS_PZER) & BUILTIN_CLASS_F16(y, CLASS_PINF)) ? AS_HALF((short)QNANBITPATT_HP16) : ret; + ret = (BUILTIN_ISZERO_F16(x) & BUILTIN_CLASS_F16(y, CLASS_PINF)) ? AS_HALF((short)QNANBITPATT_HP16) : ret; ret = (BUILTIN_ISINF_F16(x) & BUILTIN_CLASS_F16(y, CLASS_NINF)) ? 
AS_HALF((short)QNANBITPATT_HP16) : ret; } From 0cc349009003d975cc2a59e2d7bb90b6e37774ff Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 2 Dec 2022 08:33:14 -0500 Subject: [PATCH 17/22] Introduce and use BUILTIN_ISNORMAL_* macros Round out the set of test macros. Eventually this should switch to use __builtin_isnormal. Change-Id: I3d71d2ab7d452ca0eec95e121cd81f81c1c82506 --- ocml/src/builtins.h | 4 ++++ ocml/src/fpclassifyD.cl | 2 +- ocml/src/fpclassifyF.cl | 2 +- ocml/src/fpclassifyH.cl | 2 +- ocml/src/isnormalD.cl | 2 +- ocml/src/isnormalF.cl | 2 +- ocml/src/isnormalH.cl | 6 +++--- 7 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index dff5a0ee..a8aff026 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -76,6 +76,10 @@ #define BUILTIN_ISZERO_F64(x) __builtin_amdgcn_class(x, CLASS_NZER|CLASS_PZER) #define BUILTIN_ISZERO_F16(x) __builtin_amdgcn_classh(x, CLASS_NZER|CLASS_PZER) +#define BUILTIN_ISNORMAL_F32(x) __builtin_amdgcn_classf(x, CLASS_NNOR|CLASS_PNOR) +#define BUILTIN_ISNORMAL_F64(x) __builtin_amdgcn_class(x, CLASS_NNOR|CLASS_PNOR) +#define BUILTIN_ISNORMAL_F16(x) __builtin_amdgcn_classh(x, CLASS_NNOR|CLASS_PNOR) + #define BUILTIN_COPYSIGN_F32 __builtin_copysignf #define BUILTIN_COPYSIGN_F64 __builtin_copysign #define BUILTIN_COPYSIGN_F16 __builtin_copysignf16 diff --git a/ocml/src/fpclassifyD.cl b/ocml/src/fpclassifyD.cl index ee04cadf..10ab2d48 100644 --- a/ocml/src/fpclassifyD.cl +++ b/ocml/src/fpclassifyD.cl @@ -13,7 +13,7 @@ MATH_MANGLE(fpclassify)(double x) int ret = BUILTIN_ISINF_F64(x) ? FP_INFINITE : FP_NAN; ret = BUILTIN_ISZERO_F64(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F64(x) ? FP_SUBNORMAL : ret; - ret = BUILTIN_CLASS_F64(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; + ret = BUILTIN_ISNORMAL_F64(x) ? 
FP_NORMAL : ret; return ret; } diff --git a/ocml/src/fpclassifyF.cl b/ocml/src/fpclassifyF.cl index 97d588f4..3cb92a83 100644 --- a/ocml/src/fpclassifyF.cl +++ b/ocml/src/fpclassifyF.cl @@ -13,7 +13,7 @@ MATH_MANGLE(fpclassify)(float x) int ret = BUILTIN_ISINF_F32(x) ? FP_INFINITE : FP_NAN; ret = BUILTIN_ISZERO_F32(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F32(x) ? FP_SUBNORMAL : ret; - ret = BUILTIN_CLASS_F32(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; + ret = BUILTIN_ISNORMAL_F32(x) ? FP_NORMAL : ret; return ret; } diff --git a/ocml/src/fpclassifyH.cl b/ocml/src/fpclassifyH.cl index ad5d9caa..a4936c01 100644 --- a/ocml/src/fpclassifyH.cl +++ b/ocml/src/fpclassifyH.cl @@ -13,7 +13,7 @@ MATH_MANGLE(fpclassify)(half x) int ret = BUILTIN_ISINF_F16(x) ? FP_INFINITE : FP_NAN; ret = BUILTIN_ISZERO_F16(x) ? FP_ZERO : ret; ret = BUILTIN_ISSUBNORMAL_F16(x) ? FP_SUBNORMAL : ret; - ret = BUILTIN_CLASS_F16(x, CLASS_PNOR|CLASS_NNOR) ? FP_NORMAL : ret; + ret = BUILTIN_ISNORMAL_F16(x) ? FP_NORMAL : ret; return ret; } diff --git a/ocml/src/isnormalD.cl b/ocml/src/isnormalD.cl index 55799a17..69fbc280 100644 --- a/ocml/src/isnormalD.cl +++ b/ocml/src/isnormalD.cl @@ -10,6 +10,6 @@ CONSTATTR int MATH_MANGLE(isnormal)(double x) { - return BUILTIN_CLASS_F64(x, CLASS_PNOR|CLASS_NNOR); + return BUILTIN_ISNORMAL_F64(x); } diff --git a/ocml/src/isnormalF.cl b/ocml/src/isnormalF.cl index 9c640286..c8704c07 100644 --- a/ocml/src/isnormalF.cl +++ b/ocml/src/isnormalF.cl @@ -10,5 +10,5 @@ CONSTATTR int MATH_MANGLE(isnormal)(float x) { - return BUILTIN_CLASS_F32(x, CLASS_PNOR|CLASS_NNOR); + return BUILTIN_ISNORMAL_F32(x); } diff --git a/ocml/src/isnormalH.cl b/ocml/src/isnormalH.cl index d9200083..08943a38 100644 --- a/ocml/src/isnormalH.cl +++ b/ocml/src/isnormalH.cl @@ -11,13 +11,13 @@ REQUIRES_16BIT_INSTS CONSTATTR short2 MATH_MANGLE2(isnormal)(half2 x) { return (short2) - (BUILTIN_CLASS_F16(x.lo, CLASS_PNOR|CLASS_NNOR) ? 
(short)-1 : (short)0, - BUILTIN_CLASS_F16(x.hi, CLASS_PNOR|CLASS_NNOR) ? (short)-1 : (short)0); + (BUILTIN_ISNORMAL_F16(x.lo) ? (short)-1 : (short)0, + BUILTIN_ISNORMAL_F16(x.hi) ? (short)-1 : (short)0); } REQUIRES_16BIT_INSTS CONSTATTR int MATH_MANGLE(isnormal)(half x) { - return BUILTIN_CLASS_F16(x, CLASS_PNOR|CLASS_NNOR); + return BUILTIN_ISNORMAL_F16(x); } From 8b4117cf0653b89c28f6d88872e81e35ea867cb3 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 9 Dec 2022 16:48:15 -0500 Subject: [PATCH 18/22] Use BUILTIN_ISINF in other macros Change-Id: Ic347db3e8710a90f35a7e761af1522c5bf75f764 --- ocml/src/builtins.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ocml/src/builtins.h b/ocml/src/builtins.h index a8aff026..77e9cc34 100644 --- a/ocml/src/builtins.h +++ b/ocml/src/builtins.h @@ -93,19 +93,19 @@ #define BUILTIN_FRACTION_F32(X) ({ \ float _fract_x = X; \ float _fract_r = __builtin_amdgcn_fractf(_fract_x); \ - _fract_r = __builtin_amdgcn_classf(_fract_x, CLASS_PINF|CLASS_NINF) ? 0.0f : _fract_r; \ + _fract_r = BUILTIN_ISINF_F32(_fract_x) ? 0.0f : _fract_r; \ _fract_r; \ }) #define BUILTIN_FRACTION_F64(X) ({ \ double _fract_x = X; \ double _fract_r = __builtin_amdgcn_fract(_fract_x); \ - _fract_r = __builtin_amdgcn_class(_fract_x, CLASS_PINF|CLASS_NINF) ? 0.0 : _fract_r; \ + _fract_r = BUILTIN_ISINF_F64(_fract_x) ? 0.0 : _fract_r; \ _fract_r; \ }) #define BUILTIN_FRACTION_F16(X) ({ \ half _fract_x = X; \ half _fract_r = __builtin_amdgcn_fracth(_fract_x); \ - _fract_r = __builtin_amdgcn_classh(_fract_x, CLASS_PINF|CLASS_NINF) ? 0.0h : _fract_r; \ + _fract_r = BUILTIN_ISINF_F16(_fract_x) ? 0.0h : _fract_r; \ _fract_r; \ }) From 311aa40c17b997b54c2f738548217389de201d63 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Mon, 28 Nov 2022 19:16:48 -0500 Subject: [PATCH 19/22] Test end to end compile of atan2 Caught backend creating illegal f16 classes from this. 
Change-Id: I154c5e81795f33647d6c5c200f631d15dd85a62d --- test/compile/CMakeLists.txt | 2 ++ test/compile/atan2.cl | 23 +++++++++++++++++++++++ test/compile/atan2pi.cl | 23 +++++++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 test/compile/atan2.cl create mode 100644 test/compile/atan2pi.cl diff --git a/test/compile/CMakeLists.txt b/test/compile/CMakeLists.txt index e7d5109d..9af0b1a0 100644 --- a/test/compile/CMakeLists.txt +++ b/test/compile/CMakeLists.txt @@ -56,6 +56,8 @@ endforeach() foreach(gpu gfx700 gfx803) add_isa_test(asin ${gpu}) + add_isa_test(atan2 ${gpu}) + add_isa_test(atan2pi ${gpu}) endforeach() foreach(gpu gfx600 gfx700) diff --git a/test/compile/atan2.cl b/test/compile/atan2.cl new file mode 100644 index 00000000..58e38456 --- /dev/null +++ b/test/compile/atan2.cl @@ -0,0 +1,23 @@ + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// GCN: {{^}}test_atan2_f16: +// GFX700: v_cvt_f32_f16 +// GFX700: v_mul_f32 +// GFX700: v_div_scale_f32 +// GFX700: v_div_scale_f32 +// GFX700: v_cmp_class_f32 +// GFX700: v_cmp_class_f32 +// GFX700: v_div_fixup_f32 +// GFX700: v_bfi_b32 + +// GFX803: v_max_f16 +// GFX803: v_rcp_f32 +// GFX803: v_mul_f32 +// GFX803: v_fma_f16 +// GFX803: v_cmp_o_f16 +// GFX803: v_bfi_b32 +kernel void test_atan2_f16(global half* restrict out, global half* restrict in0, global half* restrict in1) { + int id = get_local_id(0); + out[id] = atan2(in0[id], in1[id]); +} diff --git a/test/compile/atan2pi.cl b/test/compile/atan2pi.cl new file mode 100644 index 00000000..4488ec79 --- /dev/null +++ b/test/compile/atan2pi.cl @@ -0,0 +1,23 @@ + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// GCN: {{^}}test_atan2pi_f16: +// GFX700: v_cvt_f32_f16 +// GFX700: v_mul_f32 +// GFX700: v_div_scale_f32 +// GFX700: v_div_scale_f32 +// GFX700: v_cmp_class_f32 +// GFX700: v_cmp_class_f32 +// GFX700: v_div_fixup_f32 +// GFX700: v_bfi_b32 + +// GFX803: v_max_f16 +// GFX803: v_rcp_f32 +// GFX803: v_mul_f32 +// GFX803: v_fma_f16 +// 
GFX803: v_cmp_o_f16 +// GFX803: v_bfi_b32 +kernel void test_atan2pi_f16(global half* restrict out, global half* restrict in0, global half* restrict in1) { + int id = get_local_id(0); + out[id] = atan2pi(in0[id], in1[id]); +} From bd125e2c94a7e88562ee5c16edb79f139bc454d8 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Thu, 8 Dec 2022 15:22:52 -0500 Subject: [PATCH 20/22] Remove dead implementation of atomic_work_item_fence This wasn't built. It was also out of date for the waitcnt bitfields in gfx10 and 11. Remove this to eliminate more subtarget dependent code. Change-Id: I0ae87c2d127a3701a081a58672c94876fb9b5ee2 --- opencl/src/misc/awif.cl | 50 ----------------------------------------- 1 file changed, 50 deletions(-) diff --git a/opencl/src/misc/awif.cl b/opencl/src/misc/awif.cl index 62d496da..11719a3c 100644 --- a/opencl/src/misc/awif.cl +++ b/opencl/src/misc/awif.cl @@ -26,7 +26,6 @@ write_mem_fence(cl_mem_fence_flags flags) atomic_work_item_fence(flags, memory_order_release, memory_scope_work_group); } -#if !defined LOW_LEVEL_APPROACH __attribute__((overloadable)) void atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope) { @@ -74,52 +73,3 @@ atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scop } } } -#else -// LGKMC (LDS, GDS, Konstant, Message) is 4 bits -// EXPC (Export) is 3 bits -// VMC (VMem) is 4 bits -#define LGKMC_MAX 0xf -#define EXPC_MAX 0x7 -#define VMC_MAX 0xf -#define WAITCNT_IMM(LGKMC, EXPC, VMC) ((LGKMC << 8) | (EXPC << 4) | VMC) - -__attribute__((target("vi-insts,ci-insts"))) -__attribute__((overloadable)) void -atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope) -{ - if (order != memory_order_relaxed) { - // Strip CLK_IMAGE_MEM_FENCE - flags &= CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE; - - if (flags == CLK_LOCAL_MEM_FENCE) { - __builtin_amdgcn_s_waitcnt(WAITCNT_IMM(0, EXPC_MAX, VMC_MAX)); - } else if (flags == CLK_GLOBAL_MEM_FENCE) { - if 
(order != memory_order_acquire) { - __builtin_amdgcn_s_waitcnt(WAITCNT_IMM(LGKMC_MAX, EXPC_MAX, 0)); - __builtin_amdgcn_s_dcache_wb(); - } - - if ((scope == memory_scope_device) | (scope == memory_scope_all_svm_devices)) { - if (order != memory_order_release) { - __builtin_amdgcn_buffer_wbinvl1_vol(); - __builtin_amdgcn_s_dcache_inv_vol(); - } - } - } else if (flags == (CLK_GLOBAL_MEM_FENCE|CLK_LOCAL_MEM_FENCE)) { - __builtin_amdgcn_s_waitcnt(order == memory_order_acquire ? - WAITCNT_IMM(0, EXPC_MAX, VMC_MAX) : - WAITCNT_IMM(0, EXPC_MAX, 0)); - if (order != memory_order_acquire) - __builtin_amdgcn_s_dcache_wb(); - - if ((scope == memory_scope_device) | (scope == memory_scope_all_svm_devices)) { - if (order != memory_order_release) { - __builtin_amdgcn_buffer_wbinvl1_vol(); - __builtin_amdgcn_s_dcache_inv_vol(); - } - } - } - } -} -#endif // LOW_LEVEL_APPROACH - From 63ddefcc34525f6df676be5c0bb045ac63ecd4d8 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 9 Dec 2022 23:21:27 -0500 Subject: [PATCH 21/22] Add macros for inf and nan Currently code uses an obtuse way of getting these by bitcasting from the integer representation. 
Change-Id: Iaa5c6634d0f43429f97340d79a741293cf9b835b --- ocml/src/mathD.h | 3 +++ ocml/src/mathF.h | 3 +++ ocml/src/mathH.h | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/ocml/src/mathD.h b/ocml/src/mathD.h index ed42fe8b..2184f080 100644 --- a/ocml/src/mathD.h +++ b/ocml/src/mathD.h @@ -53,3 +53,6 @@ #define MANTLENGTH_DP64 53 #define BASEDIGITS_DP64 15 +#define QNAN_F64 __builtin_nan("") +#define PINF_F64 __builtin_inf() +#define NINF_F64 (-__builtin_inf()) diff --git a/ocml/src/mathF.h b/ocml/src/mathF.h index a5548dab..834c060f 100644 --- a/ocml/src/mathF.h +++ b/ocml/src/mathF.h @@ -52,3 +52,6 @@ #define MANTLENGTH_SP32 24 #define BASEDIGITS_SP32 7 +#define QNAN_F32 __builtin_nanf("") +#define PINF_F32 __builtin_inff() +#define NINF_F32 (-__builtin_inff()) diff --git a/ocml/src/mathH.h b/ocml/src/mathH.h index e15fbf96..430703cd 100644 --- a/ocml/src/mathH.h +++ b/ocml/src/mathH.h @@ -53,6 +53,10 @@ #define MANTLENGTH_HP16 11 #define BASEDIGITS_HP16 5 +#define QNAN_F16 __builtin_nanf16("") +#define PINF_F16 __builtin_inff16() +#define NINF_F16 (-__builtin_inff16()) + #define UGEN(N) \ half2 MATH_MANGLE2(N)(half2 x) \ { \ From 180677850c63c6a8640ebce8fb56fa1c34f67dc3 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Thu, 8 Dec 2022 16:54:54 -0500 Subject: [PATCH 22/22] Run amdgpu-unify-metadata after build Run this hack pass to cleanup the extra version metadata from the linked bitcode files. Really these named nodes should have a setvector behavior. We probably shouldn't run this in the backend, the main reason we have it is from all the duplicates from the libraries. Saves about 1K in total bitcode file size. 
Change-Id: Ibc59fb57113b2734700d1717262c5aefa5c59c55 --- cmake/OCL.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OCL.cmake b/cmake/OCL.cmake index c0cf83ae..773c6f62 100644 --- a/cmake/OCL.cmake +++ b/cmake/OCL.cmake @@ -136,7 +136,7 @@ macro(opencl_bc_lib) # Extra link step with internalize COMMAND $ -internalize -only-needed "${name}.link0${LIB_SUFFIX}" -o "${OUT_NAME}${LIB_SUFFIX}" ${internal_link_libs} - COMMAND $ -passes=strip + COMMAND $ -passes=amdgpu-unify-metadata,strip -o "${OUT_NAME}${STRIP_SUFFIX}" "${OUT_NAME}${LIB_SUFFIX}" COMMAND "${PREPARE_BUILTINS}" -o ${OUTPUT_BC_LIB} "${OUT_NAME}${STRIP_SUFFIX}"