git: 3f9d30d72799 - main - math/onednn252: New port: Intel(R) Math Kernel Library for Deep Neural Networks
Date: Wed, 15 Mar 2023 10:18:43 UTC
The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=3f9d30d7279954888d1c470968347a6de3eed69e

commit 3f9d30d7279954888d1c470968347a6de3eed69e
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2023-03-15 10:08:57 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2023-03-15 10:18:40 +0000

    math/onednn252: New port: Intel(R) Math Kernel Library for Deep Neural Networks

    Resurrect onednn-2.5.2 for math/flashlight.
---
 math/Makefile                                   |  1 +
 math/onednn252/Makefile                         | 82 +++++++++++++++++++++++++
 math/onednn252/distinfo                         |  3 +
 math/onednn252/files/patch-cmake_platform.cmake | 60 ++++++++++++++++++
 math/onednn252/pkg-descr                        |  9 +++
 math/onednn252/pkg-plist                        | 36 +++++++++++
 6 files changed, 191 insertions(+)

diff --git a/math/Makefile b/math/Makefile
index 89a6d0d79386..cc1ff6d236f0 100644
--- a/math/Makefile
+++ b/math/Makefile
@@ -649,6 +649,7 @@
     SUBDIR += oink
     SUBDIR += oleo
     SUBDIR += onednn
+    SUBDIR += onednn252
     SUBDIR += openblas
     SUBDIR += openfst
     SUBDIR += openlibm
diff --git a/math/onednn252/Makefile b/math/onednn252/Makefile
new file mode 100644
index 000000000000..d89827f4be00
--- /dev/null
+++ b/math/onednn252/Makefile
@@ -0,0 +1,82 @@
+PORTNAME=	onednn
+DISTVERSIONPREFIX=	v
+DISTVERSION=	2.5.2
+CATEGORIES=	math # machine-learning
+PKGNAMESUFFIX=	252
+
+MAINTAINER=	yuri@FreeBSD.org
+COMMENT=	Intel(R) Math Kernel Library for Deep Neural Networks
+WWW=		https://01.org/onednn
+
+LICENSE=	APACHE20
+LICENSE_FILE=	${WRKSRC}/LICENSE
+
+NOT_FOR_ARCHS=	armv6 armv7 i386 mips powerpc powerpcspe
+BROKEN_aarch64=	error: sys/prctl.h file not found
+.if !exists(/usr/include/omp.h)
+BROKEN=		requires OpenMP support that is missing on this architecture
+.endif
+
+TEST_DEPENDS=	bash:shells/bash \
+		libsysinfo>0:devel/libsysinfo
+
+USES=		cmake compiler:c++11-lang localbase:ldflags
+USE_LDCONFIG=	yes
+
+USE_GITHUB=	yes
+GH_ACCOUNT=	oneapi-src
+GH_PROJECT=	oneDNN
+
+CMAKE_OFF=	DNNL_BUILD_TESTS DNNL_BUILD_EXAMPLES
+
+OPTIONS_DEFAULT=	SIMD_DEFAULT OPENMP
+OPTIONS_SINGLE=		SIMD CPU_RUNTIME
+OPTIONS_SINGLE_SIMD=	SIMD_DEFAULT SIMD_NATIVE
+OPTIONS_SINGLE_CPU_RUNTIME=	OPENMP TBB SEQ THREADPOOL_STANDALONE THREADPOOL_EIGEN THREADPOOL_TBB
+CPU_RUNTIME_DESC=	Threading runtime for CPU engines
+
+OPENMP_CMAKE_ON=	-DDNNL_CPU_RUNTIME=OMP
+
+SEQ_DESC=	Sequential (no parallelism)
+SEQ_CMAKE_ON=	-DDNNL_CPU_RUNTIME=SEQ
+
+SIMD_DEFAULT_DESC=	Default, no non-default SIMD instructions are used
+
+SIMD_NATIVE_DESC=	Optimize for this CPU
+SIMD_NATIVE_CXXFLAGS=	-march=native
+
+SIMD_SSE41_DESC=	Use SSE4.1 instructions
+SIMD_SSE41_CXXFLAGS=	-msse4.1
+
+TBB_DESC=	Threading Building Blocks
+TBB_CMAKE_ON=	-DDNNL_CPU_RUNTIME=TBB
+TBB_BROKEN=	https://github.com/oneapi-src/oneDNN/issues/876
+
+THREADPOOL_STANDALONE_DESC=	Threadpool based on the standalone implementation
+THREADPOOL_STANDALONE_CMAKE_ON=	-DDNNL_CPU_RUNTIME=THREADPOOL -D_DNNL_TEST_THREADPOOL_IMPL=STANDALONE
+THREADPOOL_STANDALONE_BROKEN=	https://github.com/oneapi-src/oneDNN/issues/877
+THREADPOOL_EIGEN_DESC=	Threadpool based on the Eigen implementation
+THREADPOOL_EIGEN_CMAKE_ON=	-DDNNL_CPU_RUNTIME=THREADPOOL -D_DNNL_TEST_THREADPOOL_IMPL=EIGEN
+THREADPOOL_TBB_DESC=	Threadpool based on the TBB implementation
+THREADPOOL_TBB_CMAKE_ON=	-DDNNL_CPU_RUNTIME=THREADPOOL -D_DNNL_TEST_THREADPOOL_IMPL=TBB
+THREADPOOL_TBB_BROKEN=	https://github.com/oneapi-src/oneDNN/issues/876
+
+ARCH_LOCAL!=	/usr/bin/uname -p # because OPTIONS_SINGLE_SIMD doesn't support per-ARCH values OPTIONS_SINGLE_SIMD_{arch}, like OPTIONS_DEFINE_{arch}
+
+.if ${ARCH_LOCAL} == i386 || ${ARCH_LOCAL} == amd64
+OPTIONS_SINGLE_SIMD+=	SIMD_SSE41
+.endif
+
+CXXFLAGS:=	${CXXFLAGS:S/-O2/-O3/} # clang writes wrong binary code when -O2 optimization is used and one testcase is failing, see https://bugs.llvm.org/show_bug.cgi?id=48104
+
+post-install:
+	@${RM} -r ${STAGEDIR}${PREFIX}/share/doc
+
+do-test:
+	@${REINPLACE_CMD} 's| /bin/bash | ${LOCALBASE}/bin/bash |' ${WRKSRC}/tests/CMakeLists.txt
+	@cd ${BUILD_WRKSRC} && \
+		${SETENV} ${CONFIGURE_ENV} ${CMAKE_BIN} ${CMAKE_ARGS} -DDNNL_BUILD_TESTS=ON ${CMAKE_SOURCE_PATH} && \
+		${SETENV} ${MAKE_ENV} ${MAKE_CMD} ${MAKE_ARGS} ${ALL_TARGET} && \
+		${SETENV} ${MAKE_ENV} ${MAKE_CMD} ${MAKE_ARGS} test
+
+.include <bsd.port.mk>
diff --git a/math/onednn252/distinfo b/math/onednn252/distinfo
new file mode 100644
index 000000000000..8bfea9ad6ed3
--- /dev/null
+++ b/math/onednn252/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1678870409
+SHA256 (oneapi-src-oneDNN-v2.5.2_GH0.tar.gz) = 11d50235afa03571dc70bb6d96a98bfb5d9b53e8c00cc2bfbde78588bd01f6a3
+SIZE (oneapi-src-oneDNN-v2.5.2_GH0.tar.gz) = 5807898
diff --git a/math/onednn252/files/patch-cmake_platform.cmake b/math/onednn252/files/patch-cmake_platform.cmake
new file mode 100644
index 000000000000..390741db48ac
--- /dev/null
+++ b/math/onednn252/files/patch-cmake_platform.cmake
@@ -0,0 +1,60 @@
+--- cmake/platform.cmake.orig	2021-12-07 19:00:25 UTC
++++ cmake/platform.cmake
+@@ -175,7 +175,7 @@ elseif(UNIX OR MINGW)
+             set(DEF_ARCH_OPT_FLAGS "-O3")
+         endif()
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-mcpu=native")
+         endif()
+     elseif(DNNL_TARGET_ARCH STREQUAL "PPC64")
+@@ -183,7 +183,7 @@ elseif(UNIX OR MINGW)
+             set(DEF_ARCH_OPT_FLAGS "-O3")
+         endif()
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-mcpu=native")
+         endif()
+     elseif(DNNL_TARGET_ARCH STREQUAL "S390X")
+@@ -191,10 +191,10 @@ elseif(UNIX OR MINGW)
+             set(DEF_ARCH_OPT_FLAGS "-O3")
+         endif()
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-march=native")
+         endif()
+-    elseif(DNNL_TARGET_ARCH STREQUAL "X64")
++    elseif(FALSE AND DNNL_TARGET_ARCH STREQUAL "X64")
+         set(DEF_ARCH_OPT_FLAGS "-msse4.1")
+     endif()
+     # Clang cannot vectorize some loops with #pragma omp simd and gets
+@@ -272,7 +272,7 @@ elseif(UNIX OR MINGW)
+             set(DEF_ARCH_OPT_FLAGS "-O3")
+         endif()
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-mcpu=native")
+         endif()
+     elseif(DNNL_TARGET_ARCH STREQUAL "PPC64")
+@@ -281,7 +281,7 @@ elseif(UNIX OR MINGW)
+         endif()
+         # In GCC, -ftree-vectorize is turned on under -O3 since 2007.
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-mcpu=native")
+         endif()
+     elseif(DNNL_TARGET_ARCH STREQUAL "S390X")
+@@ -290,7 +290,7 @@ elseif(UNIX OR MINGW)
+         endif()
+         # In GCC, -ftree-vectorize is turned on under -O3 since 2007.
+         # For native compilation tune for the host processor
+-        if (CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
++        if (FALSE AND CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)
+             append(DEF_ARCH_OPT_FLAGS "-march=native")
+         endif()
+     elseif(DNNL_TARGET_ARCH STREQUAL "RV64")
diff --git a/math/onednn252/pkg-descr b/math/onednn252/pkg-descr
new file mode 100644
index 000000000000..eb57f7ad4b0b
--- /dev/null
+++ b/math/onednn252/pkg-descr
@@ -0,0 +1,9 @@
+Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an
+open source performance library for deep learning applications. The library
+accelerates deep learning applications and framework on Intel(R) architecture.
+Intel(R) MKL-DNN contains vectorized and threaded building blocks which you can
+use to implement deep neural networks (DNN) with C and C++ interfaces.
+
+DNN functionality optimized for Intel architecture is also included in Intel(R)
+Math Kernel Library (Intel(R) MKL). API in this implementation is not compatible
+with Intel MKL-DNN and does not include certain new and experimental features.
diff --git a/math/onednn252/pkg-plist b/math/onednn252/pkg-plist
new file mode 100644
index 000000000000..24bd772e4f67
--- /dev/null
+++ b/math/onednn252/pkg-plist
@@ -0,0 +1,36 @@
+include/dnnl.h
+include/dnnl.hpp
+include/dnnl_config.h
+include/dnnl_debug.h
+include/dnnl_ocl.h
+include/dnnl_ocl.hpp
+include/dnnl_sycl.h
+include/dnnl_sycl.hpp
+include/dnnl_sycl_types.h
+include/dnnl_threadpool.h
+include/dnnl_threadpool.hpp
+include/dnnl_threadpool_iface.hpp
+include/dnnl_types.h
+include/dnnl_version.h
+include/oneapi/dnnl/dnnl.h
+include/oneapi/dnnl/dnnl.hpp
+include/oneapi/dnnl/dnnl_config.h
+include/oneapi/dnnl/dnnl_debug.h
+include/oneapi/dnnl/dnnl_ocl.h
+include/oneapi/dnnl/dnnl_ocl.hpp
+include/oneapi/dnnl/dnnl_ocl_types.h
+include/oneapi/dnnl/dnnl_sycl.h
+include/oneapi/dnnl/dnnl_sycl.hpp
+include/oneapi/dnnl/dnnl_sycl_types.h
+include/oneapi/dnnl/dnnl_threadpool.h
+include/oneapi/dnnl/dnnl_threadpool.hpp
+include/oneapi/dnnl/dnnl_threadpool_iface.hpp
+include/oneapi/dnnl/dnnl_types.h
+include/oneapi/dnnl/dnnl_version.h
+lib/cmake/dnnl/dnnl-config-version.cmake
+lib/cmake/dnnl/dnnl-config.cmake
+lib/cmake/dnnl/dnnl-targets-%%CMAKE_BUILD_TYPE%%.cmake
+lib/cmake/dnnl/dnnl-targets.cmake
+lib/libdnnl.so
+lib/libdnnl.so.2
+lib/libdnnl.so.2.5
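
For readers who want to sanity-check the installed library, the snippet below is a minimal sketch (not part of the commit) of how a program built against math/onednn252 would bring up the oneDNN 2.x C++ API; the file name and compiler invocation are illustrative assumptions, not taken from the port.

// onednn252_hello.cpp -- minimal sketch; assumes the port's headers and
// libdnnl are installed under /usr/local (the FreeBSD default LOCALBASE).
#include <oneapi/dnnl/dnnl.hpp>
#include <iostream>

int main() {
    // Ask the library how many CPU engines it exposes, then create one
    // together with an execution stream (plain oneDNN 2.x C++ API).
    const auto n = dnnl::engine::get_count(dnnl::engine::kind::cpu);
    std::cout << "CPU engines available: " << n << std::endl;
    dnnl::engine eng(dnnl::engine::kind::cpu, 0);
    dnnl::stream strm(eng);
    std::cout << "engine and stream created" << std::endl;
    return 0;
}

A build line along the lines of "c++ -I/usr/local/include -L/usr/local/lib onednn252_hello.cpp -ldnnl" should suffice. CMake consumers (math/flashlight being the motivation for this port) can instead rely on the installed lib/cmake/dnnl/dnnl-config.cmake and link against the exported DNNL::dnnl target via find_package(dnnl CONFIG REQUIRED).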