git: 85aebbb57bed - main - misc/py-pytorch: Disable mkldnn, CUDA, and add a patch
Date: Wed, 10 May 2023 04:21:04 UTC
The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=85aebbb57bed45347a7c13009a2cf10abff229ec

commit 85aebbb57bed45347a7c13009a2cf10abff229ec
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2023-05-10 04:15:26 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2023-05-10 04:21:00 +0000

    misc/py-pytorch: Disable mkldnn, CUDA, and add a patch
---
 misc/py-pytorch/Makefile                           |  3 +++
 misc/py-pytorch/files/patch-c10_core_DynamicCast.h | 21 +++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/misc/py-pytorch/Makefile b/misc/py-pytorch/Makefile
index a71d2a5ebb29..e9f1fc05ffdd 100644
--- a/misc/py-pytorch/Makefile
+++ b/misc/py-pytorch/Makefile
@@ -1,6 +1,7 @@
 PORTNAME=	pytorch
 DISTVERSIONPREFIX=	v
 DISTVERSION=	2.0.0
+PORTREVISION=	1
 CATEGORIES=	misc # machine-learning
 MASTER_SITES=	https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
 PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
@@ -39,6 +40,8 @@ USE_PYTHON=	distutils autoplist
 MAKE_ENV=	USE_NINJA=no # ninja breaks for some reason
 MAKE_ENV+=	BUILD_TEST=0 # ninja breaks for some reason
+MAKE_ENV+=	USE_MKLDNN=0 # disable MKLDNN that doesn't exist, see https://github.com/pytorch/pytorch/issues/100957
+MAKE_ENV+=	USE_CUDNN=0
 LDFLAGS+=	-lexecinfo
 BINARY_ALIAS=	make=${GMAKE}
diff --git a/misc/py-pytorch/files/patch-c10_core_DynamicCast.h b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
new file mode 100644
index 000000000000..517085e8bdee
--- /dev/null
+++ b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
@@ -0,0 +1,21 @@
+- workaround for the failure during the math/dgl build:
+- /usr/local/lib/python3.9/site-packages/torch/include/c10/core/DynamicCast.h:112:22: error: use of undeclared identifier '__assert_fail'
+
+--- c10/core/DynamicCast.h.orig	2023-05-10 02:37:18 UTC
++++ c10/core/DynamicCast.h
+@@ -99,13 +99,13 @@ C10_HOST_DEVICE inline void cast_and_store(
+ template <> \
+ C10_HOST_DEVICE inline T fetch_and_cast<T>( \
+ const ScalarType src_type, const void* ptr) { \
+- CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \
++ assert(ScalarType::scalartype_ == src_type); \
+ return c10::load<T>(ptr); \
+ } \
+ template <> \
+ C10_HOST_DEVICE inline void cast_and_store<T>( \
+ const ScalarType dest_type, void* ptr, T value) { \
+- CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
++ assert(ScalarType::scalartype_ == dest_type); \
+ *(T*)ptr = value; \
+ }
+
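For context on what the patch changes: the error message quoted in the patch header indicates that, when the installed DynamicCast.h is compiled during the math/dgl build on FreeBSD, the expansion of CUDA_KERNEL_ASSERT ends up referencing __assert_fail, which is not declared there; the patch substitutes the standard assert() in the affected specializations. The following is a minimal stand-alone C++ sketch of that pattern, not the actual c10 header: DEFINE_FETCH_AND_CAST, the simplified ScalarType enum, and the use of std::memcpy in place of c10::load<T> are illustrative stand-ins only.

// Sketch of a macro that stamps out fetch_and_cast<T> specializations,
// using plain assert() as the runtime type check (the role played by
// CUDA_KERNEL_ASSERT in the real header before the patch).
#include <cassert>
#include <cstring>

enum class ScalarType { Float, Double };  // simplified stand-in

// Primary template; only explicit specializations are defined below.
template <typename T>
T fetch_and_cast(ScalarType src_type, const void* ptr);

// Hypothetical macro mirroring the shape of the patched code: check the
// runtime tag, then load the value from untyped memory.
#define DEFINE_FETCH_AND_CAST(T, scalartype_)                              \
  template <>                                                              \
  T fetch_and_cast<T>(ScalarType src_type, const void* ptr) {              \
    assert(ScalarType::scalartype_ == src_type); /* was CUDA_KERNEL_ASSERT */ \
    T v;                                                                   \
    std::memcpy(&v, ptr, sizeof(v)); /* stands in for c10::load<T> */      \
    return v;                                                              \
  }

DEFINE_FETCH_AND_CAST(float, Float)
DEFINE_FETCH_AND_CAST(double, Double)

int main() {
  double d = 2.5;
  // Succeeds because the tag matches; a mismatched tag would trip assert().
  return fetch_and_cast<double>(ScalarType::Double, &d) == 2.5 ? 0 : 1;
}

Since the port also sets USE_MKLDNN=0 and USE_CUDNN=0 and builds without CUDA, a host-side assert() appears sufficient for these checks in this configuration; the upstream discussion is tracked in the GitHub issue referenced in the Makefile comment.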