forked from pytorch/extension-cpp
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsetup.py
More file actions
98 lines (81 loc) · 3.12 KB
/
setup.py
File metadata and controls
98 lines (81 loc) · 3.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import glob
from setuptools import find_packages, setup
from packaging.version import Version
from torch.utils.cpp_extension import (
CppExtension,
CUDAExtension,
BuildExtension,
CUDA_HOME,
)
# Importable package name; also the directory that holds the C++/CUDA sources.
library_name = "extension_cpp_stable"
# CPython Stable ABI (py_limited_api) wheels are only supported by torch's
# cpp_extension build machinery starting with PyTorch 2.6.
py_limited_api = Version(torch.__version__) >= Version("2.6.0")
def get_extensions():
    """Return the list of extension modules for setuptools to build.

    Honors two environment variables:
      DEBUG=1     compile with -O0/-g instead of -O3
      USE_CUDA=0  force a CPU-only (CppExtension) build

    CUDA sources are compiled only when CUDA is requested, a CUDA device is
    available, and a toolkit is installed (CUDA_HOME is set).
    """
    is_debug = os.getenv("DEBUG", "0") == "1"
    if is_debug:
        print("Compiling in debug mode")

    want_cuda = os.getenv("USE_CUDA", "1") == "1"
    cuda_enabled = want_cuda and torch.cuda.is_available() and CUDA_HOME is not None
    ext_cls = CUDAExtension if cuda_enabled else CppExtension

    opt_flag = "-O0" if is_debug else "-O3"
    compile_args = {
        "cxx": [
            opt_flag,
            "-fdiagnostics-color=always",
            # Restrict the CPython API to the 3.9 stable subset.
            "-DPy_LIMITED_API=0x03090000",
            # TORCH_TARGET_VERSION exposes only the stable API subset from
            # torch, with a minimum version of 2.10.
            # Format: [MAJ 1 byte][MIN 1 byte][PATCH 1 byte][ABI TAG 5 bytes]
            # 2.10.0 = 0x020A000000000000
            "-DTORCH_TARGET_VERSION=0x020a000000000000",
        ],
        "nvcc": [
            opt_flag,
            # NVCC also needs TORCH_TARGET_VERSION for stable ABI in CUDA code.
            "-DTORCH_TARGET_VERSION=0x020a000000000000",
            # USE_CUDA is currently needed for aoti_torch_get_current_cuda_stream
            # declaration in shim.h. This will be improved in a future release.
            "-DUSE_CUDA",
        ],
    }

    link_args = []
    if is_debug:
        compile_args["cxx"].append("-g")
        compile_args["nvcc"].append("-g")
        link_args.extend(["-O0", "-g"])

    root = os.path.dirname(os.path.abspath(__file__))
    csrc_dir = os.path.join(root, library_name, "csrc")
    src_files = list(glob.glob(os.path.join(csrc_dir, "*.cpp")))
    if cuda_enabled:
        src_files += list(glob.glob(os.path.join(csrc_dir, "cuda", "*.cu")))

    return [
        ext_cls(
            f"{library_name}._C",
            src_files,
            extra_compile_args=compile_args,
            extra_link_args=link_args,
            py_limited_api=py_limited_api,
        )
    ]
# Read the project long description from the repository-level README so the
# package index renders a proper project page. The original passed a bare
# open(...).read() into setup(), which leaked the file handle and relied on
# the platform default encoding; a context manager with explicit UTF-8 fixes
# both without changing the resulting description text.
_readme_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "..", "README.md"
)
with open(_readme_path, encoding="utf-8") as _readme:
    _long_description = _readme.read()

setup(
    name=library_name,
    version="0.0.1",
    packages=find_packages(),
    ext_modules=get_extensions(),
    install_requires=["torch>=2.10.0"],
    description="Example of PyTorch C++ and CUDA extensions using Stable ABI",
    long_description=_long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pytorch/extension-cpp",
    cmdclass={"build_ext": BuildExtension},
    # "cp39" matches Py_LIMITED_API=0x03090000 above: one abi3 wheel covers
    # CPython 3.9+ when the torch version supports limited-API builds.
    options={"bdist_wheel": {"py_limited_api": "cp39"}} if py_limited_api else {},
)