8000 Add support for the jetpack6.1 build by lanluo-nvidia · Pull Request #3201 · pytorch/TensorRT · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

Add support for the jetpack6.1 build #3201

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions .github/scripts/build_jetson_6.1.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
#!/usr/bin/env bash

# Build Torch-TensorRT on a Jetson device running JetPack 6.1.
#
# Usage (run from the repository root):
#   ./.github/scripts/build_jetson_6.1.sh
#
# Prerequisites (install manually if missing):
#   * nvidia-jetpack dev package — provides the TensorRT headers
#     (check for /usr/include/aarch64-linux-gnu/NvInfer.h):
#       sudo apt update
#       sudo apt install nvidia-jetpack
#   * CUDA toolkit — installed automatically with nvidia-jetpack
#     (check with `nvcc --version` or look under /usr/local/cuda/bin):
#       sudo apt install cuda-toolkit-12-6
#   * bazel (via bazelisk):
#       wget -v https://github.com/bazelbuild/bazelisk/releases/download/v1.20.0/bazelisk-linux-arm64
#       sudo mv bazelisk-linux-arm64 /usr/bin/bazel
#       chmod +x /usr/bin/bazel
#   * pip:
#       sudo apt install python3-pip
#   * setuptools < 71 — version 71.*.* fails the build with:
#     "TypeError: canonicalize_version() got an unexpected keyword argument 'strip_trailing_zero'"
#       python -m pip install setuptools==70.2.0
#   * NVIDIA's Jetson torch wheel:
#       wget https://developer.download.nvidia.cn/compute/redist/jp/v61/pytorch/torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl
#       python -m pip install torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl
#   * libcusparseLt — if libcusparseLt.so is missing, download and copy it:
#       wget https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
#       tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
#       sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/
#       sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/

set -euxo pipefail

# get jetpack version: eg: Version: 6.1+b123 ---> 6.1
jetpack_version=$(apt show nvidia-jetpack 2>/dev/null | grep Version: | cut -d ' ' -f 2 | cut -d '+' -f 1)
python_version=$(python --version)
# get cuda release: eg: "Cuda compilation tools, release 12.6, ..." ---> 12.6
cuda_version=$(nvcc --version | grep Cuda | grep release | cut -d ',' -f 2 | sed -e 's/ release //g')
echo "Current jetpack_version: ${jetpack_version} cuda_version: ${cuda_version} python_version: ${python_version} "

# BUGFIX: use ${LD_LIBRARY_PATH:-} — with `set -u` above, expanding an unset
# LD_LIBRARY_PATH would abort the script before the build even starts.
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:-}:/usr/lib/aarch64-linux-gnu:/usr/include/aarch64-linux-gnu:/usr/local/cuda-${cuda_version}/lib64"

# Paths consumed by the MODULE.bazel template below (substituted by envsubst).
TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")
export TORCH_INSTALL_PATH
# site-packages dir = the torch install dir minus its trailing "/torch"
# component (dirname replaces the fragile ${VAR::-6} substring slice).
SITE_PACKAGE_PATH=$(dirname "${TORCH_INSTALL_PATH}")
export SITE_PACKAGE_PATH
export CUDA_HOME=/usr/local/cuda-${cuda_version}/

# Render the JetPack-specific Bazel module file from its template.
# NOTE(review): the template also references ${BUILD_VERSION}, which this
# script never sets/exports — envsubst will substitute it as empty. Confirm
# whether the caller is expected to export BUILD_VERSION beforehand.
envsubst < toolchains/jp_workspaces/MODULE.bazel.tmpl > MODULE.bazel

# build on jetpack and install for the current user
python setup.py --use-cxx11-abi install --user

11 changes: 8 additions & 3 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,12 +156,14 @@ def load_dep_info():
JETPACK_VERSION = "4.6"
elif version == "5.0":
JETPACK_VERSION = "5.0"
elif version == "6.1":
JETPACK_VERSION = "6.1"

if not JETPACK_VERSION:
warnings.warn(
"Assuming jetpack version to be 5.0, if not use the --jetpack-version option"
"Assuming jetpack version to be 6.1, if not use the --jetpack-version option"
)
JETPACK_VERSION = "5.0"
JETPACK_VERSION = "6.1"

if not CXX11_ABI:
warnings.warn(
Expand Down Expand Up @@ -213,12 +215,15 @@ def build_libtorchtrt_pre_cxx11_abi(
elif JETPACK_VERSION == "5.0":
cmd.append("--platforms=//toolchains:jetpack_5.0")
print("Jetpack version: 5.0")
elif JETPACK_VERSION == "6.1":
cmd.append("--platforms=//toolchains:jetpack_6.1")
print("Jetpack version: 6.1")

if CI_BUILD:
cmd.append("--platforms=//toolchains:ci_rhel_x86_64_linux")
print("CI based build")

print("building libtorchtrt")
print(f"building libtorchtrt {cmd=}")
status_code = subprocess.run(cmd).returncode

if status_code != 0:
Expand Down
9 changes: 9 additions & 0 deletions toolchains/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,15 @@ platform(
],
)

# Target platform for building on a Jetson device running JetPack 6.1
# (Linux on aarch64). Selected with `--platforms=//toolchains:jetpack_6.1`.
platform(
    name = "jetpack_6.1",
    constraint_values = [
        "@platforms//os:linux",
        "@platforms//cpu:aarch64",
        "@//toolchains/jetpack:6.1",
    ],
)

platform(
name = "ci_rhel_x86_64_linux",
constraint_values = [
Expand Down
5 changes: 5 additions & 0 deletions toolchains/jetpack/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,8 @@ constraint_value(
name = "4.6",
constraint_setting = ":jetpack",
)

# JetPack 6.1 value for the :jetpack constraint setting, used by the
# jetpack_6.1 platform definition in //toolchains/BUILD.
constraint_value(
    name = "6.1",
    constraint_setting = ":jetpack",
)
61 changes: 61 additions & 0 deletions toolchains/jp_workspaces/MODULE.bazel.tmpl
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Bazel module template for JetPack builds. The ${VAR} placeholders are
# filled in by envsubst (see .github/scripts/build_jetson_6.1.sh), which
# writes the result to MODULE.bazel before invoking the build.
module(
    name = "torch_tensorrt",
    repo_name = "org_pytorch_tensorrt",
    version = "${BUILD_VERSION}"
)

bazel_dep(name = "googletest", version = "1.14.0")
bazel_dep(name = "platforms", version = "0.0.10")
bazel_dep(name = "rules_cc", version = "0.0.9")
bazel_dep(name = "rules_python", version = "0.34.0")

python = use_extension("@rules_python//python/extensions:python.bzl", "python")
python.toolchain(
    ignore_root_user_error = True,
    python_version = "3.11",
)

# rules_pkg is pinned to a specific commit of a fork rather than the
# registry release — see the remote below.
bazel_dep(name = "rules_pkg", version = "1.0.1")
git_override(
    module_name = "rules_pkg",
    commit = "17c57f4",
    remote = "https://github.com/narendasan/rules_pkg",
)

local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "local_repository")

# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
    name = "torch_tensorrt",
    path = "${SITE_PACKAGE_PATH}/torch_tensorrt",
)


new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")

# CUDA should be installed on the system locally
new_local_repository(
    name = "cuda",
    build_file = "@//third_party/cuda:BUILD",
    path = "${CUDA_HOME}",
)

# libtorch is taken from the torch wheel already installed on the device;
# ${TORCH_INSTALL_PATH} points at the installed torch package directory.
new_local_repository(
    name = "libtorch",
    path = "${TORCH_INSTALL_PATH}",
    build_file = "third_party/libtorch/BUILD",
)

# NOTE(review): same path as :libtorch above — presumably the NVIDIA Jetson
# wheel ships a single ABI, so both repos point at one install; confirm the
# pre-cxx11-abi configuration is actually usable from it.
new_local_repository(
    name = "libtorch_pre_cxx11_abi",
    path = "${TORCH_INSTALL_PATH}",
    build_file = "third_party/libtorch/BUILD"
)

# TensorRT headers/libraries come from the nvidia-jetpack apt packages
# installed under /usr (e.g. /usr/include/aarch64-linux-gnu/NvInfer.h).
new_local_repository(
    name = "tensorrt",
    path = "/usr/",
    build_file = "@//third_party/tensorrt/local:BUILD"
)


Loading
0