24 changes: 18 additions & 6 deletions .github/workflows/test.yml
@@ -2,11 +2,9 @@ name: Test

on:
pull_request:
branches:
- master
branches: [master]
push:
branches:
- master
branches: [master]

jobs:
test:
@@ -17,7 +15,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest]
python-version: ["3.7", "3.8", "3.9", "3.10"]
python-version: ["3.11", "3.12", "3.13", "3.14"]

steps:
- name: Checkout source
@@ -26,15 +24,29 @@ jobs:
fetch-depth: 0

- name: Install Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}

# Install CUDA toolkit (nvcc + CUDA_PATH)
- name: Install CUDA Toolkit
uses: Jimver/cuda-toolkit@<version>
with:
cuda: "12.3.0"

- name: Verify CUDA
shell: bash
run: |
echo "CUDA_PATH=$CUDA_PATH"
nvcc --version

- name: Install rtxpy
run: |
python -m pip install -U pip
python -m pip install -ve .[tests]
python -m pip list

- name: Run tests
run: |
python -m pytest -v rtxpy/tests
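
The CI sequence above can be reproduced locally once a CUDA toolkit providing `nvcc` is on the PATH; a minimal sketch, assuming a Linux shell and a fresh checkout of the repository:

```bash
# Confirm which CUDA toolkit the build will pick up
echo "CUDA_PATH=${CUDA_PATH:-unset}"
nvcc --version

# Editable install with the test extras, then run the suite
python -m pip install -U pip
python -m pip install -ve .[tests]
python -m pytest -v rtxpy/tests
```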

49 changes: 38 additions & 11 deletions CMakeLists.txt
@@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.10)

project(rtxpy)

set(CMAKE_CXX_STANDARD 11)
@@ -8,25 +7,53 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
add_definitions(-DRTX_EXPORTS)

if (WIN32)
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
endif()

SET(SOURCE_DIR "crtx")
set(SOURCE_DIR "crtx")

set(HEADERS
${SOURCE_DIR}/common.h
${SOURCE_DIR}/internal.h
${SOURCE_DIR}/rtx.h
${SOURCE_DIR}/common.h
${SOURCE_DIR}/internal.h
${SOURCE_DIR}/rtx.h
)

set(SOURCES
${SOURCE_DIR}/dllmain.cpp
${SOURCE_DIR}/cuew/cuew.c
${SOURCE_DIR}/dllmain.cpp
${SOURCE_DIR}/cuew/cuew.c
)

add_library(${PROJECT_NAME} SHARED ${HEADERS} ${SOURCES})
target_compile_definitions(${PROJECT_NAME} PRIVATE CUDA_NO_PROTOTYPES OPTIX_DONT_INCLUDE_CUDA)

# ---- CUDA toolkit path (adjust if yours differs) ----
set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
set(CUDA_INCLUDE_DIR "${CUDA_TOOLKIT_ROOT_DIR}/include")
set(CUDA_LIB_DIR "${CUDA_TOOLKIT_ROOT_DIR}/lib64")

target_include_directories(${PROJECT_NAME} PRIVATE
${SOURCE_DIR}
${SOURCE_DIR}/optix_9.1/include
${SOURCE_DIR}/optix_9.1
${SOURCE_DIR}/cuew
${CUDA_INCLUDE_DIR}
)

# Link search paths:
# - CUDA toolkit libs (cudart, etc.) live here
# - WSL provides the NVIDIA driver libcuda.so here
target_link_directories(${PROJECT_NAME} PRIVATE
${CUDA_LIB_DIR}
/usr/lib/wsl/lib
)

target_link_libraries(${PROJECT_NAME} PRIVATE
cuda # libcuda.so (driver API)
dl
pthread
)

target_include_directories(${PROJECT_NAME} PUBLIC
${SOURCE_DIR}/optix_7.1
${SOURCE_DIR}/cuew
# Ensure runtime can find libcuda.so on WSL
target_link_options(${PROJECT_NAME} PRIVATE
"-Wl,-rpath,/usr/lib/wsl/lib"
)
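
After a build, the new link and rpath settings can be sanity-checked from the shell. A quick sketch, assuming the library ends up at `build/librtxpy.so` as in the build instructions elsewhere in this PR:

```bash
# RPATH/RUNPATH should include /usr/lib/wsl/lib so the WSL-provided driver is found
readelf -d build/librtxpy.so | grep -iE 'rpath|runpath'

# libcuda.so should resolve to the WSL passthrough copy, not a toolkit copy
ldd build/librtxpy.so | grep libcuda
```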
3 changes: 2 additions & 1 deletion MANIFEST.in
@@ -1,3 +1,4 @@
# Extra files required in sdist
include CMakeLists.txt
recursive-include crtx *.h *.c *cpp *.cu *.sh
recursive-include crtx *.h *.c *cpp *.cu *.sh *.ptx
recursive-include rtxpy *.ptx *.so
66 changes: 66 additions & 0 deletions README.md
@@ -32,3 +32,69 @@ To run tests

pip install -ve .[tests]
pytest -v rtxpy/tests


## Building from source

### Building kernel.ptx
```bash
cd crtx
bash compileOptiX.sh
cp kernel.ptx ../rtxpy
```

### Building `librtxpy.so`
```bash
bash clean_build.sh
cp build/librtxpy.so ./rtxpy
```

### Building on WSL2
To get the build working on WSL, I followed the post below:
https://forums.developer.nvidia.com/t/problem-running-optix-7-6-in-wsl/239355/8

---------------------

Welcome @chris.schwindt,

I believe we’re not yet packaging OptiX into the WSL2 driver. I believe this is hung up on a redesign of the driver packaging and delivery process, which is why it’s taking such a long time.

I have heard rumors that people have been able to get OptiX to work in WSL2 via manual install. This is unofficial and subject to change, so your mileage may vary, but here are some steps that may work for you:

**Running OptiX Applications on WSL 2**

1. Install WSL 2 and enable CUDA
   - Follow the canonical methods for installing WSL, the display driver, and the CUDA Toolkit within WSL.
   - As mentioned in the docs, do not install a Linux display driver in WSL; this will break the mapping of libcuda.
   - There are CUDA Toolkit downloads specifically for WSL that will not attempt to install a driver, only the toolkit. You can also deselect the driver in a normal version of the toolkit.
2. Obtain OptiX / RTCore libraries for Linux
   - Download and extract the libraries from the Linux display driver. You can run the driver installer in WSL using `./[driver filename].run -x`, which will unpack the driver but not install it.
   - Copy `libnvoptix.so.XXX.00`, `libnvidia-rtcore.so.XXX.00`, and `libnvidia-ptxjitcompiler.so.XXX.00` into `C:/Windows/System32/lxss/lib`, where XXX is the driver version.
   - Rename `libnvoptix.so.XXX.00` to `libnvoptix.so.1`.
   - Rename `libnvidia-ptxjitcompiler.so.XXX.00` to `libnvidia-ptxjitcompiler.so.1`.
   - Do not rename `libnvidia-rtcore.so.XXX.00`.
   - Be aware that future drivers may need additional libraries that will need to be copied.
3. Building an OptiX Application
   - You may need to add `/usr/local/cuda/bin` to your `PATH` to access NVCC, but do NOT add `/usr/local/cuda/lib64` to `LD_LIBRARY_PATH` as you normally would when installing the CUDA toolkit; libcuda and the other libraries are passed through from `C:/Windows/System32/lxss/lib`, where you placed the OptiX and RTCore libs.
   - Instead, add `/usr/lib/wsl/lib` to your `LD_LIBRARY_PATH` to pick up CUDA, OptiX, etc.
4. Running an OptiX Application
   - With `LD_LIBRARY_PATH` set per the previous step, you should be able to run an OptiX executable.
   - You may need to rebuild the WSL cache. You can do so by quitting any WSL sessions and running `wsl --shutdown` from PowerShell, then starting a new WSL session. Failing to reset the cache may lead to strange load paths.
   - You may verify paths are correct using strace, e.g. `strace -o trace ./bin/optixHello`.
David.

---------------------

I ended up downloading: https://uk.download.nvidia.com/XFree86/Linux-x86_64/590.44.01/NVIDIA-Linux-x86_64-590.44.01.run
Nvidia Driver: 591.44

I then extracted the files and followed the instructions above:

```bash
bash NVIDIA-Linux-x86_64-590.44.01.run -x
```
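
For reference, the copy/rename/path steps from the quoted post then look roughly like the following. This is a hedged sketch only: the driver version (590.44.01), the extracted directory name, and the `/mnt/c` mount point are assumptions based on the notes above, and copying into `System32` may require elevated permissions.

```bash
# The extracted driver directory contains the OptiX / RTCore / PTX JIT libraries
cd NVIDIA-Linux-x86_64-590.44.01

# Copy them into the Windows-side passthrough directory (C:/Windows/System32/lxss/lib)
cp libnvoptix.so.590.44.01 \
   libnvidia-rtcore.so.590.44.01 \
   libnvidia-ptxjitcompiler.so.590.44.01 \
   /mnt/c/Windows/System32/lxss/lib/

# Rename two of them; libnvidia-rtcore keeps its versioned name
cd /mnt/c/Windows/System32/lxss/lib
mv libnvoptix.so.590.44.01 libnvoptix.so.1
mv libnvidia-ptxjitcompiler.so.590.44.01 libnvidia-ptxjitcompiler.so.1

# Make the WSL passthrough libraries visible at build and run time
export LD_LIBRARY_PATH=/usr/lib/wsl/lib:${LD_LIBRARY_PATH:-}
```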
5 changes: 5 additions & 0 deletions clean_build.sh
@@ -0,0 +1,5 @@
rm -rf build
mkdir build
cd build
cmake ..
cmake --build . -j
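
`clean_build.sh` performs a from-scratch, out-of-source CMake build. A usage sketch, assuming it is run from the repository root (the copy step mirrors the README):

```bash
bash clean_build.sh
cp build/librtxpy.so ./rtxpy
```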
79 changes: 45 additions & 34 deletions crtx/compileOptiX.sh
@@ -1,53 +1,64 @@
#!/bin/bash
set -euo pipefail

unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) machine=Linux;;
Darwin*) machine=Mac;;
CYGWIN*) machine=Cygwin;;
MINGW*) machine=MinGw;;
*) machine="UNKNOWN:${unameOut}"
*) machine="UNKNOWN:${unameOut}";;
esac

if [ ! -d "external/shaders" ]
then
mkdir external/shaders
fi
mkdir -p external/shaders

OPTIX_VERSION=9.1.0

if [ "${machine}" == "Linux" ]
then
echo "Setting up variables for Linux"
export OPTIX_VERSION=7.1.0
export INCLUDES="-I'/<PATH_TO>/NVIDIA-OptiX-SDK-${OPTIX_VERSION}-linux64-x86_64/include'"
export INCLUDES="$INCLUDES -I'../include'"
export INCLUDES="$INCLUDES -I'/usr/local/cuda/samples/common/inc'" #For math_helper.h
export NVCC="/usr/local/cuda/bin/nvcc"
export COMPILER="g++"
else
if [ "${machine}" == "MinGw" ]
then
echo "Setting up variables for Windows (Git Bash)"

export OPTIX_VERSION=7.1.0
export CUDA_VERSION=11.4
export INCLUDES=(-I"/c/ProgramData/NVIDIA Corporation/OptiX SDK $OPTIX_VERSION/include" -I"../include" -I"/c/ProgramData/NVIDIA Corporation/CUDA Samples/v${CUDA_VERSION}/common/inc")
export NVCC="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc"
# You may need to update the path to a valid compiler. This points to MSVS 2019 compiler
export COMPILER="/c/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64"
else
echo "Unsupported OS : ${machine}"
fi
fi
echo "Setting up variables for Linux"

echo "Compiling for OptiX $OPTIX_VERSION"
echo "NVCC compiler currently set: $NVCC"
echo "C++ compiler currently set: $COMPILER"
NVCC="/usr/local/cuda/bin/nvcc"
COMPILER="g++"

export NVCC_FLAGS="-m64 --std c++11 --use_fast_math -cudart static -arch sm_50 -Xptxas -v"
INCLUDES=(
-I"./optix_9.1" # <-- OptiX 9.1 headers vendored in this repo
-I"../include"
-I"/usr/local/cuda/samples/common/inc" # For helper_math.h / math_helper.h (CUDA samples)
)

if [ -f "kernel.ptx" ]
elif [ "${machine}" == "MinGw" ]
then
rm kernel.ptx
echo "Setting up variables for Windows (Git Bash)"

CUDA_VERSION=11.4
INCLUDES=(
-I"./optix_7.1" # <-- also use vendored headers on Windows
-I"../include"
-I"/c/ProgramData/NVIDIA Corporation/CUDA Samples/v${CUDA_VERSION}/common/inc"
)

NVCC="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc"
COMPILER="/c/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64"
else
echo "Unsupported OS : ${machine}"
exit 1
fi

exec "$NVCC" $NVCC_FLAGS -ccbin "$COMPILER" "${INCLUDES[@]}" -ptx -o kernel.ptx kernel.cu >> cudaoutput.txt | tee
echo "Compiling for OptiX ${OPTIX_VERSION}"
echo "NVCC compiler currently set: ${NVCC}"
echo "C++ compiler currently set: ${COMPILER}"

NVCC_FLAGS=(
-m64
--std=c++11
--use_fast_math
-cudart=static
-arch=sm_86
-Xptxas -v
)

rm -f kernel.ptx

exec "${NVCC}" "${NVCC_FLAGS[@]}" -ccbin "${COMPILER}" "${INCLUDES[@]}" -ptx -o kernel.ptx kernel.cu \
>> cudaoutput.txt | tee
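
To regenerate the PTX on Linux, the script is run from `crtx/` and the result copied next to the Python package (as in the README). A small sanity check, assuming the toolchain paths above: the emitted `kernel.ptx` should declare the architecture requested via `-arch sm_86`.

```bash
cd crtx
bash compileOptiX.sh

# The .version/.target directives near the top of the PTX reflect the requested arch
grep -E '^\.(version|target|address_size)' kernel.ptx

cp kernel.ptx ../rtxpy
```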
2 changes: 1 addition & 1 deletion crtx/cuew/cuew.c
@@ -25,7 +25,7 @@
# endif
#endif

#include <cuew.h>
#include <cuew/cuew.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>