diff --git a/.github/workflows/gh-build-and-test.yml b/.github/workflows/gh-build-and-test.yml
index 51f4bd87b..c2dc50c6b 100644
--- a/.github/workflows/gh-build-and-test.yml
+++ b/.github/workflows/gh-build-and-test.yml
@@ -300,9 +300,9 @@ jobs:
       - name: Run cuda.core tests
         shell: bash --noprofile --norc -xeuo pipefail {0}
         run: |
-          if [[ $SKIP_CUDA_BINDINGS_TEST == 1 ]]; then
+          if [[ ${{ matrix.python-version }} == "3.13" ]]; then
             # TODO: remove this hack once cuda-python has a cp313 build
-            if [[ ${{ matrix.python-version }} == "3.13" ]]; then
+            if [[ $SKIP_CUDA_BINDINGS_TEST == 1 ]]; then
               echo "Python 3.13 + cuda-python ${{ matrix.cuda-version }} is not supported, skipping the test..."
               exit 0
             fi
@@ -316,9 +316,6 @@ jobs:
           popd
 
           pushd ./cuda_core
-          # TODO: add requirements.txt for test deps?
-          pip install pytest
-          # TODO: add CuPy to test deps (which would require cuRAND)
-          # pip install "cupy-cuda${TEST_CUDA_MAJOR}x"
+          pip install -r "tests/requirements-cu${TEST_CUDA_MAJOR}.txt"
           pytest -rxXs tests/
           popd
diff --git a/cuda_core/examples/saxpy.py b/cuda_core/examples/saxpy.py
index 8caa4d4a5..d1d7211f2 100644
--- a/cuda_core/examples/saxpy.py
+++ b/cuda_core/examples/saxpy.py
@@ -47,8 +47,9 @@
 # prepare input/output
 size = cp.uint64(64)
 a = dtype(10)
-x = cp.random.random(size, dtype=dtype)
-y = cp.random.random(size, dtype=dtype)
+rng = cp.random.default_rng()
+x = rng.random(size, dtype=dtype)
+y = rng.random(size, dtype=dtype)
 out = cp.empty_like(x)
 dev.sync()  # cupy runs on a different stream from s, so sync before accessing
 
@@ -73,8 +74,8 @@
 # prepare input
 size = cp.uint64(128)
 a = dtype(42)
-x = cp.random.random(size, dtype=dtype)
-y = cp.random.random(size, dtype=dtype)
+x = rng.random(size, dtype=dtype)
+y = rng.random(size, dtype=dtype)
 dev.sync()
 
 # prepare output
diff --git a/cuda_core/examples/strided_memory_view.py b/cuda_core/examples/strided_memory_view.py
index 564d7fa01..2cc25989e 100644
--- a/cuda_core/examples/strided_memory_view.py
+++ b/cuda_core/examples/strided_memory_view.py
@@ -91,6 +91,7 @@
     gpu_prog = Program(gpu_code, code_type="c++")
     # To know the GPU's compute capability, we need to identify which GPU to use.
     dev = Device(0)
+    dev.set_current()
     arch = "".join(f"{i}" for i in dev.compute_capability)
     mod = gpu_prog.compile(
         target_type="cubin",
@@ -156,7 +157,6 @@ def my_func(arr, work_stream):
 
 # This takes the GPU path
 if cp:
-    dev.set_current()
     s = dev.create_stream()
     # Create input array on GPU
     arr_gpu = cp.ones(1024, dtype=cp.int32)
diff --git a/cuda_core/examples/vector_add.py b/cuda_core/examples/vector_add.py
index 550eaf2a2..172653277 100644
--- a/cuda_core/examples/vector_add.py
+++ b/cuda_core/examples/vector_add.py
@@ -42,8 +42,9 @@
 
 # prepare input/output
 size = 50000
-a = cp.random.random(size, dtype=dtype)
-b = cp.random.random(size, dtype=dtype)
+rng = cp.random.default_rng()
+a = rng.random(size, dtype=dtype)
+b = rng.random(size, dtype=dtype)
 c = cp.empty_like(a)
 
 # cupy runs on a different stream from s, so sync before accessing
diff --git a/cuda_core/tests/requirements-cu11.txt b/cuda_core/tests/requirements-cu11.txt
new file mode 100644
index 000000000..8fb37e92e
--- /dev/null
+++ b/cuda_core/tests/requirements-cu11.txt
@@ -0,0 +1,3 @@
+pytest
+# TODO: remove this hack once cupy has a cp313 build
+cupy-cuda11x; python_version < "3.13"
diff --git a/cuda_core/tests/requirements-cu12.txt b/cuda_core/tests/requirements-cu12.txt
new file mode 100644
index 000000000..2e82e12da
--- /dev/null
+++ b/cuda_core/tests/requirements-cu12.txt
@@ -0,0 +1,3 @@
+pytest
+# TODO: remove this hack once cupy has a cp313 build
+cupy-cuda12x; python_version < "3.13"