diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8f07d9fa..f7c597b2 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -37,7 +37,7 @@ jobs:
           pip install ${{ matrix.torch-spec }}
           ./.github/scripts/install_triton.sh
           pip install -r requirements.txt
-          python -m unittest discover -s test/ -p "*.py" -v
+          python -m unittest discover -s test/ -p "*.py" -v -t .
   test_cuda126_py312_a10g:
     name: test-cuda12.6-py3.12-a10g
     strategy:
@@ -63,4 +63,4 @@ jobs:
           pip install ${{ matrix.torch-spec }}
           ./.github/scripts/install_triton.sh
           pip install -r requirements.txt
-          python -m unittest discover -s test/ -p "*.py" -v
+          python -m unittest discover -s test/ -p "*.py" -v -t .
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/test_closures.py b/test/test_closures.py
index 235bf825..e59a4897 100644
--- a/test/test_closures.py
+++ b/test/test_closures.py
@@ -98,7 +98,7 @@ def fn_with_global(x, tile) -> torch.Tensor:
 import triton.language as tl
 from torch._inductor.runtime.triton_helpers import math as tl_math

-import test_closures as _source_module
+import test.test_closures as _source_module

 @triton.jit
 def _sin_func_arg_kernel(a, _source_module_attr_global_tensor, out, a_size_0, _source_module_attr_global_tensor_stride_0, a_stride_0, out_stride_0, _BLOCK_SIZE_0: tl.constexpr):
@@ -280,7 +280,7 @@ def call_func_arg_on_host(a, alloc) -> torch.Tensor:
 import triton.language as tl
 from torch._inductor.runtime.triton_helpers import math as tl_math

-import test_closures as _source_module
+import test.test_closures as _source_module

 @triton.jit
 def _call_func_arg_on_host_kernel(a, out, a_size_0, a_stride_0, out_stride_0, _BLOCK_SIZE_0: tl.constexpr):
diff --git a/test/test_examples.py b/test/test_examples.py
index 25713732..69b792f9 100644
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -241,7 +241,7 @@ def test_template_via_closure0(self):
 import triton.language as tl
 from torch._inductor.runtime import triton_helpers

-import test_examples as _global_source0
+import test.test_examples as _global_source0

 @triton.jit
 def _matmul_with_epilogue_kernel(x, y, epilogue_closure_0, out, _BLOCK_SIZE_0: tl.constexpr, _BLOCK_SIZE_1: tl.constexpr, _BLOCK_SIZE_2: tl.constexpr):
@@ -323,7 +323,7 @@ def test_template_via_closure1(self):
 import triton.language as tl
 from torch._inductor.runtime import triton_helpers

-import test_examples as _global_source0
+import test.test_examples as _global_source0

 @triton.jit
 def _matmul_with_epilogue_kernel(x, y, epilogue_closure_0, out, _BLOCK_SIZE_0: tl.constexpr, _BLOCK_SIZE_1: tl.constexpr, _BLOCK_SIZE_2: tl.constexpr):
@@ -401,7 +401,7 @@ def test_template_via_closure2(self):
 import triton.language as tl
 from torch._inductor.runtime import triton_helpers

-import test_examples as _global_source0
+import test.test_examples as _global_source0

 @triton.jit
 def _matmul_with_epilogue_kernel(x, y, out, _BLOCK_SIZE_0: tl.constexpr, _BLOCK_SIZE_1: tl.constexpr, _BLOCK_SIZE_2: tl.constexpr):