-
Notifications
You must be signed in to change notification settings - Fork 217
Lazy load code modules #269
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
3e15ebc
6ac39ba
016055e
9fba2b7
a55f322
d0d528b
a2555f9
e500304
f9b3b91
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -5,7 +5,7 @@ | |
| import importlib.metadata | ||
|
|
||
| from cuda import cuda | ||
| from cuda.core.experimental._utils import handle_return | ||
| from cuda.core.experimental._utils import handle_return, precondition | ||
|
|
||
| _backend = { | ||
| "old": { | ||
|
|
@@ -106,50 +106,61 @@ class ObjectCode: | |
|
|
||
| """ | ||
|
|
||
| __slots__ = ("_handle", "_code_type", "_module", "_loader", "_sym_map") | ||
| __slots__ = ("_handle", "_backend_version", "_jit_options", "_code_type", "_module", "_loader", "_sym_map") | ||
| _supported_code_type = ("cubin", "ptx", "ltoir", "fatbin") | ||
|
|
||
| def __init__(self, module, code_type, jit_options=None, *, symbol_mapping=None): | ||
| if code_type not in self._supported_code_type: | ||
| raise ValueError | ||
| _lazy_init() | ||
|
|
||
| # handle is assigned during _lazy_load | ||
| self._handle = None | ||
| self._jit_options = jit_options | ||
|
|
||
| self._backend_version = "new" if (_py_major_ver >= 12 and _driver_ver >= 12000) else "old" | ||
| self._loader = _backend[self._backend_version] | ||
|
|
||
| self._code_type = code_type | ||
| self._module = module | ||
| self._sym_map = {} if symbol_mapping is None else symbol_mapping | ||
|
|
||
| backend = "new" if (_py_major_ver >= 12 and _driver_ver >= 12000) else "old" | ||
| self._loader = _backend[backend] | ||
| # TODO: do we want to unload in a finalizer? Probably not.. | ||
|
|
||
| if isinstance(module, str): | ||
| def _lazy_load_module(self, *args, **kwargs): | ||
| if self._handle is not None: | ||
| return | ||
| if isinstance(self._module, str): | ||
| # TODO: this option is only taken by the new library APIs, but we have | ||
| # a bug that we can't easily support it just yet (NVIDIA/cuda-python#73). | ||
| if jit_options is not None: | ||
| if self._jit_options is not None: | ||
| raise ValueError | ||
| module = module.encode() | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. good catch thanks There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. don't thank me, thank the good folks at ruff |
||
| self._handle = handle_return(self._loader["file"](module)) | ||
| self._handle = handle_return(self._loader["file"](self._module)) | ||
| else: | ||
| assert isinstance(module, bytes) | ||
| if jit_options is None: | ||
| jit_options = {} | ||
| if backend == "new": | ||
| assert isinstance(self._module, bytes) | ||
| if self._jit_options is None: | ||
| self._jit_options = {} | ||
| if self._backend_version == "new": | ||
| args = ( | ||
| module, | ||
| list(jit_options.keys()), | ||
| list(jit_options.values()), | ||
| len(jit_options), | ||
| self._module, | ||
| list(self._jit_options.keys()), | ||
| list(self._jit_options.values()), | ||
| len(self._jit_options), | ||
| # TODO: support library options | ||
| [], | ||
| [], | ||
| 0, | ||
| ) | ||
| else: # "old" backend | ||
| args = (module, len(jit_options), list(jit_options.keys()), list(jit_options.values())) | ||
| args = ( | ||
| self._module, | ||
| len(self._jit_options), | ||
| list(self._jit_options.keys()), | ||
| list(self._jit_options.values()), | ||
| ) | ||
| self._handle = handle_return(self._loader["data"](*args)) | ||
|
|
||
| self._code_type = code_type | ||
| self._module = module | ||
| self._sym_map = {} if symbol_mapping is None else symbol_mapping | ||
|
|
||
| # TODO: do we want to unload in a finalizer? Probably not.. | ||
|
|
||
| @precondition(_lazy_load_module) | ||
| def get_kernel(self, name): | ||
| """Return the :obj:`Kernel` of a specified name from this object code. | ||
|
|
||
|
|
@@ -168,6 +179,7 @@ def get_kernel(self, name): | |
| name = self._sym_map[name] | ||
| except KeyError: | ||
| name = name.encode() | ||
|
|
||
| data = handle_return(self._loader["kernel"](self._handle, name)) | ||
| return Kernel._from_obj(data, self) | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -7,20 +7,12 @@ | |
| # is strictly prohibited. | ||
|
|
||
| import pytest | ||
| from conftest import can_load_generated_ptx | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. My understanding is that we don't need explicit imports for things from conftest? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I believe this only applies to fixtures. Helper functions still need to be imported. This is what my research and tests have shown. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I could add a new file called utils, or helpers, and import that instead. There are mixed opinions on best practice online. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
I was only relying on ChatGPT before. It told me the imports are not needed. If that's not true: keeping it simple seems best to me, unless you anticipate that we're accumulating many helper functions. I.e. just keep what you have? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah, it told me that too, then changed its mind. Keeping it simple was my idea as well; with only one helper function, it seems premature to split it into a new file, but perhaps down the line it will make sense. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah, we can move it to
||
|
|
||
| from cuda import cuda, nvrtc | ||
| from cuda.core.experimental import Device, Program | ||
| from cuda.core.experimental._module import Kernel, ObjectCode | ||
|
|
||
|
|
||
| def can_load_generated_ptx(): | ||
| _, driver_ver = cuda.cuDriverGetVersion() | ||
| _, nvrtc_major, nvrtc_minor = nvrtc.nvrtcVersion() | ||
| if nvrtc_major * 1000 + nvrtc_minor * 10 > driver_ver: | ||
| return False | ||
| return True | ||
|
|
||
|
|
||
| def test_program_init_valid_code_type(): | ||
| code = 'extern "C" __global__ void my_kernel() {}' | ||
| program = Program(code, "c++") | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.