python/llm/setup.py (10 changes: 5 additions & 5 deletions)
@@ -275,11 +275,9 @@ def setup_package():


     oneapi_2024_0_requires = ["dpcpp-cpp-rt==2024.0.2;platform_system=='Windows'",
-                              "mkl-dpcpp==2024.0.0;platform_system=='Windows'",
-                              "onednn==2024.0.0;platform_system=='Windows'"]
+                              "mkl-dpcpp==2024.0.0;platform_system=='Windows'"]
     oneapi_2024_2_requires = ["dpcpp-cpp-rt==2024.2.1;platform_system=='Windows'",
-                              "mkl-dpcpp==2024.2.1;platform_system=='Windows'",
-                              "onednn==2024.2.1;platform_system=='Windows'"]
+                              "mkl-dpcpp==2024.2.1;platform_system=='Windows'"]
     # Linux install with --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
     xpu_21_requires = copy.deepcopy(all_requires)
     for exclude_require in cpu_torch_version:
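An aside on the `;platform_system=='Windows'` suffix seen in the pins above: it is a PEP 508 environment marker, evaluated by pip at install time, so these oneAPI wheels are resolved only on Windows (Linux gets its XPU wheels via the extra index URL in the comment above). Below is a minimal, runnable sketch of the mechanism; the package name `marker-demo` and the `xpu-demo` extra are illustrative and not part of this PR:

```python
from setuptools import setup

# Minimal illustration of a PEP 508 environment marker inside extras_require.
# The text after ';' is evaluated by pip at install time, so this pinned
# dependency is installed on Windows and silently skipped on other platforms.
setup(
    name="marker-demo",   # hypothetical package, for illustration only
    version="0.0.1",
    extras_require={
        "xpu-demo": [
            "dpcpp-cpp-rt==2024.2.1;platform_system=='Windows'",
        ],
    },
)
```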
@@ -303,7 +301,8 @@ def setup_package():
"intel-extension-for-pytorch==2.3.110+xpu",
"bigdl-core-xe-23==" + CORE_XE_VERSION,
"bigdl-core-xe-batch-23==" + CORE_XE_VERSION,
"bigdl-core-xe-addons-23==" + CORE_XE_VERSION]
"bigdl-core-xe-addons-23==" + CORE_XE_VERSION,
"onednn-devel==2024.1.1;platform_system=='Windows'"]

cpp_requires = ["bigdl-core-cpp==" + CORE_XE_VERSION,
"onednn-devel==2024.2.1;platform_system=='Windows'"]
@@ -344,6 +343,7 @@ def setup_package():
"npu": npu_requires,
"xpu-2-1": xpu_21_requires,
"xpu-lnl": xpu_lnl_requires,
"xpu-arl": xpu_lnl_requires,
Comment from the Contributor Author:
Currently xpu-lnl and xpu-arl have the same requirements. @jason-dai Please let us know if you have any comments on the options :) Thank you

"serving": serving_requires,
"cpp": cpp_requires,
"llama-index": llama_index_requires}, # for internal usage when upstreaming for llama-index