2 files changed (+11 −0)

@@ -543,8 +543,11 @@ steps:
 - label: Model Executor Test # 23min
   timeout_in_minutes: 35
+  torch_nightly: true
   mirror_hardwares: [amdexperimental]
   source_file_dependencies:
+  - vllm/engine/arg_utils.py
+  - vllm/config/model.py
  - vllm/model_executor
  - tests/model_executor
  - tests/entrypoints/openai/test_tensorizer_entrypoint.py
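The pipeline hunk above adds `vllm/engine/arg_utils.py` and `vllm/config/model.py` to the step's `source_file_dependencies`, so edits to those files now trigger the Model Executor Test as well. A minimal sketch of that kind of prefix-based gating is shown below, assuming the CI generator runs a step whenever a changed file falls under one of the listed paths; the `step_should_run` helper is illustrative only, not vLLM's actual pipeline code:

```python
# Illustrative only: a toy version of prefix-based step gating,
# not vLLM's real Buildkite pipeline generator.
source_file_dependencies = [
    "vllm/engine/arg_utils.py",
    "vllm/config/model.py",
    "vllm/model_executor",
    "tests/model_executor",
    "tests/entrypoints/openai/test_tensorizer_entrypoint.py",
]

def step_should_run(changed_files: list[str]) -> bool:
    # Run the step if any changed file matches one of the declared path prefixes.
    return any(
        path.startswith(dep)
        for path in changed_files
        for dep in source_file_dependencies
    )

# With this diff, a change to the model config module now triggers the step.
assert step_should_run(["vllm/config/model.py"])
assert not step_should_run(["docs/README.md"])
```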
tests/model_executor/model_loader/runai_model_streamer

@@ -8,5 +8,7 @@
 load_format = "runai_streamer"
 test_model = "openai-community/gpt2"
+# TODO(amacaskill): Replace with a GKE owned GCS bucket.
+test_gcs_model = "gs://vertex-model-garden-public-us/codegemma/codegemma-2b/"
 
 prompts = [
     "Hello, my name is",
@@ -32,3 +34,9 @@ def test_runai_model_loader_download_files(vllm_runner):
     with vllm_runner(test_model, load_format=load_format) as llm:
         deserialized_outputs = llm.generate(prompts, sampling_params)
         assert deserialized_outputs
+
+
+def test_runai_model_loader_download_files_gcs(vllm_runner):
+    with vllm_runner(test_gcs_model, load_format=load_format) as llm:
+        deserialized_outputs = llm.generate(prompts, sampling_params)
+        assert deserialized_outputs
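The new test mirrors the existing Hugging Face test exactly, only swapping in a `gs://` model reference so the Run:ai streamer's GCS path is exercised. If the duplication grows, one option is to parameterize a single test over both model sources; this is a sketch and not part of the diff, and the combined test name and `pytest.mark.parametrize` usage are my own suggestion, assuming the module-level names shown above (`test_model`, `test_gcs_model`, `load_format`, `prompts`, `sampling_params`) and the `vllm_runner` fixture:

```python
import pytest

# Sketch only: run the same body against both the Hugging Face and GCS model refs
# instead of keeping two near-identical test functions.
@pytest.mark.parametrize("model_ref", [test_model, test_gcs_model])
def test_runai_model_loader_download_files_any_source(vllm_runner, model_ref):
    with vllm_runner(model_ref, load_format=load_format) as llm:
        deserialized_outputs = llm.generate(prompts, sampling_params)
        assert deserialized_outputs
```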