Skip to content

Commit a536be8

Browse files
committed
Refactor is_s3 to is_cloud_storage
Signed-off-by: Peter Schuurman <[email protected]>
1 parent e7acb20 commit a536be8

File tree

3 files changed

+12
-10
lines changed

3 files changed

+12
-10
lines changed

vllm/engine/arg_utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@
8080
is_interleaved,
8181
maybe_override_with_speculators,
8282
)
83-
from vllm.transformers_utils.utils import check_gguf_file, is_s3
83+
from vllm.transformers_utils.utils import check_gguf_file, is_cloud_storage
8484
from vllm.utils.argparse_utils import FlexibleArgumentParser
8585
from vllm.utils.mem_constants import GiB_bytes
8686
from vllm.utils.network_utils import get_ip
@@ -1297,10 +1297,10 @@ def create_engine_config(
12971297

12981298
# Check if the model is a speculator and override model/tokenizer/config
12991299
# BEFORE creating ModelConfig, so the config is created with the target model
1300-
# Skip speculator detection for S3 models since HuggingFace cannot load
1301-
# configs directly from S3 URLs. S3 models can still use speculators with
1302-
# explicit --speculative-config.
1303-
if not is_s3(self.model):
1300+
# Skip speculator detection for cloud storage models (e.g. S3, GCS) since
1301+
# HuggingFace cannot load configs directly from cloud storage URLs. Cloud
1302+
# storage models can still use speculators with explicit --speculative-config.
1303+
if not is_cloud_storage(self.model):
13041304
(self.model, self.tokenizer, self.speculative_config) = (
13051305
maybe_override_with_speculators(
13061306
model=self.model,

vllm/model_executor/model_loader/sharded_state_loader.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
runai_safetensors_weights_iterator,
2020
)
2121
from vllm.transformers_utils.s3_utils import glob as s3_glob
22-
from vllm.transformers_utils.utils import is_s3
22+
from vllm.transformers_utils.utils import is_cloud_storage
2323

2424
logger = init_logger(__name__)
2525

@@ -90,7 +90,7 @@ def get_end_ptr(tensor: torch.Tensor) -> int:
9090
return result
9191

9292
def _prepare_weights(self, model_name_or_path: str, revision: str | None):
93-
if is_s3(model_name_or_path) or os.path.isdir(model_name_or_path):
93+
if is_cloud_storage(model_name_or_path) or os.path.isdir(model_name_or_path):
9494
return model_name_or_path
9595
else:
9696
allow_patterns = ["*.safetensors"]
@@ -120,7 +120,7 @@ def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
120120
)
121121

122122
filepaths = []
123-
if is_s3(local_model_path):
123+
if is_cloud_storage(local_model_path):
124124
file_pattern = f"*{self.pattern.format(rank=rank, part='*')}"
125125
filepaths = s3_glob(path=local_model_path, allow_pattern=[file_pattern])
126126
else:

vllm/transformers_utils/utils.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,10 @@
1515
logger = init_logger(__name__)
1616

1717

18-
def is_s3(model_or_path: str) -> bool:
19-
return model_or_path.lower().startswith("s3://")
18+
def is_cloud_storage(model_or_path: str) -> bool:
    """Return True if *model_or_path* is a cloud storage URI (S3 or GCS).

    Matches case-insensitively, so ``S3://bucket`` and ``gs://bucket``
    both count.
    """
    # str.startswith accepts a tuple of prefixes — one call, one lower(),
    # instead of two chained startswith checks each re-lowering the string.
    return model_or_path.lower().startswith(("s3://", "gs://"))
2022

2123

2224
def check_gguf_file(model: str | PathLike) -> bool:

0 commit comments

Comments
 (0)