Skip to content

Commit 21c2627

Browse files
[Misc]Remove redundant hidden_size property in ModelConfig (#29749)
Signed-off-by: Xingyu Liu <[email protected]> Co-authored-by: Harry Mellor <[email protected]>
1 parent 39d2810 commit 21c2627

File tree

2 files changed: +2 additions, -9 deletions

vllm/config/model.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1726,19 +1726,12 @@ def head_dtype(self) -> torch.dtype:
17261726
logger.debug_once("head dtype: %s", head_dtype)
17271727
return head_dtype
17281728

1729-
@property
1730-
def hidden_size(self):
1731-
if hasattr(self.hf_config, "hidden_size"):
1732-
return self.hf_config.hidden_size
1733-
text_config = self.hf_config.get_text_config()
1734-
return text_config.hidden_size
1735-
17361729
@property
17371730
def embedding_size(self):
17381731
dense_modules = try_get_dense_modules(self.model, revision=self.revision)
17391732
if dense_modules is not None:
17401733
return dense_modules[-1]["out_features"]
1741-
return self.hidden_size
1734+
return self.get_hidden_size()
17421735

17431736
def get_and_verify_max_len(self, max_model_len: int):
17441737
# Consider max_model_len in tokenizer_config only when

vllm/model_executor/models/adapters.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -301,7 +301,7 @@ def _init_pooler(self, vllm_config: "VllmConfig", prefix: str = ""):
301301
quant_config = vllm_config.quant_config
302302

303303
self.score = ReplicatedLinear(
304-
model_config.hidden_size,
304+
model_config.get_hidden_size(),
305305
text_config.num_labels,
306306
bias=False,
307307
params_dtype=vllm_config.model_config.head_dtype,

Comments (0)