[BugFix] Resolved Issues For LinearMethod --> QuantConfig (#4418)
robertgshaw2-neuralmagic authored Apr 27, 2024
1 parent ba4be44 commit 4ea1f96
Showing 10 changed files with 0 additions and 10 deletions.
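Every file in this commit drops the same statement. The removed `quant_config = getattr(quant_config, "quant_config", None)` re-assignment appears to be a leftover from before the LinearMethod --> QuantConfig refactor, when these MLP constructors received a wrapper object and had to extract the config from it. Now that they receive the quantization config directly, the `getattr` resolves to `None` and silently strips the config before `get_act_fn` sees it. A minimal sketch of that failure mode (the `SimpleQuantConfig` class and `fake_get_act_fn` stub below are illustrative stand-ins, not vLLM's actual classes):

```python
# Illustrative sketch only: SimpleQuantConfig and fake_get_act_fn stand in for
# vLLM's QuantizationConfig and get_act_fn, to show why the deleted line was a bug.

class SimpleQuantConfig:
    """Stand-in for a quantization config object with no .quant_config attribute."""
    pass


def fake_get_act_fn(name, quant_config, intermediate_size):
    # Stand-in: only reports whether a config object actually arrived.
    return f"{name} (quantized={quant_config is not None})"


quant_config = SimpleQuantConfig()

# The line removed by this commit: on a plain quant config it resolves to None,
# discarding the config before the activation function is built.
broken = getattr(quant_config, "quant_config", None)
print(fake_get_act_fn("gelu", broken, 4 * 1024))        # gelu (quantized=False)

# After the fix, the config is passed straight through:
print(fake_get_act_fn("gelu", quant_config, 4 * 1024))  # gelu (quantized=True)
```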
1 change: 0 additions & 1 deletion vllm/model_executor/models/bloom.py
@@ -139,7 +139,6 @@ def __init__(
             4 * hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.gelu_impl = get_act_fn("gelu", quant_config, 4 * hidden_size)
         self.dense_4h_to_h = RowParallelLinear(
             4 * hidden_size,
1 change: 0 additions & 1 deletion vllm/model_executor/models/falcon.py
@@ -203,7 +203,6 @@ def __init__(
             bias=config.bias,
             skip_bias_add=True,
             quant_config=quant_config)
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn("gelu", quant_config, 4 * hidden_size)
         self.reduce_row_parallel_results = not (config.new_decoder_architecture
             or config.parallel_attn)
1 change: 0 additions & 1 deletion vllm/model_executor/models/gpt2.py
@@ -107,7 +107,6 @@ def __init__(
             bias=True,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)

1 change: 0 additions & 1 deletion vllm/model_executor/models/gpt_bigcode.py
@@ -128,7 +128,6 @@ def __init__(
             bias=True,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)

1 change: 0 additions & 1 deletion vllm/model_executor/models/gpt_j.py
@@ -120,7 +120,6 @@ def __init__(
             hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)

1 change: 0 additions & 1 deletion vllm/model_executor/models/gpt_neox.py
@@ -119,7 +119,6 @@ def __init__(
             config.hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config,
                               config.intermediate_size)

1 change: 0 additions & 1 deletion vllm/model_executor/models/mpt.py
@@ -146,7 +146,6 @@ def __init__(
             bias=not config.no_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn("gelu", quant_config, intermediate_size)
         self.down_proj = RowParallelLinear(
             intermediate_size,
1 change: 0 additions & 1 deletion vllm/model_executor/models/opt.py
@@ -130,7 +130,6 @@ def __init__(
             bias=config.enable_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.activation_fn = get_act_fn(config.activation_function,
                                         quant_config, config.ffn_dim)
         self.fc2 = RowParallelLinear(
1 change: 0 additions & 1 deletion vllm/model_executor/models/phi.py
@@ -142,7 +142,6 @@ def __init__(self,
             config.hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config, n_inner)

     def forward(self, hidden_states):
1 change: 0 additions & 1 deletion vllm/model_executor/models/starcoder2.py
@@ -136,7 +136,6 @@ def __init__(self,
             bias=config.use_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config,
                               config.intermediate_size)

