Commit c49570b
fix flake
minhthuc2502 committed Nov 25, 2024
1 parent ddf2997 commit c49570b
Showing 1 changed file with 11 additions and 19 deletions.
30 changes: 11 additions & 19 deletions python/ctranslate2/converters/transformers.py
@@ -2017,7 +2017,11 @@ def set_vocabulary(self, spec, tokens):
         spec.register_vocabulary(tokens)
 
     def set_config(self, config, model, tokenizer):
-        config.bos_token = tokenizer.bos_token if tokenizer.bos_token is not None else tokenizer.pad_token
+        config.bos_token = (
+            tokenizer.bos_token
+            if tokenizer.bos_token is not None
+            else tokenizer.pad_token
+        )
         config.eos_token = tokenizer.eos_token
         config.unk_token = (
             tokenizer.unk_token if tokenizer.unk_token is not None else ""
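For context, a minimal sketch of the fallback logic this hunk reformats (purely illustrative; the tokenizer class and token values below are assumptions, not part of the commit):

    # Hypothetical tokenizer attributes, mirroring the reformatted expression.
    class FakeTokenizer:
        bos_token = None          # some models ship no BOS token
        pad_token = "<|pad|>"     # assumed pad token value
        eos_token = "<|end|>"
        unk_token = None

    tokenizer = FakeTokenizer()

    bos_token = (
        tokenizer.bos_token
        if tokenizer.bos_token is not None
        else tokenizer.pad_token
    )
    unk_token = tokenizer.unk_token if tokenizer.unk_token is not None else ""
    assert bos_token == "<|pad|>" and unk_token == ""

The behavior is unchanged; the commit only rewraps the conditional expression so the line fits within the flake8 line-length limit.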
@@ -2041,31 +2045,19 @@ def set_decoder(self, spec, module):
             )
 
             split_layers = [common_spec.LinearSpec() for _ in range(3)]
-            self.set_linear(
-                split_layers[0], layer.self_attn.q_proj
-            )
-            self.set_linear(
-                split_layers[1], layer.self_attn.k_proj
-            )
-            self.set_linear(
-                split_layers[2], layer.self_attn.v_proj
-            )
+            self.set_linear(split_layers[0], layer.self_attn.q_proj)
+            self.set_linear(split_layers[1], layer.self_attn.k_proj)
+            self.set_linear(split_layers[2], layer.self_attn.v_proj)
 
             utils.fuse_linear(layer_spec.self_attention.linear[0], split_layers)
             self.set_linear(
                 layer_spec.self_attention.linear[1],
                 layer.self_attn.o_proj,
             )
 
-            self.set_linear(
-                layer_spec.ffn.linear_0, layer.mlp.gate_proj
-            )
-            self.set_linear(
-                layer_spec.ffn.linear_0_noact, layer.mlp.up_proj
-            )
-            self.set_linear(
-                layer_spec.ffn.linear_1, layer.mlp.down_proj
-            )
+            self.set_linear(layer_spec.ffn.linear_0, layer.mlp.gate_proj)
+            self.set_linear(layer_spec.ffn.linear_0_noact, layer.mlp.up_proj)
+            self.set_linear(layer_spec.ffn.linear_1, layer.mlp.down_proj)
 
             delattr(layer, "self_attn")
             delattr(layer, "mlp")
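The surrounding code (unchanged by this commit) copies the Q/K/V projections into split_layers and then fuses them into a single linear spec. A rough sketch of what that fusion amounts to follows; this is not CTranslate2's actual utils.fuse_linear, and the (out_features, in_features) weight layout, shapes, and names are assumptions for illustration:

    import numpy as np

    hidden = 512  # assumed hidden size
    rng = np.random.default_rng(0)

    # Three per-projection weights, analogous to split_layers[0..2].
    q_w = rng.standard_normal((hidden, hidden)).astype(np.float32)
    k_w = rng.standard_normal((hidden, hidden)).astype(np.float32)
    v_w = rng.standard_normal((hidden, hidden)).astype(np.float32)

    # Fusing concatenates the output dimensions so Q, K, V run as one GEMM.
    fused_w = np.concatenate([q_w, k_w, v_w], axis=0)  # (3 * hidden, hidden)

    x = rng.standard_normal((4, hidden)).astype(np.float32)
    qkv = x @ fused_w.T
    q, k, v = np.split(qkv, 3, axis=-1)

    # The fused result matches the three separate projections.
    assert np.allclose(q, x @ q_w.T, atol=1e-4)

Again, the commit itself only collapses the set_linear calls onto single lines; the fusion logic is untouched.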
