Commit 6064209

quic-amitraj authored and eplatero97 committed
Mllama_fix (quic#369)
Signed-off-by: Amit Raj <quic_amitraj@quicinc.com>
1 parent 2eb4859 commit 6064209

File tree

1 file changed: +1, -12 lines


QEfficient/transformers/models/mllama/modeling_mllama.py

Lines changed: 1 addition & 12 deletions
@@ -55,19 +55,8 @@ class QEffMllamaRotaryEmbedding(MllamaRotaryEmbedding):
     - Add static sin/cos computations.
     """
 
-    def __init__(
-        self,
-        dim=None,
-        max_position_embeddings=2048,
-        base=10000,
-        device=None,
-        scaling_factor=1.0,
-        rope_type="default",
-        config: Optional[MllamaConfig] = None,
-    ):
+    def __init__(self, config: MllamaConfig, device=None):
         super().__init__(config=config)
-        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
-        self.register_buffer("inv_freq", inv_freq, persistent=False)
 
         # Build here to make `torch.jit.trace` work.
         self._set_cos_sin_cache(
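For context, the sketch below shows the pattern the commit moves to; it is a minimal reconstruction, not the actual QEfficient source. It assumes that MllamaRotaryEmbedding.__init__(config, device) in recent transformers releases computes inv_freq from the config and registers it as a buffer itself, which is why the duplicated rope_init_fn/register_buffer calls could be dropped from the subclass. The signature and body of _set_cos_sin_cache shown here are hypothetical, since the diff hunk ends before them.

# Minimal sketch, assuming transformers' MllamaRotaryEmbedding already
# computes inv_freq from the config and registers it as a buffer.
import torch
from transformers.models.mllama.configuration_mllama import MllamaConfig
from transformers.models.mllama.modeling_mllama import MllamaRotaryEmbedding


class QEffMllamaRotaryEmbedding(MllamaRotaryEmbedding):
    """Rotary embedding with static sin/cos tables, so shapes stay fixed
    under torch.jit.trace."""

    def __init__(self, config: MllamaConfig, device=None):
        # The base class owns inv_freq computation and buffer registration,
        # so the subclass no longer repeats rope_init_fn/register_buffer.
        super().__init__(config=config)
        # Build here to make `torch.jit.trace` work (as in the diff above).
        self._set_cos_sin_cache(
            seq_len=config.max_position_embeddings,  # attribute assumed present
            device=device,
            dtype=torch.get_default_dtype(),
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        # Hypothetical helper (signature assumed): precompute cos/sin for
        # every position once, instead of on the fly in each forward pass.
        self.max_seq_len_cached = seq_len
        t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq.to(t.device))
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

Building the tables eagerly in __init__ keeps the traced graph free of data-dependent shape logic, which matches the "Build here to make `torch.jit.trace` work" comment retained in the diff.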
