Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Script to convert Grok-1 weights from raw JAX pickle files. #7058

Open
wants to merge 13 commits into
base: master
Choose a base branch
from
Prev Previous commit
Next Next commit
Move print to logging: fixes.
  • Loading branch information
heiner committed May 25, 2024
commit d894497a9622fd1b5cac8dcb3ce8dba051641273
25 changes: 13 additions & 12 deletions convert_grok.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
"""

import argparse
import logging
import mmap
import os
import pathlib
Expand All @@ -34,8 +35,6 @@

import gguf

logger = logging.getLogger("convert_grok")

GGML_QK8_0 = 32
GGML_QK4_0 = 32
GGML_QK4_1 = 32
Expand Down Expand Up @@ -216,7 +215,7 @@ def dump_state_dict(f, ggml_type, input_dir, config):
tensor_ggml_type,
)
weights[name] = weight, scales
logger.info("Loaded", len(weight_names), "files")
logging.debug("Loaded %i files", len(weight_names))

f.write_header_to_file()
f.write_kv_data_to_file()
Expand All @@ -232,21 +231,23 @@ def dump_state_dict(f, ggml_type, input_dir, config):
_, tensor_ggml_type = get_dtype_and_ggml_type(tensor, ggml_type)
array = maybe_quantize_tensor(tensor, tensor_ggml_type).numpy()

logger.debug(
f"dumping {name}:",
f"{tensor_ggml_type.name}/{array.dtype}, {list(tensor.shape)}, {array.nbytes} bytes",
logging.info(
f"dumping {name}:"
f"{tensor_ggml_type.name}/{array.dtype}, {list(tensor.shape)}, {array.nbytes} bytes"
)
f.write_tensor_data(array)

tensor_info.append((name, list(tensor.shape), tensor_ggml_type.name))

try:
print(tabulate(tensor_info, headers=["name", "shape", "dtype"], tablefmt="psql")) # noqa: NP100
print(
tabulate(tensor_info, headers=["name", "shape", "dtype"], tablefmt="psql")
) # noqa: NP100
except NameError:
pass

if len(tensor_info) != len(weight_names):
logger.warning("Not all tensors are converted")
logging.warning("Not all tensors are converted")


def from_numpy(array):
Expand Down Expand Up @@ -379,7 +380,7 @@ def ffn_size(emb_size, widening_factor):
config.num_experts = len(config.experts)

assert config.num_experts >= 2, "need at least 2 experts"
logger.info("experts to export:", config.experts)
logging.info("experts to export: %s", config.experts)

f = gguf.GGUFWriter(args.save_path, "grok", endianess=gguf.GGUFEndian.LITTLE)

Expand Down Expand Up @@ -411,12 +412,12 @@ def ffn_size(emb_size, widening_factor):

delta = time.time() - start

logger.info(f"grok GGUF model saved to {args.save_path}. Total time {delta:.2f} sec")
logging.info(f"grok GGUF model saved to {args.save_path}. Total time {delta:.2f} sec")


def load_vocab(path):
def load_spm(p):
logger.info(f"Loading vocab file {p}")
logging.info(f"Loading vocab file {p}")
return SentencePieceVocab(p)

# Be extra-friendly and accept either a file or a directory. Also, if it's
Expand Down Expand Up @@ -452,7 +453,7 @@ def main():
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

vocab = load_vocab(
pathlib.Path(args.vocab_dir) if args.vocab_dir else pathlib.Path(args.input_dir)
)
Expand Down