| | #include "build-info.h" |
| | #include "common.h" |
| | #include "llama.h" |
| |
|
#include <cstdio>
#include <cstring>
#include <vector>
#include <string>
#include <unordered_map>
#include <fstream>
#include <cmath>
#include <cctype>

struct quant_option {
    std::string name;
    llama_ftype ftype;
    std::string desc;
};

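// Supported quantization presets. Descriptions report approximate file size and
// perplexity change for the reference model named, or bits per weight (bpw) for
// the IQ/TQ types.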
static const std::vector<struct quant_option> QUANT_OPTIONS = {
    { "Q4_0",     LLAMA_FTYPE_MOSTLY_Q4_0,     " 4.34G, +0.4685 ppl @ Llama-3-8B",  },
    { "Q4_1",     LLAMA_FTYPE_MOSTLY_Q4_1,     " 4.78G, +0.4511 ppl @ Llama-3-8B",  },
    { "Q5_0",     LLAMA_FTYPE_MOSTLY_Q5_0,     " 5.21G, +0.1316 ppl @ Llama-3-8B",  },
    { "Q5_1",     LLAMA_FTYPE_MOSTLY_Q5_1,     " 5.65G, +0.1062 ppl @ Llama-3-8B",  },
    { "IQ2_XXS",  LLAMA_FTYPE_MOSTLY_IQ2_XXS,  " 2.06 bpw quantization",            },
    { "IQ2_XS",   LLAMA_FTYPE_MOSTLY_IQ2_XS,   " 2.31 bpw quantization",            },
    { "IQ2_S",    LLAMA_FTYPE_MOSTLY_IQ2_S,    " 2.5  bpw quantization",            },
    { "IQ2_M",    LLAMA_FTYPE_MOSTLY_IQ2_M,    " 2.7  bpw quantization",            },
    { "IQ1_S",    LLAMA_FTYPE_MOSTLY_IQ1_S,    " 1.56 bpw quantization",            },
    { "IQ1_M",    LLAMA_FTYPE_MOSTLY_IQ1_M,    " 1.75 bpw quantization",            },
    { "TQ1_0",    LLAMA_FTYPE_MOSTLY_TQ1_0,    " 1.69 bpw ternarization",           },
    { "TQ2_0",    LLAMA_FTYPE_MOSTLY_TQ2_0,    " 2.06 bpw ternarization",           },
    { "Q2_K",     LLAMA_FTYPE_MOSTLY_Q2_K,     " 2.96G, +3.5199 ppl @ Llama-3-8B",  },
    { "Q2_K_S",   LLAMA_FTYPE_MOSTLY_Q2_K_S,   " 2.96G, +3.1836 ppl @ Llama-3-8B",  },
    { "IQ3_XXS",  LLAMA_FTYPE_MOSTLY_IQ3_XXS,  " 3.06 bpw quantization",            },
    { "IQ3_S",    LLAMA_FTYPE_MOSTLY_IQ3_S,    " 3.44 bpw quantization",            },
    { "IQ3_M",    LLAMA_FTYPE_MOSTLY_IQ3_M,    " 3.66 bpw quantization mix",        },
    { "Q3_K",     LLAMA_FTYPE_MOSTLY_Q3_K_M,   "alias for Q3_K_M"                   },
    { "IQ3_XS",   LLAMA_FTYPE_MOSTLY_IQ3_XS,   " 3.3 bpw quantization",             },
    { "Q3_K_S",   LLAMA_FTYPE_MOSTLY_Q3_K_S,   " 3.41G, +1.6321 ppl @ Llama-3-8B",  },
    { "Q3_K_M",   LLAMA_FTYPE_MOSTLY_Q3_K_M,   " 3.74G, +0.6569 ppl @ Llama-3-8B",  },
    { "Q3_K_L",   LLAMA_FTYPE_MOSTLY_Q3_K_L,   " 4.03G, +0.5562 ppl @ Llama-3-8B",  },
    { "IQ4_NL",   LLAMA_FTYPE_MOSTLY_IQ4_NL,   " 4.50 bpw non-linear quantization", },
    { "IQ4_XS",   LLAMA_FTYPE_MOSTLY_IQ4_XS,   " 4.25 bpw non-linear quantization", },
    { "Q4_K",     LLAMA_FTYPE_MOSTLY_Q4_K_M,   "alias for Q4_K_M",                  },
    { "Q4_K_S",   LLAMA_FTYPE_MOSTLY_Q4_K_S,   " 4.37G, +0.2689 ppl @ Llama-3-8B",  },
    { "Q4_K_M",   LLAMA_FTYPE_MOSTLY_Q4_K_M,   " 4.58G, +0.1754 ppl @ Llama-3-8B",  },
    { "Q5_K",     LLAMA_FTYPE_MOSTLY_Q5_K_M,   "alias for Q5_K_M",                  },
    { "Q5_K_S",   LLAMA_FTYPE_MOSTLY_Q5_K_S,   " 5.21G, +0.1049 ppl @ Llama-3-8B",  },
    { "Q5_K_M",   LLAMA_FTYPE_MOSTLY_Q5_K_M,   " 5.33G, +0.0569 ppl @ Llama-3-8B",  },
    { "Q6_K",     LLAMA_FTYPE_MOSTLY_Q6_K,     " 6.14G, +0.0217 ppl @ Llama-3-8B",  },
    { "Q8_0",     LLAMA_FTYPE_MOSTLY_Q8_0,     " 7.96G, +0.0026 ppl @ Llama-3-8B",  },
    { "F16",      LLAMA_FTYPE_MOSTLY_F16,      "14.00G, +0.0020 ppl @ Mistral-7B",  },
    { "BF16",     LLAMA_FTYPE_MOSTLY_BF16,     "14.00G, -0.0050 ppl @ Mistral-7B",  },
    { "F32",      LLAMA_FTYPE_ALL_F32,         "26.00G @ 7B",                       },

    { "COPY",     LLAMA_FTYPE_ALL_F32,         "only copy tensors, no quantizing",  },
};

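// GGUF metadata keys used to record in the quantized output model how the
// importance matrix was produced.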
static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE       = "quantize.imatrix.file";
static const char * const LLM_KV_QUANTIZE_IMATRIX_DATASET    = "quantize.imatrix.dataset";
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES  = "quantize.imatrix.entries_count";
static const char * const LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS   = "quantize.imatrix.chunks_count";

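// Case-insensitive ASCII comparison of two C strings.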
static bool striequals(const char * a, const char * b) {
    while (*a && *b) {
        if (std::tolower(*a) != std::tolower(*b)) {
            return false;
        }
        a++; b++;
    }
    return *a == *b;
}

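// Resolve a user-supplied quantization type, given either by name (case-insensitive)
// or by numeric llama_ftype value. Returns false if it matches no known preset.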
static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
    std::string ftype_str;

    for (auto ch : ftype_str_in) {
        ftype_str.push_back(std::toupper(ch));
    }
    for (auto & it : QUANT_OPTIONS) {
        if (striequals(it.name.c_str(), ftype_str.c_str())) {
            ftype = it.ftype;
            ftype_str_out = it.name;
            return true;
        }
    }
    try {
        int ftype_int = std::stoi(ftype_str);
        for (auto & it : QUANT_OPTIONS) {
            if (it.ftype == ftype_int) {
                ftype = it.ftype;
                ftype_str_out = it.name;
                return true;
            }
        }
    }
    catch (...) {
        // not a number either - fall through and report failure
    }
    return false;
}

[[noreturn]]
static void usage(const char * executable) {
    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
    printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
    printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
    printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
    printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
    printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
    printf("  --exclude-weights tensor_name: do not use importance matrix for this/these tensor(s)\n");
    printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
    printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
    printf("  --keep-split: will generate quantized model in the same shards as input\n");
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
    printf("Note: --include-weights and --exclude-weights cannot be used together\n");
    printf("\nAllowed quantization types:\n");
    for (auto & it : QUANT_OPTIONS) {
        if (it.name != "COPY") {
            printf("  %2d  or  ", it.ftype);
        } else {
            printf("          ");
        }
        printf("%-7s : %s\n", it.name.c_str(), it.desc.c_str());
    }
    exit(1);
}

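// Load importance matrix data in the binary layout parsed below:
//   int32 n_entries, then per entry: int32 name_len, name bytes, int32 ncall, int32 nval,
//   nval floats (divided by ncall when ncall > 0); optionally followed by int32 last_call,
//   int32 dataset_len and the dataset name. Returns last_call (the chunk count), or 0.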
static int load_imatrix(const std::string & imatrix_file, std::string & imatrix_dataset, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
    std::ifstream in(imatrix_file.c_str(), std::ios::binary);
    if (!in) {
        printf("%s: failed to open %s\n", __func__, imatrix_file.c_str());
        exit(1);
    }
    int n_entries;
    in.read((char *)&n_entries, sizeof(n_entries));
    if (in.fail() || n_entries < 1) {
        printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
        exit(1);
    }
    for (int i = 0; i < n_entries; ++i) {
        int len; in.read((char *)&len, sizeof(len));
        std::vector<char> name_as_vec(len+1);
        in.read((char *)name_as_vec.data(), len);
        if (in.fail()) {
            printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file.c_str());
            exit(1);
        }
        name_as_vec[len] = 0;
        std::string name{name_as_vec.data()};
        auto & e = imatrix_data[name];
        int ncall;
        in.read((char *)&ncall, sizeof(ncall));
        int nval;
        in.read((char *)&nval, sizeof(nval));
        if (in.fail() || nval < 1) {
            printf("%s: failed reading number of values for entry %d\n", __func__, i+1);
            imatrix_data = {};
            exit(1);
        }
        e.resize(nval);
        in.read((char *)e.data(), nval*sizeof(float));
        if (in.fail()) {
            printf("%s: failed reading data for entry %d\n", __func__, i+1);
            imatrix_data = {};
            exit(1);
        }
        if (ncall > 0) {
            for (auto & v : e) v /= ncall;
        }

        if (getenv("LLAMA_TRACE")) {
            printf("%s: loaded data (size = %6d, ncall = %6d) for '%s'\n", __func__, int(e.size()), ncall, name.c_str());
        }
    }

    // newer imatrix files append the last call count and the dataset file name at the end
    int m_last_call = 0;
    if (in.peek() != EOF) {
        in.read((char *)&m_last_call, sizeof(m_last_call));
        int dataset_len;
        in.read((char *)&dataset_len, sizeof(dataset_len));
        std::vector<char> dataset_as_vec(dataset_len);
        in.read(dataset_as_vec.data(), dataset_len);
        imatrix_dataset.assign(dataset_as_vec.begin(), dataset_as_vec.end());
        printf("%s: imatrix dataset='%s'\n", __func__, imatrix_dataset.c_str());
    }
    printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(imatrix_data.size()), imatrix_file.c_str(), m_last_call);
    return m_last_call;
}

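// Load the imatrix file (if given) and filter its entries: names matched (by substring)
// against --exclude-weights are dropped, and if --include-weights is used only matching
// names are kept. Returns the chunk count reported by the imatrix file, or -1 if none.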
static int prepare_imatrix(const std::string & imatrix_file,
                           std::string & imatrix_dataset,
                           const std::vector<std::string> & included_weights,
                           const std::vector<std::string> & excluded_weights,
                           std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
    int m_last_call = -1;
    if (!imatrix_file.empty()) {
        m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
    }
    if (imatrix_data.empty()) {
        return m_last_call;
    }
    if (!excluded_weights.empty()) {
        for (auto & name : excluded_weights) {
            for (auto it = imatrix_data.begin(); it != imatrix_data.end(); ) {
                auto pos = it->first.find(name);
                if (pos != std::string::npos) it = imatrix_data.erase(it);
                else ++it;
            }
        }
    }
    if (!included_weights.empty()) {
        std::unordered_map<std::string, std::vector<float>> tmp;
        for (auto & name : included_weights) {
            for (auto & e : imatrix_data) {
                auto pos = e.first.find(name);
                if (pos != std::string::npos) {
                    tmp.emplace(std::move(e));
                }
            }
        }
        imatrix_data = std::move(tmp);
    }
    if (!imatrix_data.empty()) {
        printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
    }
    return m_last_call;
}

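// Map a ggml type name (e.g. "q8_0", case-insensitive) to its ggml_type;
// returns GGML_TYPE_COUNT if the name is unknown.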
static ggml_type parse_ggml_type(const char * arg) {
    for (int i = 0; i < GGML_TYPE_COUNT; ++i) {
        auto type = (ggml_type)i;
        const auto * name = ggml_type_name(type);
        if (name && striequals(name, arg)) {
            return type;
        }
    }
    fprintf(stderr, "%s: invalid ggml_type '%s'\n", __func__, arg);
    return GGML_TYPE_COUNT;
}

int main(int argc, char ** argv) {
    if (argc < 3) {
        usage(argv[0]);
    }

    llama_model_quantize_params params = llama_model_quantize_default_params();

    int arg_idx = 1;
    std::string imatrix_file;
    std::vector<std::string> included_weights, excluded_weights;
    std::vector<llama_model_kv_override> kv_overrides;

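    // consume the leading --options; everything after them is positional:
    // model-f32.gguf [model-quant.gguf] type [nthreads]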
    for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
        if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
            params.quantize_output_tensor = false;
        } else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
            if (arg_idx < argc-1) {
                params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
                if (params.output_tensor_type == GGML_TYPE_COUNT) {
                    usage(argv[0]);
                }
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--token-embedding-type") == 0) {
            if (arg_idx < argc-1) {
                params.token_embedding_type = parse_ggml_type(argv[++arg_idx]);
                if (params.token_embedding_type == GGML_TYPE_COUNT) {
                    usage(argv[0]);
                }
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
            if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
            params.allow_requantize = true;
        } else if (strcmp(argv[arg_idx], "--pure") == 0) {
            params.pure = true;
        } else if (strcmp(argv[arg_idx], "--imatrix") == 0) {
            if (arg_idx < argc-1) {
                imatrix_file = argv[++arg_idx];
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
            if (arg_idx < argc-1) {
                included_weights.emplace_back(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--exclude-weights") == 0) {
            if (arg_idx < argc-1) {
                excluded_weights.emplace_back(argv[++arg_idx]);
            } else {
                usage(argv[0]);
            }
        } else if (strcmp(argv[arg_idx], "--keep-split") == 0) {
            params.keep_split = true;
        } else {
            usage(argv[0]);
        }
    }

    if (argc - arg_idx < 2) {
        printf("%s: bad arguments\n", argv[0]);
        usage(argv[0]);
    }
    if (!included_weights.empty() && !excluded_weights.empty()) {
        usage(argv[0]);
    }

    std::string imatrix_dataset;
    std::unordered_map<std::string, std::vector<float>> imatrix_data;
    int m_last_call = prepare_imatrix(imatrix_file, imatrix_dataset, included_weights, excluded_weights, imatrix_data);
    if (!imatrix_data.empty()) {
        params.imatrix = &imatrix_data;
        {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
            strncpy(kvo.val_str, imatrix_file.c_str(), 127);
            kvo.val_str[127] = '\0';
            kv_overrides.emplace_back(std::move(kvo));
        }
        if (!imatrix_dataset.empty()) {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
            strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
            kvo.val_str[127] = '\0';
            kv_overrides.emplace_back(std::move(kvo));
        }

        {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
            kvo.val_i64 = imatrix_data.size();
            kv_overrides.emplace_back(std::move(kvo));
        }

        if (m_last_call > 0) {
            llama_model_kv_override kvo;
            std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
            kvo.val_i64 = m_last_call;
            kv_overrides.emplace_back(std::move(kvo));
        }
    }
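    // append a sentinel entry with an empty key to mark the end of the override list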
    if (!kv_overrides.empty()) {
        kv_overrides.emplace_back();
        kv_overrides.back().key[0] = 0;
        params.kv_overrides = &kv_overrides;
    }

    llama_backend_init();

    const std::string fname_inp = argv[arg_idx];
    arg_idx++;
    std::string fname_out;

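    // the next argument is either the quantization type (with the output name derived
    // from the input path) or an explicit output file name followed by the type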
    std::string ftype_str;
    std::string suffix = ".gguf";
    if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
        std::string fpath;
        const size_t pos = fname_inp.find_last_of("/\\");
        if (pos != std::string::npos) {
            fpath = fname_inp.substr(0, pos + 1);
        }

        // derive the output name as <input dir>/ggml-model-<TYPE>, adding the .gguf
        // extension only when the output is not kept split into shards
        fname_out = fpath + "ggml-model-" + ftype_str;
        if (!params.keep_split) {
            fname_out += suffix;
        }
        arg_idx++;
        if (ftype_str == "COPY") {
            params.only_copy = true;
        }
    } else {
        fname_out = argv[arg_idx];
        if (params.keep_split && fname_out.find(suffix) != std::string::npos) {
            fname_out = fname_out.substr(0, fname_out.length() - suffix.length());
        }
        arg_idx++;

        if (argc <= arg_idx) {
            fprintf(stderr, "%s: missing ftype\n", __func__);
            return 1;
        }
        if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
            fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[arg_idx]);
            return 1;
        }
        if (ftype_str == "COPY") {
            params.only_copy = true;
        }
        arg_idx++;
    }

    // optional trailing argument: number of threads to use for quantization
    if (argc > arg_idx) {
        try {
            params.nthread = std::stoi(argv[arg_idx]);
        }
        catch (const std::exception & e) {
            fprintf(stderr, "%s: invalid nthread '%s' (%s)\n", __func__, argv[arg_idx], e.what());
            return 1;
        }
    }

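    // the lowest-bit quant types give very poor results without an importance matrix,
    // so refuse to run them when no imatrix data was provided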
    if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  ||
         params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S  ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
        fprintf(stderr, "\n==========================================================================================================\n");
        fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
        fprintf(stderr, "==========================================================================================================\n\n\n");
        return 1;
    }

    print_build_info();

    fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str());
    if (params.nthread > 0) {
        fprintf(stderr, " using %d threads", params.nthread);
    }
    fprintf(stderr, "\n");

    const int64_t t_main_start_us = llama_time_us();

    int64_t t_quantize_us = 0;

    // run the quantization
    {
        const int64_t t_start_us = llama_time_us();

        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), &params)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = llama_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = llama_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
    }

    llama_backend_free();

    return 0;
}