#include "speculative.h"

#include "log.h"
#include "common.h"
#include "sampling.h"

#include <cstring>
#include <algorithm>

#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE  128
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5

struct common_speculative {
    struct llama_context  * ctx;  // draft-model context
    struct common_sampler * smpl; // sampler used to pick the drafted tokens

    llama_batch  batch;  // reusable batch for decoding on the draft model
    llama_tokens prompt; // tokens currently stored in the draft model's KV cache
};

struct common_speculative * common_speculative_init(
        struct llama_context * ctx_dft) {
    auto * result = new common_speculative {
        /* .ctx    = */ ctx_dft,
        /* .smpl   = */ nullptr,
        /* .batch  = */ llama_batch_init(llama_n_batch(ctx_dft), 0, 1),
        /* .prompt = */ {},
    };

    // hard-coded draft sampling parameters; the disabled branch keeps an
    // alternative top-k/top-p/infill configuration around for experimentation
#if 0
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 40;
        params.top_p = 0.9;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
            COMMON_SAMPLER_TYPE_TOP_P,
            COMMON_SAMPLER_TYPE_INFILL,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#else
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 10;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#endif

    return result;
}

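// Example usage (sketch; `ctx_dft` is assumed to be a llama_context created by
// the caller for the draft model - it is not freed by common_speculative_free):
//
//     common_speculative * spec = common_speculative_init(ctx_dft);
//     // ... call common_speculative_gen_draft() during generation ...
//     common_speculative_free(spec);
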
void common_speculative_free(struct common_speculative * spec) {
    if (spec == nullptr) {
        return;
    }

    common_sampler_free(spec->smpl);

    llama_batch_free(spec->batch);

    delete spec;
}

bool common_speculative_are_compatible(
        const struct llama_context * ctx_tgt,
        const struct llama_context * ctx_dft) {
    const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
    const struct llama_model * model_dft = llama_get_model(ctx_dft);

    const struct llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt);
    const struct llama_vocab * vocab_dft = llama_model_get_vocab(model_dft);

    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(vocab_tgt);
    LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(vocab_dft);
    LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        LOG_ERR("%s: draft model vocab type must match target model to use speculation but "
                "vocab_type_dft = %d while vocab_type_tgt = %d\n", __func__, vocab_type_dft, vocab_type_tgt);
        return false;
    }

    if (llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) ||
        llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) ||
        llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) ||
        llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft)) {
        LOG_ERR("%s: draft vocab special tokens must match target vocab to use speculation\n", __func__);
        LOG_ERR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_vocab_bos(vocab_tgt), llama_vocab_get_add_bos(vocab_tgt), llama_vocab_eos(vocab_tgt), llama_vocab_get_add_eos(vocab_tgt));
        LOG_ERR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_vocab_bos(vocab_dft), llama_vocab_get_add_bos(vocab_dft), llama_vocab_eos(vocab_dft), llama_vocab_get_add_eos(vocab_dft));
        return false;
    }

    {
        const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt);
        const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft);

        const int vocab_diff = std::abs(n_vocab_tgt - n_vocab_dft);

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            LOG_ERR("%s: draft model vocab must closely match target model to use speculation but "
                    "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    __func__, n_vocab_tgt, n_vocab_dft, vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return false;
        }

        // skip the first few token ids - they are often model-specific control tokens
        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i);
            const char * token_text_dft = llama_vocab_get_text(vocab_dft, i);
            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                LOG_ERR("%s: draft vocab must match target vocab to use speculation but "
                        "token %d content differs - target '%s', draft '%s'\n", __func__, i,
                        common_token_to_piece(ctx_tgt, i).c_str(),
                        common_token_to_piece(ctx_dft, i).c_str());
                return false;
            }
        }
    }

    return true;
}

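// Example guard (sketch; `ctx_tgt` and `ctx_dft` are assumed to be the target
// and draft contexts created by the caller):
//
//     if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
//         // incompatible vocabs - fall back to plain decoding on the target model
//     }
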
llama_tokens common_speculative_gen_draft(
        struct common_speculative * spec,
        struct common_speculative_params params,
        const llama_tokens & prompt_tgt,
        llama_token id_last) {
    auto & batch  = spec->batch;
    auto & ctx    = spec->ctx;
    auto & smpl   = spec->smpl;
    auto & prompt = spec->prompt;

    int reuse_i = 0;
    int reuse_n = 0;

    // leave room in the draft context for the tokens that are about to be drafted
    const int n_ctx = llama_n_ctx(ctx) - params.n_draft;

    // start of the part of the target prompt that fits in the draft context
    const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);

    // find the longest chunk of the old draft prompt that matches the new
    // target prompt starting at i_start, so that as much of the draft model's
    // KV cache as possible can be reused instead of being recomputed
    for (int i = 0; i < (int) prompt.size(); ++i) {
        int cur = 0;
        while (i_start + cur < (int) prompt_tgt.size() &&
               i       + cur < (int) prompt.size() &&
               prompt_tgt[i_start + cur] == prompt[i + cur]) {
            cur++;
        }

        if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
            reuse_i = i;
            reuse_n = cur;
        }
    }

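    // worked example with hypothetical token ids: if the old draft prompt is
    // [A B C D E] and the new target prompt from i_start is [C D E F], the scan
    // above matches at i = 2 with cur = 3, so (assuming params.n_reuse allows it)
    // reuse_i = 2 and reuse_n = 3 - the cache entries for [C D E] can be kept
    // and only [F] has to be evaluated
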
| | LOG_DBG("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt.size()); |
| |
|
| | llama_tokens result; |
| | result.reserve(params.n_draft); |
| |
|
| | if (reuse_n == 0) { |
| | llama_kv_cache_clear(ctx); |
| |
|
| | prompt.clear(); |
| | } else { |
        // this can happen when a previous draft was discarded (for example,
        // because it was too small), but the target model still ended up
        // agreeing with it - in that case, return the leftover drafted tokens
        // directly and avoid recomputing them
        if (reuse_i + reuse_n < (int) prompt.size() && prompt[reuse_i + reuse_n] == id_last) {
            for (int i = reuse_i + reuse_n + 1; i < (int) prompt.size(); ++i) {
                result.push_back(prompt[i]);

                if (params.n_draft <= (int) result.size()) {
                    break;
                }
            }

            return result;
        }

        if (reuse_i > 0) {
            // discard the cache entries before the reused chunk and shift the
            // reused chunk to the start of the draft context
            llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i);
            llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i);

            prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
        }

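        // continuing the example above: with reuse_i = 2 and reuse_n = 3, the
        // two cache entries before [C D E] are removed and [C D E] is shifted
        // to positions 0..2, so new tokens are appended starting at position 3
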
        if (reuse_n < (int) prompt.size()) {
            // discard everything in the cache after the reused chunk
            llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1);

            prompt.erase(prompt.begin() + reuse_n, prompt.end());
        }
    }

    // prepare a batch with the target prompt tokens that are not yet in the draft context
    common_batch_clear(batch);

    for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
        common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);

        prompt.push_back(prompt_tgt[i]);
    }

    // evaluate the new prompt tokens, if any (rare during normal decoding,
    // since most of the prompt is typically reused)
    if (batch.n_tokens > 0) {
        llama_decode(ctx, batch);
    }

    const llama_pos n_past = prompt.size();

    LOG_DBG("%s: n_past = %d\n", __func__, n_past);

    common_batch_clear(batch);
    common_batch_add (batch, id_last, n_past, { 0 }, true);

    prompt.push_back(id_last);

    // evaluate the last prompt token to obtain the logits for the first draft sample
    llama_decode(ctx, batch);

    common_sampler_reset(smpl);

    // sample up to n_draft tokens greedily from the draft model
    for (int i = 0; i < params.n_draft; ++i) {
        common_batch_clear(batch);

        common_sampler_sample(smpl, ctx, 0, true);

        const auto * cur_p = common_sampler_get_candidates(smpl);

        for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
            LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
                k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx, cur_p->data[k].id).c_str());
        }

        // take the most probable candidate as the next drafted token
        const llama_token id = cur_p->data[0].id;

        common_sampler_accept(smpl, id, true);

        result.push_back(id);

        if (params.n_draft <= (int) result.size()) {
            break;
        }

        // only keep drafting while the draft model is sufficiently confident
        if (cur_p->data[0].p < params.p_min) {
            break;
        }

        common_batch_add(batch, id, n_past + i + 1, { 0 }, true);

        // evaluate the drafted token so that the next one can be sampled
        llama_decode(ctx, batch);

        prompt.push_back(id);
    }

    return result;
}
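
// Example driver loop (sketch; the target-model evaluation and acceptance
// logic are assumed to live in the caller, as in the speculative examples):
//
//     common_speculative_params params_spec;
//     params_spec.n_draft = 16;   // illustrative: max tokens to draft per step
//     params_spec.n_reuse = 256;  // illustrative: min prefix length worth reusing
//     params_spec.p_min   = 0.9f; // illustrative: draft confidence threshold
//
//     while (/* still generating */) {
//         llama_tokens draft = common_speculative_gen_draft(spec, params_spec, prompt_tgt, id_last);
//
//         // evaluate `draft` with the target model, accept the longest prefix
//         // that the target model agrees with, then update prompt_tgt/id_last
//     }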