Skip to content

Commit

Permalink
ggml-quants, llama : removed excess checks (#7274)
Browse files — browse the repository at this point in the history
  • Loading branch information
GermanAizek committed May 17, 2024
1 parent: e18bc6a · commit: 359cbe3
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 8 deletions.
2 changes: 1 addition & 1 deletion common/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2553,7 +2553,7 @@ void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const cha
size_t pos_start = 0;
size_t pos_found = 0;

if (!data_str.empty() && (std::isspace(data_str[0]) || std::isspace(data_str.back()))) {
if (std::isspace(data_str[0]) || std::isspace(data_str.back())) {
data_str = std::regex_replace(data_str, std::regex("\n"), "\\n");
data_str = std::regex_replace(data_str, std::regex("\""), "\\\"");
data_str = std::regex_replace(data_str, std::regex(R"(\\[^n"])"), R"(\$&)");
Expand Down
2 changes: 1 addition & 1 deletion ggml-quants.c
Original file line number Diff line number Diff line change
Expand Up @@ -1986,7 +1986,7 @@ static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restri

for (int j = 0; j < QK_K/16; ++j) {
if (quant_weights) {
const float * qw = quant_weights ? quant_weights + QK_K * i + 16*j : NULL;
const float * qw = quant_weights + QK_K * i + 16*j;
for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
} else {
for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
Expand Down
8 changes: 2 additions & 6 deletions llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13904,9 +13904,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_

// Sample the next word X using top-k sampling
llama_sample_top_k(nullptr, candidates, int(k), 1);
if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
llama_token X = llama_sample_token(ctx, candidates);
t_start_sample_us = ggml_time_us();

Expand All @@ -13920,9 +13918,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_
// Update mu using the learning rate and error
*mu = *mu - eta * e;

if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
return X;
}

Expand Down

0 comments on commit 359cbe3

Please sign in to comment.