Use typos to fix comments and logs.
richardkiss committed Dec 7, 2023
1 parent 05cd6e5 commit 72cb1ed
Showing 19 changed files with 44 additions and 44 deletions.
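
The commit title refers to the `typos` source-code spell checker (typos-cli), which was presumably used to generate these corrections. As a rough sketch — assuming typos-cli is installed via cargo and run from the repository root — the workflow looks roughly like this:

```
# Assumed workflow (typos-cli): report misspellings, then apply fixes in place.
cargo install typos-cli      # installs the `typos` binary
typos                        # list spelling mistakes found in source, comments, and logs
typos --write-changes        # apply the suggested corrections
```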
8 changes: 4 additions & 4 deletions common/log.h
@@ -61,13 +61,13 @@
// #define LOG_TARGET stderr
// #include "log.h"
//
-// The log target can also be redirected to a diffrent function
+// The log target can also be redirected to a different function
// like so:
//
-// #define LOG_TARGET log_handler_diffrent()
+// #define LOG_TARGET log_handler_different()
// #include "log.h"
//
-// FILE* log_handler_diffrent()
+// FILE* log_handler_different()
// {
// return stderr;
// }
@@ -421,7 +421,7 @@ inline FILE *log_handler2_impl(bool change = false, LogTriState append = LogTriS

// Disables logs entirely at runtime.
// Makes LOG() and LOG_TEE() produce no output,
-// untill enabled back.
+// until enabled back.
#define log_disable() log_disable_impl()

// INTERNAL, DO NOT USE
18 changes: 9 additions & 9 deletions common/stb_image.h
@@ -2191,7 +2191,7 @@ stbi_inline static int stbi__extend_receive(stbi__jpeg * j, int n) {
if (j->code_bits < n)
stbi__grow_buffer_unsafe(j);
if (j->code_bits < n)
-return 0; // ran out of bits from stream, return 0s intead of continuing
+return 0; // ran out of bits from stream, return 0s instead of continuing

sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative)
k = stbi_lrot(j->code_buffer, n);
@@ -2207,7 +2207,7 @@ stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg * j, int n) {
if (j->code_bits < n)
stbi__grow_buffer_unsafe(j);
if (j->code_bits < n)
-return 0; // ran out of bits from stream, return 0s intead of continuing
+return 0; // ran out of bits from stream, return 0s instead of continuing
k = stbi_lrot(j->code_buffer, n);
j->code_buffer = k & ~stbi__bmask[n];
k &= stbi__bmask[n];
@@ -2220,7 +2220,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg * j) {
if (j->code_bits < 1)
stbi__grow_buffer_unsafe(j);
if (j->code_bits < 1)
-return 0; // ran out of bits from stream, return 0s intead of continuing
+return 0; // ran out of bits from stream, return 0s instead of continuing
k = j->code_buffer;
j->code_buffer <<= 1;
--j->code_bits;
@@ -5015,13 +5015,13 @@ static int stbi__create_png_image_raw(stbi__png * a, stbi_uc * raw, stbi__uint32

// we make a separate pass to expand bits to pixels; for performance,
// this could run two scanlines behind the above code, so it won't
-// intefere with filtering but will still be in the cache.
+// interfere with filtering but will still be in the cache.
if (depth < 8) {
for (j = 0; j < y; ++j) {
stbi_uc * cur = a->out + stride * j;
stbi_uc * in = a->out + stride * j + x * out_n - img_width_bytes;
// unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for
-// 1/2/4-bit png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that
+// 1/2/4-bit png guarantee byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that
// will be skipped in the later loop
stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range

@@ -5215,7 +5215,7 @@ static int stbi__expand_png_palette(stbi__png * a, stbi_uc * palette, int len, i
if (p == NULL)
return stbi__err("outofmem", "Out of memory");

-// between here and free(out) below, exitting would leak
+// between here and free(out) below, exiting would leak
temp_out = p;

if (pal_img_n == 3) {
@@ -6281,7 +6281,7 @@ static void * stbi__tga_load(stbi__context * s, int * x, int * y, int * comp, in
if (tga_width > STBI_MAX_DIMENSIONS)
return stbi__errpuc("too large", "Very large image (corrupt?)");

-// do a tiny bit of precessing
+// do a tiny bit of processing
if (tga_image_type >= 8) {
tga_image_type -= 8;
tga_is_RLE = 1;
@@ -7213,7 +7213,7 @@ static stbi_uc * stbi__gif_load_next(stbi__context * s, stbi__gif * g, int * com
// 0: not specified.
}

-// background is what out is after the undoing of the previou frame;
+// background is what out is after the undoing of the previous frame;
memcpy(g->background, g->out, 4 * g->w * g->h);
}

@@ -8277,7 +8277,7 @@ STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const * c, void * us
1.31 (2011-06-20)
a few more leak fixes, bug in PNG handling (SpartanJ)
1.30 (2011-06-11)
-added ability to load files via callbacks to accomidate custom input streams (Ben Wenger)
+added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
removed deprecated format-specific test/load functions
removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks
anyway error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) fix inefficiency in
4 changes: 2 additions & 2 deletions convert.py
@@ -585,7 +585,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:

if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
# Transformers models put different tensors in different files, but
-# don't split indivdual tensors between files.
+# don't split individual tensors between files.
model: LazyModel = {}
for mp in models_plus:
model.update(mp.model)
@@ -678,7 +678,7 @@ def rebuild_from_type_v2(func, new_type, args, state):
return func(*args)

CLASSES: dict[tuple[str, str], Any] = {
-# getattr used here as a workaround for mypy not being smart enough to detrmine
+# getattr used here as a workaround for mypy not being smart enough to determine
# the staticmethods have a __func__ attribute.
('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'),
2 changes: 1 addition & 1 deletion examples/llava/clip.cpp
@@ -739,7 +739,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
temp->ny = longer_side;
temp->size = 3 * longer_side * longer_side;
temp->data = new uint8_t[temp->size]();
-uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA
+uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA

// fill with background color
for (size_t i = 0; i < temp->size; i++) {
2 changes: 1 addition & 1 deletion examples/llava/convert-image-encoder-to-gguf.py
@@ -51,7 +51,7 @@ def bytes_to_unicode():
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-This is a signficant percentage of your normal, say, 32K bpe vocab.
+This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
2 changes: 1 addition & 1 deletion examples/lookahead/README.md
@@ -1,6 +1,6 @@
# llama.cpp/examples/lookahead

-Demonstartion of lookahead decoding technique:
+Demonstration of lookahead decoding technique:

https://lmsys.org/blog/2023-11-21-lookahead-decoding/

2 changes: 1 addition & 1 deletion examples/server/json.hpp
@@ -11227,7 +11227,7 @@ class binary_reader
}
if (is_ndarray) // ndarray dimensional vector can only contain integers, and can not embed another array
{
-return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimentional vector is not allowed", "size"), nullptr));
+return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimensional vector is not allowed", "size"), nullptr));
}
std::vector<size_t> dim;
if (JSON_HEDLEY_UNLIKELY(!get_ubjson_ndarray_size(dim)))
2 changes: 1 addition & 1 deletion examples/server/public/completion.js
@@ -114,7 +114,7 @@ export async function* llama(prompt, params = {}, config = {}) {
return content;
}

-// Call llama, return an event target that you can subcribe to
+// Call llama, return an event target that you can subscribe to
//
// Example:
//
6 changes: 3 additions & 3 deletions examples/server/public/index.html
@@ -238,7 +238,7 @@
cache_prompt: true
})

-/* START: Support for storing prompt templates and parameters in borwser LocalStorage */
+/* START: Support for storing prompt templates and parameters in browsers LocalStorage */

const local_storage_storageKey = "llamacpp_server_local_storage";

@@ -282,7 +282,7 @@
let importedTemplates = local_storage_getDataAsObject('user_templates')

if (importedTemplates) {
-// saved templates were successfuly imported.
+// saved templates were successfully imported.

console.log('Processing saved templates and updating default template')
params.value = { ...params.value, image_data: [] };
@@ -303,7 +303,7 @@
}

function userTemplateResetToDefault() {
-console.log('Reseting themplate to default')
+console.log('Resetting template to default')
selectedUserTemplate.value.name = 'default';
selectedUserTemplate.value.data = savedUserTemplates.value['default'];
}
2 changes: 1 addition & 1 deletion examples/speculative/README.md
@@ -1,6 +1,6 @@
# llama.cpp/examples/speculative

-Demonstartion of speculative decoding and tree-based speculative decoding techniques
+Demonstration of speculative decoding and tree-based speculative decoding techniques

More info:

2 changes: 1 addition & 1 deletion examples/speculative/speculative.cpp
@@ -428,7 +428,7 @@ int main(int argc, char ** argv) {
++n_past_tgt;
}

-// the first token is always proposed by the traget model before the speculation loop so we erase it here
+// the first token is always proposed by the target model before the speculation loop so we erase it here
for (int s = 0; s < n_seq_dft; ++s) {
if (!drafts[s].active) {
continue;
2 changes: 1 addition & 1 deletion ggml-alloc.h
@@ -42,7 +42,7 @@ GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph
// ggml-backend v2 API
//

-// Seperate tensor and graph allocator objects
+// Separate tensor and graph allocator objects
// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
// The original API is kept as a wrapper around the new API

2 changes: 1 addition & 1 deletion ggml-backend-impl.h
@@ -60,7 +60,7 @@ extern "C" {
void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
void (*synchronize) (ggml_backend_t backend);

-// (optional) copy tensor between different backends, allow for single-copy tranfers
+// (optional) copy tensor between different backends, allow for single-copy transfers
void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);

4 changes: 2 additions & 2 deletions ggml-quants.c
@@ -3114,7 +3114,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri

size_t vl = __riscv_vsetvl_e8m1(qk/2);

-// These tempory registers are for masking and shift operations
+// These temporary registers are for masking and shift operations
vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);

@@ -4757,7 +4757,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri

vl = 16;

-// retreive lane to multiply with scale
+// retrieve lane to multiply with scale
vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
12 changes: 6 additions & 6 deletions ggml.c
@@ -1,4 +1,4 @@
-#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
@@ -33,7 +33,7 @@
// we should just be careful :)
#pragma warning(disable: 4244 4267)

-// disable POSIX deprecation warnigns
+// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif
@@ -1756,7 +1756,7 @@ static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

// WARN:
-// Mis-confguration can lead to problem that's hard to reason about:
+// Mis-configuration can lead to problem that's hard to reason about:
// * At best it crash or talks nosense.
// * At worst it talks slightly difference but hard to perceive.
//
@@ -7421,7 +7421,7 @@ static void ggml_compute_forward_acc_f32(
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

// view src0 and dst with these strides and data offset inbytes during acc
-// nb0 is implicitely element_size because src0 and dst are contiguous
+// nb0 is implicitly element_size because src0 and dst are contiguous
size_t nb1 = ((int32_t *) dst->op_params)[0];
size_t nb2 = ((int32_t *) dst->op_params)[1];
size_t nb3 = ((int32_t *) dst->op_params)[2];
@@ -10027,7 +10027,7 @@ static void ggml_compute_forward_set_f32(
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

// view src0 and dst with these strides and data offset inbytes during set
-// nb0 is implicitely element_size because src0 and dst are contiguous
+// nb0 is implicitly element_size because src0 and dst are contiguous
size_t nb1 = ((int32_t *) dst->op_params)[0];
size_t nb2 = ((int32_t *) dst->op_params)[1];
size_t nb3 = ((int32_t *) dst->op_params)[2];
@@ -14272,7 +14272,7 @@ void ggml_build_backward_gradient_checkpointing(
// insert new tensors recomputing src, reusing already made replacements,
// remember replacements: remember new tensors with mapping from corresponding gf nodes
// recurse for input tensors,
-// unless (i.e. terminating when) input tensors are replacments (like checkpoints)
+// unless (i.e. terminating when) input tensors are replacements (like checkpoints)
node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
}
// insert rewritten backward node with replacements made into resulting backward graph gb
2 changes: 1 addition & 1 deletion gguf-py/README.md
@@ -61,7 +61,7 @@ If you want to publish the package manually for any reason, you need to have `tw
pip install build twine
```

-Then, folow these steps to release a new version:
+Then, follow these steps to release a new version:

1. Bump the version in `pyproject.toml`.
2. Build the package:
10 changes: 5 additions & 5 deletions llama.cpp
@@ -2746,7 +2746,7 @@ static void llm_load_vocab(
// The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
// to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
// are special tokens.
-// From testing, this appears to corelate 1:1 with special tokens.
+// From testing, this appears to correlate 1:1 with special tokens.
//

// Counting special tokens and verifying in only one direction
@@ -5896,7 +5896,7 @@ static int llama_decode_internal(
const int64_t n_embd = hparams.n_embd;
const int64_t n_vocab = hparams.n_vocab;

-// helpers for smoother batch API transistion
+// helpers for smoother batch API transition
// after deprecating the llama_eval calls, these will be removed
std::vector<llama_pos> pos;

@@ -6674,12 +6674,12 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<

// loop over the text
while (true) {
-// find the first occurence of a given special token in this fragment
+// find the first occurrence of a given special token in this fragment
// passing offset argument only limit the "search area" but match coordinates
// are still relative to the source full raw_text
auto match = raw_text->find(special_token, raw_text_base_offset);

-// no occurences found, stop processing this fragment for a given special token
+// no occurrences found, stop processing this fragment for a given special token
if (match == std::string::npos) break;

// check if match is within bounds of offset <-> length
@@ -7878,7 +7878,7 @@ struct llama_beam_search_data {
}

// Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
-// The repetative patterns below reflect the 2 stages of heaps:
+// The repetitive patterns below reflect the 2 stages of heaps:
// * Gather elements until the vector is full, then call std::make_heap() on it.
// * If the heap is full and a new element is found that should be included, pop the
// least element to the back(), replace it with the new, then push it into the heap.
2 changes: 1 addition & 1 deletion tests/test-grad0.cpp
@@ -1,4 +1,4 @@
-#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#include "ggml.h"

#include <cmath>
4 changes: 2 additions & 2 deletions tests/test-quantize-perf.cpp
@@ -117,7 +117,7 @@ static void usage(char * argv[]) {
printf(" --size SIZE set test size, divisible by 32 (L1_SIZE:%d)\n", L1_SIZE);
printf(" -3 use size as L1, L2, L3 sizes (L1:%d L2:%d L3:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE);
printf(" -4 use size as L1, L2, L3, MEM sizes (L1:%d L2:%d L3:%d MEM:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE, MEM_SIZE);
printf(" --op OP set test opration as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
printf(" --op OP set test operation as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
printf(" quantize_row_q_dot, vec_dot_q (all)\n");
printf(" --type TYPE set test type as");
for (int i = 0; i < GGML_TYPE_COUNT; i++) {
@@ -202,7 +202,7 @@ int main(int argc, char * argv[]) {
}
int alignment = std::stoi(argv[i]);
if (alignment < 0 || alignment > MAX_ALIGNMENT) {
-fprintf(stderr, "error: aligment-offset must be less than %d\n", MAX_ALIGNMENT);
+fprintf(stderr, "error: alignment-offset must be less than %d\n", MAX_ALIGNMENT);
invalid_param = true;
break;
}