Skip to content

Commit e022286

Browse files
committed
Update examples for upstream API changes: rename llama_free_model → llama_model_free, add explicit #include "gguf.h" (gguf API split out of ggml.h), and drop the size argument from gguf_set_tensor_data.
1 parent 12d7d06 commit e022286

File tree

10 files changed

+13
-9
lines changed

10 files changed

+13
-9
lines changed

common/common-nexa.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include <functional>
77

88
#include "ggml.h"
9+
#include "gguf.h"
910
// #include "src/ggml-impl.h"
1011
#include "ggml-alloc.h"
1112
#include "ggml-backend.h"

examples/llama.android/llama/src/main/cpp/llama-android.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@
6868
//extern "C"
6969
//JNIEXPORT void JNICALL
7070
//Java_com_nexa_LLamaAndroid_free_1model(JNIEnv *, jobject, jlong model) {
71-
// llama_free_model(reinterpret_cast<llama_model *>(model));
71+
// llama_model_free(reinterpret_cast<llama_model *>(model));
7272
//}
7373
//
7474
//extern "C"

examples/llama.android/llama/src/main/cpp/llava-android.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ extern "C" JNIEXPORT void JNICALL
161161
Java_com_nexa_NexaVlmInference_free_1model(JNIEnv *env, jobject /* this */, jlong jmodel) {
162162
const auto llava_model = reinterpret_cast<llama_model *>(jmodel);
163163

164-
llama_free_model(llava_model);
164+
llama_model_free(llava_model);
165165
}
166166

167167

examples/nexa-omni-audio/omni.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -723,7 +723,7 @@ void omni_free(struct omni_context *ctx_omni)
723723
}
724724

725725
llama_free(ctx_omni->ctx_llama);
726-
llama_free_model(ctx_omni->model);
726+
llama_model_free(ctx_omni->model);
727727
llama_backend_free();
728728
}
729729

examples/nexa-omni-audio/whisper.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#define WHISPER_H
33

44
#include "ggml.h"
5+
#include "gguf.h"
56

67
#include <stddef.h>
78
#include <stdint.h>

examples/omni-vlm/clip.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
55
#include "clip.h"
66
#include "ggml.h"
7+
#include "gguf.h"
78
#include "ggml-alloc.h"
89
#include "ggml-backend.h"
910
#include "common.h"
@@ -2317,7 +2318,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
23172318
total_size_org += orig_size;
23182319
total_size_new += new_size;
23192320
gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
2320-
gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
2321+
gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
23212322
fout.write((const char *)new_data, new_size);
23222323
size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
23232324
for (size_t j = 0; j < pad; ++j) {

examples/omni-vlm/omni-vlm-cli.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ static void omnivlm_free(struct omnivlm_context * ctx_omnivlm) {
245245
}
246246

247247
llama_free(ctx_omnivlm->ctx_llama);
248-
llama_free_model(ctx_omnivlm->model);
248+
llama_model_free(ctx_omnivlm->model);
249249
llama_backend_free();
250250
}
251251

@@ -301,7 +301,7 @@ int main(int argc, char ** argv) {
301301
omnivlm_free(ctx_omnivlm);
302302
}
303303

304-
llama_free_model(model);
304+
llama_model_free(model);
305305

306306
return 0;
307307
}

examples/omni-vlm/omni-vlm-wrapper.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,7 @@ static void omnivlm_free(struct omnivlm_context * ctx_omnivlm) {
256256
}
257257

258258
llama_free(ctx_omnivlm->ctx_llama);
259-
llama_free_model(ctx_omnivlm->model);
259+
llama_model_free(ctx_omnivlm->model);
260260
llama_backend_free();
261261
}
262262

@@ -331,7 +331,7 @@ void omnivlm_free() {
331331
ctx_omnivlm->model = nullptr;
332332
omnivlm_free(ctx_omnivlm);
333333
}
334-
llama_free_model(model);
334+
llama_model_free(model);
335335
}
336336

337337

examples/qwen2-audio/qwen2.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -723,7 +723,7 @@ void omni_free(struct omni_context *ctx_omni)
723723
}
724724

725725
llama_free(ctx_omni->ctx_llama);
726-
llama_free_model(ctx_omni->model);
726+
llama_model_free(ctx_omni->model);
727727
llama_backend_free();
728728
free(ctx_omni);
729729
}

examples/qwen2-audio/whisper.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@
7070
// use your favorite implementations
7171
#define DR_WAV_IMPLEMENTATION
7272
#include "dr_wav.h"
73+
#include "gguf.h"
7374

7475
#if defined(_MSC_VER)
7576
#pragma warning(disable : 4244 4267) // possible loss of data

0 commit comments

Comments (0)