Commit aea4465
examples : adapt to metal API
ggerganov committed Jan 13, 2024
1 parent 0462134 commit aea4465
Showing 7 changed files with 7 additions and 7 deletions.
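
All seven hunks make the same one-line change: the Metal log-callback setter is renamed from ggml_metal_log_set_callback to ggml_backend_metal_log_set_callback, following the ggml-backend naming convention. A minimal sketch of the resulting initialization pattern follows; the include paths and the CPU fallback are assumptions for illustration, not part of this commit.

// Sketch only: mirrors the pattern used in the changed files.
// The include paths are assumptions about the ggml source tree layout.
#include "ggml-backend.h"
#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#include <cstdio>

static ggml_backend_t init_backend_sketch(void) {
    ggml_backend_t backend = nullptr;
#ifdef GGML_USE_METAL
    fprintf(stderr, "%s: using Metal backend\n", __func__);
    // the log callback is now set through the ggml_backend_metal_* API
    ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
    backend = ggml_backend_metal_init();
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
    }
#endif
    if (!backend) {
        // assumption: fall back to the CPU backend when Metal is unavailable or disabled
        backend = ggml_backend_cpu_init();
    }
    return backend;
}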
2 changes: 1 addition & 1 deletion examples/gpt-2/main-backend.cpp
@@ -209,7 +209,7 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion examples/gpt-2/main-batched.cpp
@@ -298,7 +298,7 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion examples/gpt-2/main.cpp
@@ -118,7 +118,7 @@ void init_backends(gpt2_model & model, const gpt_params & params) {
 #ifdef GGML_USE_METAL
     if (params.n_gpu_layers > 0) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         gpu_backend = ggml_backend_metal_init();
         if (!gpu_backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion examples/whisper/whisper.cpp
@@ -1070,7 +1070,7 @@ static ggml_backend_t whisper_backend_init(const whisper_context_params & params
 #ifdef GGML_USE_METAL
     if (params.use_gpu) {
         WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+        ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
         backend_gpu = ggml_backend_metal_init();
         if (!backend_gpu) {
             WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion tests/test-conv1d.cpp
@@ -88,7 +88,7 @@ void load_model(test_model & model, bool use_gpu = false) {
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion tests/test-conv2d.cpp
@@ -88,7 +88,7 @@ void load_model(test_model & model, bool use_gpu = false) {
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
2 changes: 1 addition & 1 deletion tests/test-mul-mat.cpp
@@ -68,7 +68,7 @@ void load_model(test_model & model, float* a, float* b, int M, int N, int K, boo
 #ifdef GGML_USE_METAL
     if (use_gpu) {
         fprintf(stderr, "%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(ggml_log_callback_default, nullptr);
+        ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         model.backend = ggml_backend_metal_init();
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
