|
3 | 3 | import PackageDescription |
4 | 4 |
|
// C/C++ translation units compiled into the core `llama` target
// (llama.cpp front end plus the CPU-only ggml backend).
var sources = [
    "src/llama.cpp",
    "src/llama-vocab.cpp",
    "src/llama-grammar.cpp",
    "src/llama-sampling.cpp",
    "src/unicode.cpp",
    "src/unicode-data.cpp",
    "ggml/src/ggml.c",
    "ggml/src/ggml-aarch64.c",
    "ggml/src/ggml-alloc.c",
    "ggml/src/ggml-backend.cpp",
    "ggml/src/ggml-backend-reg.cpp",
    "ggml/src/ggml-cpu/ggml-cpu.c",
    "ggml/src/ggml-cpu/ggml-cpu.cpp",
    "ggml/src/ggml-cpu/ggml-cpu-aarch64.c",
    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
    "ggml/src/ggml-threading.cpp",
    "ggml/src/ggml-quants.c",
]
// Files for the `omnivlm` target: the OmniVLM example wrapper plus the
// `common/` helpers it depends on.
// NOTE(review): several .h/.hpp entries are listed alongside the .cpp
// files — SwiftPM normally locates headers via search paths, so these may
// be redundant; confirm the package still builds before removing them.
var omniVlmSources = [
    "common/log.h",
    "common/log.cpp",
    "common/arg.h",
    "common/arg.cpp",
    "common/common.cpp",
    "common/common.h",
    "common/json.hpp",
    "common/json-schema-to-grammar.cpp",
    "common/json-schema-to-grammar.h",
    "src/llama-grammar.h",
    "common/grammar-parser.cpp",
    "common/grammar-parser.h",
    "common/sampling.cpp",
    "common/sampling.h",
    "examples/omni-vlm/build-info.cpp",
    "examples/omni-vlm/clip.cpp",
    "examples/omni-vlm/clip.h",
    "examples/omni-vlm/omni-vlm-wrapper.cpp",
    "examples/omni-vlm/omni-vlm-wrapper.h",
    "examples/omni-vlm/omni-vlm.h",
    "examples/omni-vlm/omni-vlm.cpp",
    "common/base64.cpp",
    "ggml/include/ggml.h",
    "ggml/include/ggml-alloc.h",
    "ggml/include/ggml-backend.h",
    "ggml/src/ggml-common.h",
]
| 53 | + |
// Swift unit-test files for the `LlavaTests` target.
var testSources = [
    "swift/LlavaTests/LlavaTests.swift"
]
24 | 57 |
|
var resources: [Resource] = []

// Frameworks must be linked, not handed to the C compiler: `-framework`
// is a link-stage driver option, so inside `cSettings` clang ignores it
// ("argument unused during compilation") and nothing is actually linked.
// Declare the frameworks as linker settings instead, restricted to the
// Apple platforms where they exist.
var linkerSettings: [LinkerSetting] = [
    .linkedFramework("Foundation", .when(platforms: [.macOS, .iOS, .tvOS, .watchOS])),
    .linkedFramework("Accelerate", .when(platforms: [.macOS, .iOS, .tvOS, .watchOS])),
]

// Compiler flags shared by every C-family target in this package.
// NOTE(review): `.unsafeFlags` makes the package unusable as a remote
// dependency — SwiftPM only permits unsafe flags in root/local packages.
var cSettings: [CSetting] = [
    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
    .unsafeFlags(["-fno-objc-arc"]),
    .headerSearchPath("."),
    .headerSearchPath("ggml/src"),
    .headerSearchPath("common"),
]
37 | 69 |
|
// Platform-specific compile definitions.
#if os(Linux)
    // glibc hides several POSIX/GNU extensions unless _GNU_SOURCE is set.
    cSettings.append(.define("_GNU_SOURCE"))
#endif
54 | 73 |
|
// Include paths for the omnivlm/test targets, layered on the shared
// cSettings. "." and "common" are already present in cSettings, so only
// the paths not covered there are appended — avoids duplicate -I flags.
let baseSettings = cSettings + [
    .headerSearchPath("src"),
    .headerSearchPath("examples/omni-vlm"),
    .headerSearchPath("ggml/include"),
]
| 81 | + |
// Core target: builds llama.cpp together with the CPU ggml backend.
// Rooted at the repository top level, so everything that is not a source
// of interest must be excluded explicitly.
let llamaTarget = Target.target(
    name: "llama",
    dependencies: [],
    path: ".",
    exclude: [
        "build",
        "cmake",
        "examples",
        "scripts",
        "models",
        "tests",
        "CMakeLists.txt",
        "Makefile",
    ],
    sources: sources,
    resources: resources,
    publicHeadersPath: "spm-headers",
    cSettings: cSettings,
    linkerSettings: linkerSettings
)
| 96 | + |
// OmniVLM wrapper target, layered on top of the core `llama` target.
// NOTE(review): this target shares `path: "."` with `llama`; SwiftPM only
// tolerates that while the two `sources` lists stay disjoint — keep them so.
let omnivlmTarget = Target.target(
    name: "omnivlm",
    dependencies: ["llama"],
    path: ".",
    sources: omniVlmSources,
    resources: resources,
    publicHeadersPath: "spm/omnivlm",
    // baseSettings already carries the "ggml/src" search path (via the
    // shared cSettings), so no extra path is needed here. The previous
    // `-std=c++14` in cxxSettings was likewise redundant: the package-level
    // `cxxLanguageStandard: .cxx14` already applies it to every C++ file.
    cSettings: baseSettings,
    linkerSettings: linkerSettings
)
| 110 | + |
// Unit tests exercising the omnivlm wrapper.
let testTarget = Target.testTarget(
    name: "LlavaTests",
    dependencies: ["omnivlm"],
    path: ".",
    sources: testSources,
    resources: resources,
    // baseSettings already includes the "ggml/src" search path via the
    // shared cSettings; repeating it only duplicated the -I flag.
    cSettings: baseSettings,
    linkerSettings: linkerSettings
)
| 122 | + |
// Minimum deployment targets for all products.
let supportedPlatforms: [SupportedPlatform] = [
    .macOS(.v12),
    .iOS(.v14),
    .watchOS(.v4),
    .tvOS(.v14)
]

// Library products exposed to package consumers.
let products = [
    Product.library(name: "llama", targets: ["llama"]),
    Product.library(name: "omnivlm", targets: ["omnivlm"])
]
| 134 | + |
// Package manifest root: wires the targets, products and platforms
// declared above into a single package description.
let package = Package(
    name: "llama",
    platforms: supportedPlatforms,
    products: products,
    targets: [
        llamaTarget,
        omnivlmTarget,
        testTarget
    ],
    // NOTE(review): recent llama.cpp/ggml revisions require C++17; if the
    // build fails on missing C++17 features, bump this to .cxx17.
    cxxLanguageStandard: .cxx14
)
0 commit comments