build : link against build info instead of compiling against it #3879
Merged
Conversation
I can't figure out how to get Zig to make an object file's build step depend on a cmake-style configuration "header". @kassane You recently touched build.zig, do you know what to do here?
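For context, the cmake-style "header" in question is a generated build-info.h of plain #define lines, which every object that reports build info includes — so any change to it forces all of those objects to recompile. A minimal sketch of its contents (the macro names mirror the ConfigHeader fields visible in the patch below; the concrete values are illustrative only):

```cpp
// build-info.h — sketch of the generated header the question refers to.
// Macro names come from the ConfigHeader fields in the patch below;
// the values shown here are illustrative, not authoritative.
#define BUILD_NUMBER 0
#define BUILD_COMMIT "56c1fad0b4a4ebce8cbb300ba4e4f4b2f5213b19"
#define BUILD_COMPILER "Zig 0.11.0"
#define BUILD_TARGET "native"
```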
ggerganov reviewed Nov 1, 2023
Hi @cebtenzzre, my patch:

```diff
diff --git a/build.zig b/build.zig
index 9b58b74..699738f 100644
--- a/build.zig
+++ b/build.zig
@@ -10,7 +10,6 @@ const Maker = struct {
builder: *std.build.Builder,
target: CrossTarget,
optimize: Mode,
- config_header: *ConfigHeader,
enable_lto: bool,
include_dirs: ArrayList([]const u8),
@@ -41,26 +40,24 @@ const Maker = struct {
const commit_hash = try std.ChildProcess.exec(
.{ .allocator = builder.allocator, .argv = &.{ "git", "rev-parse", "HEAD" } },
);
- const config_header = builder.addConfigHeader(
- .{ .style = .blank, .include_path = "build-info.h" },
- .{
- .BUILD_NUMBER = 0,
- .BUILD_COMMIT = commit_hash.stdout[0 .. commit_hash.stdout.len - 1], // omit newline
- .BUILD_COMPILER = builder.fmt("Zig {s}", .{zig_version}),
- .BUILD_TARGET = try target.allocDescription(builder.allocator),
- },
- );
+ try std.fs.cwd().writeFile("common/build-info.cpp", builder.fmt(
+ \\int LLAMA_BUILD_NUMBER = {};
+ \\char const *LLAMA_COMMIT = "{s}";
+ \\char const *LLAMA_COMPILER = "Zig {s}";
+ \\char const *LLAMA_BUILD_TARGET = "{s}";
+ \\
+ , .{ 0, commit_hash.stdout[0 .. commit_hash.stdout.len - 1], zig_version, try target.allocDescription(builder.allocator) }));
var m = Maker{
.builder = builder,
.target = target,
.optimize = builder.standardOptimizeOption(.{}),
- .config_header = config_header,
.enable_lto = false,
.include_dirs = ArrayList([]const u8).init(builder.allocator),
.cflags = ArrayList([]const u8).init(builder.allocator),
.cxxflags = ArrayList([]const u8).init(builder.allocator),
.objs = ArrayList(*Compile).init(builder.allocator),
};
+
try m.addCFlag("-std=c11");
try m.addCxxFlag("-std=c++11");
try m.addProjectInclude(&.{});
@@ -72,7 +69,7 @@ const Maker = struct {
const o = m.builder.addObject(.{ .name = name, .target = m.target, .optimize = m.optimize });
if (o.target.getAbi() != .msvc)
o.defineCMacro("_GNU_SOURCE", null);
- o.addConfigHeader(m.config_header);
+
if (std.mem.endsWith(u8, src, ".c")) {
o.addCSourceFiles(&.{src}, m.cflags.items);
o.linkLibC();
@@ -85,7 +82,6 @@ const Maker = struct {
o.linkLibCpp();
}
}
- o.addConfigHeader(m.config_header);
for (m.include_dirs.items) |i| o.addIncludePath(.{ .path = i });
o.want_lto = m.enable_lto;
return o;
@@ -105,7 +101,6 @@ const Maker = struct {
// linkLibCpp already add (libc++ + libunwind + libc)
e.linkLibCpp();
}
- e.addConfigHeader(m.config_header);
m.builder.installArtifact(e);
e.want_lto = m.enable_lto;
return e;
@@ -121,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void {
const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
const llama = make.obj("llama", "llama.cpp");
+ const buildinfo = make.obj("common", "common/build-info.cpp");
const common = make.obj("common", "common/common.cpp");
const console = make.obj("console", "common/console.cpp");
const sampling = make.obj("sampling", "common/sampling.cpp");
@@ -128,14 +124,14 @@ pub fn build(b: *std.build.Builder) !void {
const train = make.obj("train", "common/train.cpp");
const clip = make.obj("clip", "examples/llava/clip.cpp");
- _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, console, grammar_parser });
- _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
- _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
- _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
- _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train });
- _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train });
+ _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser });
+ _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+ _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+ _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+ _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+ _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
- const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, grammar_parser, clip });
+ const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, grammar_parser, clip });
if (server.target.isWindows()) {
server.linkSystemLibrary("ws2_32");
}
```

Output:

```
main: build = 0 (56c1fad0b4a4ebce8cbb300ba4e4f4b2f5213b19)
main: built with Zig 0.11.0 for native
main: seed = 1698843849
error loading model: failed to open models/7B/ggml-model-f16.gguf: Arquivo ou diretório inexistente
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model 'models/7B/ggml-model-f16.gguf'
main: error: unable to load model
```

(The Portuguese error "Arquivo ou diretório inexistente" is "No such file or directory": the test model is simply missing, while the build-info values at the top of the log print correctly.)
Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
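For reference, the writeFile call in the patch generates a small C++ source file along these lines (values taken from the log above; the commit hash and target will differ per build):

```cpp
// common/build-info.cpp — sketch of the file generated by the build.zig
// patch above; the concrete values are taken from the posted log output.
int LLAMA_BUILD_NUMBER = 0;
char const *LLAMA_COMMIT = "56c1fad0b4a4ebce8cbb300ba4e4f4b2f5213b19";
char const *LLAMA_COMPILER = "Zig 0.11.0";
char const *LLAMA_BUILD_TARGET = "native";
```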
ggerganov requested changes Nov 1, 2023
ggerganov approved these changes Nov 2, 2023
olexiyb pushed a commit to Sanctum-AI/llama.cpp that referenced this pull request on Nov 23, 2023:

build : link against build info instead of compiling against it (ggerganov#3879)
* cmake : fix build when .git does not exist
* cmake : simplify BUILD_INFO target
* cmake : add missing dependencies on BUILD_INFO
* build : link against build info instead of compiling against it
* zig : make build info a .cpp source instead of a header
Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
* cmake : revert change to CMP0115
---------
Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
ref: #3842 (comment)
This makes incremental builds even faster by avoiding the need to recompile many objects just because the build info has changed (e.g., because the user ran `git commit` or `git pull`). Instead, only the build info itself is recompiled, and the executables that depend on it are re-linked.
I chose to export the build info as the variables LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, and LLAMA_BUILD_TARGET, but these could have different names, or be functions as originally mentioned.
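For illustration, here is a minimal sketch of how code that links against the generated build-info object can consume these symbols. The extern declarations mirror the definitions written by the Zig patch above; the printing code and its placement are assumptions for the sketch, not necessarily what llama.cpp does:

```cpp
#include <cstdio>

// Declarations only — the definitions live in common/build-info.cpp,
// which is compiled once and linked into each executable.
extern int         LLAMA_BUILD_NUMBER;
extern char const *LLAMA_COMMIT;
extern char const *LLAMA_COMPILER;
extern char const *LLAMA_BUILD_TARGET;

int main() {
    // Mirrors the "main: build = ..." lines in the log above.
    std::printf("build = %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
    std::printf("built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
    return 0;
}
```

Because only declarations appear in headers, changing the build info touches a single translation unit; everything else just needs a re-link.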
Other changes: