diff --git a/examples/benchmark/main.zig b/examples/benchmark/main.zig
index d8b0b47..28410c6 100644
--- a/examples/benchmark/main.zig
+++ b/examples/benchmark/main.zig
@@ -6,6 +6,7 @@ const flags = @import("tigerbeetle/flags");
 // set log level to debug to print the generated IR
 pub const std_options = .{
     .log_level = .warn,
+    .logFn = asynk.logFn(std.log.defaultLog),
 };
 
 pub fn benchmark(a: zml.Tensor, b: zml.Tensor) zml.Tensor {
diff --git a/examples/llama/main.zig b/examples/llama/main.zig
index cc63830..6c2a9df 100644
--- a/examples/llama/main.zig
+++ b/examples/llama/main.zig
@@ -19,6 +19,7 @@ const log = std.log.scoped(.llama);
 
 pub const std_options = .{
     .log_level = .info,
+    .logFn = asynk.logFn(std.log.defaultLog),
 };
 
 pub fn tokenizePromptLlama3(allocator: std.mem.Allocator, tokenizer: zml.tokenizer.Tokenizer, config: LlamaLM.Config, prompt: []const u8) ![]u32 {
diff --git a/examples/mnist/mnist.zig b/examples/mnist/mnist.zig
index 51ca1e4..c49300a 100644
--- a/examples/mnist/mnist.zig
+++ b/examples/mnist/mnist.zig
@@ -1,14 +1,13 @@
-const asynk = @import("async");
 const std = @import("std");
-const zml = @import("zml");
-const show_mlir = true;
+const asynk = @import("async");
+const zml = @import("zml");
 
 const log = std.log.scoped(.mnist);
 
 pub const std_options: std.Options = .{
-    .logFn = asynk.logFn,
     .log_level = .info,
+    .logFn = asynk.logFn(std.log.defaultLog),
 };
 
 
 /// Model definition