Rename obsolete async placeholder symbols (asynk, asyncc, awaitt, await_) to the real async/await identifiers across core, runtime, and aio modules, now that the Zig 0.15 toolchain (see the version bump in the toolchain .bzl) no longer reserves these keywords.

This commit is contained in:
Tarry Singh 2025-08-29 11:03:59 +00:00
parent 7913c00d70
commit 6e15123fb3
22 changed files with 63 additions and 65 deletions

View File

@ -52,9 +52,7 @@ fn FrameExx(comptime func: anytype, comptime argsT: type, comptime returnT: type
inner: FrameT,
pub const wait = await_;
pub const await_ = awaitt;
pub fn awaitt(self: *Self) returnT {
pub fn await(self: *Self) returnT {
defer {
self.inner.deinit();
AsyncThread.current.stack_allocator.destroy(&self.inner._frame.stack);
@ -65,7 +63,7 @@ fn FrameExx(comptime func: anytype, comptime argsT: type, comptime returnT: type
};
}
pub fn asyncc(comptime func: anytype, args: anytype) !FrameEx(func, @TypeOf(args)) {
pub fn async(comptime func: anytype, args: anytype) !FrameEx(func, @TypeOf(args)) {
const Signature = stdx.meta.FnSignature(func, @TypeOf(args));
const new_stack = try AsyncThread.current.stack_allocator.create();
return .{

View File

@ -1030,7 +1030,7 @@ pub const Event = opaque {
return @ptrCast(result);
}
pub fn await_(self: *const Event, api: *const Api) ApiError!void {
pub fn await(self: *const Event, api: *const Api) ApiError!void {
_ = try api.call(.PJRT_Event_Await, .{
.event = self.inner(),
});

View File

@ -1,6 +1,6 @@
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
const c = @import("c");
const pjrt = @import("pjrt");
const bazel_builtin = @import("bazel_builtin");
@ -44,6 +44,6 @@ pub fn load() !*const pjrt.Api {
var lib_path_buf: [std.fs.max_path_bytes]u8 = undefined;
const path = try stdx.fs.path.bufJoinZ(&lib_path_buf, &.{ sandbox_path, "libpjrt_cpu" ++ ext });
break :blk asynk.callBlocking(pjrt.Api.loadFrom, .{path});
break :blk async.callBlocking(pjrt.Api.loadFrom, .{path});
};
}

View File

@ -1,7 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
const bazel_builtin = @import("bazel_builtin");
const c = @import("c");
const pjrt = @import("pjrt");
@ -17,7 +17,7 @@ pub fn isEnabled() bool {
}
fn hasNvidiaDevice() bool {
asynk.File.access("/dev/nvidiactl", .{ .mode = .read_only }) catch return false;
async.File.access("/dev/nvidiactl", .{ .mode = .read_only }) catch return false;
return true;
}
@ -79,6 +79,6 @@ pub fn load() !*const pjrt.Api {
return blk: {
var lib_path_buf: [std.fs.max_path_bytes]u8 = undefined;
const path = try stdx.fs.path.bufJoinZ(&lib_path_buf, &.{ sandbox_path, "lib", "libpjrt_cuda.so" });
break :blk asynk.callBlocking(pjrt.Api.loadFrom, .{path});
break :blk async.callBlocking(pjrt.Api.loadFrom, .{path});
};
}

View File

@ -1,7 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
const bazel_builtin = @import("bazel_builtin");
const c = @import("c");
const pjrt = @import("pjrt");
@ -15,14 +15,14 @@ pub fn isEnabled() bool {
}
fn hasNeuronDevice() bool {
asynk.File.access("/dev/neuron0", .{ .mode = .read_only }) catch return false;
async.File.access("/dev/neuron0", .{ .mode = .read_only }) catch return false;
return true;
}
fn isRunningOnEC2() !bool {
const AmazonEC2 = "Amazon EC2";
var f = try asynk.File.open("/sys/devices/virtual/dmi/id/sys_vendor", .{ .mode = .read_only });
var f = try async.File.open("/sys/devices/virtual/dmi/id/sys_vendor", .{ .mode = .read_only });
defer f.close() catch {};
var content: [AmazonEC2.len]u8 = undefined;
@ -64,6 +64,6 @@ pub fn load() !*const pjrt.Api {
return blk: {
var lib_path_buf: [std.fs.max_path_bytes]u8 = undefined;
const path = try stdx.fs.path.bufJoinZ(&lib_path_buf, &.{ sandbox_path, "lib", "libpjrt_neuron.so" });
break :blk asynk.callBlocking(pjrt.Api.loadFrom, .{path});
break :blk async.callBlocking(pjrt.Api.loadFrom, .{path});
};
}

View File

@ -1,7 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
const bazel_builtin = @import("bazel_builtin");
const c = @import("c");
const pjrt = @import("pjrt");
@ -16,7 +16,7 @@ pub fn isEnabled() bool {
fn hasRocmDevices() bool {
inline for (&.{ "/dev/kfd", "/dev/dri" }) |path| {
asynk.File.access(path, .{ .mode = .read_only }) catch return false;
async.File.access(path, .{ .mode = .read_only }) catch return false;
}
return true;
}

View File

@ -1,7 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
const bazel_builtin = @import("bazel_builtin");
const c = @import("c");
const pjrt = @import("pjrt");
@ -22,7 +22,7 @@ fn isOnGCP() !bool {
// TODO: abstract that in the client and fail init
const GoogleComputeEngine = "Google Compute Engine";
var f = try asynk.File.open("/sys/devices/virtual/dmi/id/product_name", .{ .mode = .read_only });
var f = try async.File.open("/sys/devices/virtual/dmi/id/product_name", .{ .mode = .read_only });
defer f.close() catch {};
var content: [GoogleComputeEngine.len]u8 = undefined;
@ -61,6 +61,6 @@ pub fn load() !*const pjrt.Api {
return blk: {
var lib_path_buf: [std.fs.max_path_bytes]u8 = undefined;
const path = try stdx.fs.path.bufJoinZ(&lib_path_buf, &.{ sandbox_path, "lib", "libpjrt_tpu.so" });
break :blk asynk.callBlocking(pjrt.Api.loadFrom, .{path});
break :blk async.callBlocking(pjrt.Api.loadFrom, .{path});
};
}

View File

@ -1,24 +1,24 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
_VERSION = "0.14.0"
_VERSION = "0.15.0"
_ARCH = {
"x86_64-linux": struct(
sha256 = "661f8d402ba3dc9b04b6e9bc3026495be7b838d2f18d148db2bd98bd699c1360",
sha256 = "508bfe3fd637d2a02f07f3fc7da8900351f407116b03685c5dae26b4f01a30de",
exec_compatible_with = [
"@platforms//os:linux",
"@platforms//cpu:x86_64",
],
),
"x86_64-macos": struct(
sha256 = "baee69e4645deeccb42970b4a01f573592209dc1cf72e32893c59ca06af511dc",
sha256 = "46c31838bfef5adcc7fee82428c3ec2b9abbfae38242639afea5f242ee133d93",
exec_compatible_with = [
"@platforms//os:macos",
"@platforms//cpu:x86_64",
],
),
"aarch64-macos": struct(
sha256 = "dfb627e1f9603583678f552d8035a12dce878215c0a507b32d6f1b9d074d6c4d",
sha256 = "76c7a23190f67e67970024065f689c2c49b3c7b0fc16876fb24ef199fb05fc2a",
exec_compatible_with = [
"@platforms//os:macos",
"@platforms//cpu:aarch64",

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const c = @import("c");
const stdx = @import("stdx");
@ -336,13 +336,13 @@ pub const Metadata = union(enum) {
/// This struct is meant to be wrapped into a format specific struct, like io.gguf.File.
pub const MemoryMappedFile = struct {
/// underlying file handle
file: asynk.File,
file: async.File,
data: []align(std.heap.page_size_min) const u8,
data_offset: u64 = 0,
pub fn init(file: asynk.File) !MemoryMappedFile {
pub fn init(file: async.File) !MemoryMappedFile {
const data_len: usize = (try file.stat()).size;
const data_ = try asynk.callBlocking(std.posix.mmap, .{
const data_ = try async.callBlocking(std.posix.mmap, .{
null,
data_len,
std.posix.PROT.READ,
@ -351,7 +351,7 @@ pub const MemoryMappedFile = struct {
0,
});
try asynk.callBlocking(posix.madvise, .{
try async.callBlocking(posix.madvise, .{
data_.ptr,
@as(usize, @intCast(data_.len)),
@as(u32, @intCast(c.MADV_SEQUENTIAL)),
@ -736,7 +736,7 @@ pub fn unloadBuffers(model: anytype) void {
pub fn awaitAll(buffers: anytype) !void {
zml.meta.visit((struct {
fn cb(_: void, buffer: *zml.Buffer) void {
buffer.* = buffer.awaitt() catch unreachable;
buffer.* = buffer.await() catch unreachable;
}
}).cb, {}, buffers);
}

View File

@ -1,4 +1,4 @@
const asynk = @import("async");
const async = @import("async");
const std = @import("std");
const zml = @import("../zml.zig");

View File

@ -1,7 +1,7 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const asynk = @import("async");
const async = @import("async");
const stdx = @import("stdx");
const MemoryMappedFile = @import("../aio.zig").MemoryMappedFile;
@ -30,7 +30,7 @@ pub fn open(allocator: std.mem.Allocator, path: []const u8) !zml.aio.BufferStore
}
fn loadFromIndex(allocator: Allocator, store: *zml.aio.BufferStore, files: *std.array_list.Managed(MemoryMappedFile), path: []const u8) !void {
const file = asynk.File.open(path, .{}) catch |err| {
const file = async.File.open(path, .{}) catch |err| {
log.err("Failed to open {s}: {}", .{ path, err });
return err;
};
@ -66,7 +66,7 @@ fn loadFromIndex(allocator: Allocator, store: *zml.aio.BufferStore, files: *std.
}
fn loadFile(allocator: Allocator, store: *zml.aio.BufferStore, files: *std.array_list.Managed(MemoryMappedFile), path: []const u8) !void {
const file = asynk.File.open(path, .{}) catch |err| {
const file = async.File.open(path, .{}) catch |err| {
log.err("Failed to open {s}: {}", .{ path, err });
return err;
};

View File

@ -1,6 +1,6 @@
/// Tools to load models from https://huggingface.co/karpathy/tinyllamas/
/// Originally made to be run with https://github.com/karpathy/llama2.c
const asynk = @import("async");
const async = @import("async");
const std = @import("std");
const stdx = @import("stdx");
@ -29,7 +29,7 @@ pub fn open(allocator: std.mem.Allocator, model_path: []const u8) !zml.aio.Buffe
errdefer res.arena.deinit();
const arena = res.arena.allocator();
const file = try asynk.File.open(model_path, .{});
const file = try async.File.open(model_path, .{});
res.files = try arena.alloc(zml.aio.MemoryMappedFile, 1);
res.files[0] = try zml.aio.MemoryMappedFile.init(file);

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const zml = @import("../zml.zig");
const eval = @import("torch/eval.zig");
@ -18,7 +18,7 @@ test {
/// Opens and loads a BufferStore from the torch file at the given path.
pub fn open(allocator: std.mem.Allocator, path: []const u8) !zml.aio.BufferStore {
const file = asynk.File.open(path, .{}) catch |err| {
const file = async.File.open(path, .{}) catch |err| {
log.err("Failed to open {s}: {}", .{ path, err });
return err;
};
@ -30,7 +30,7 @@ pub fn open(allocator: std.mem.Allocator, path: []const u8) !zml.aio.BufferStore
const tmp_alloc = arena.allocator();
const mmap_file = try zml.aio.MemoryMappedFile.init(file);
var torch_file = try asynk.callBlocking(File.init, .{ tmp_alloc, mmap_file });
var torch_file = try async.callBlocking(File.init, .{ tmp_alloc, mmap_file });
const ops = try torch_file.parsePickle(tmp_alloc);
const py_values = try eval.evaluate(tmp_alloc, ops, true);

View File

@ -1,7 +1,7 @@
const std = @import("std");
const testing = std.testing;
const asynk = @import("async");
const async = @import("async");
const stdx = @import("stdx");
const zml = @import("../../zml.zig");
@ -478,7 +478,7 @@ test "Read pickle (zipped)" {
// model = torch.nn.Conv2d(2, 2, 3, stride=2, padding=[2, 4], dtype=torch.float16)
// tensor = torch.tensor([[2, 4, 3, 2]], dtype=torch.uint8)
// torch.save({ "model": model, "tensor": tensor}, "simple.pt")
const file = try asynk.File.open("zml/aio/torch/simple.pt", .{ .mode = .read_only });
const file = try async.File.open("zml/aio/torch/simple.pt", .{ .mode = .read_only });
const mmap_file = try zml.aio.MemoryMappedFile.init(file);
var store = try zml.aio.BufferStore.initWithFiles(testing.allocator, &.{mmap_file});
defer store.deinit();

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const stdx = @import("stdx");
const DataType = @import("dtype.zig").DataType;
@ -115,16 +115,16 @@ pub const Buffer = struct {
}
if (opts.wait) {
res = try res.awaitt();
res = try res.await();
}
return res;
}
pub fn awaitt(self: Buffer) !Buffer {
pub fn await(self: Buffer) !Buffer {
for (self._shards.constSlice()) |buffer| {
if (buffer.getReadyEvent(self._api)) |ev| {
try ev.await_(self._api);
try ev.await(self._api);
}
}
@ -317,7 +317,7 @@ pub const Buffer = struct {
stdx.debug.internalAssert(!self.hasShardedAxis(), "TODO: support sharded Buffer -> Host transfer", .{});
const maybe_event = try self._shards.get(0).toHostBuffer(self._api, std.mem.asBytes(&res));
if (maybe_event) |event| {
try event.await_(self._api);
try event.await(self._api);
}
return res;
}
@ -329,7 +329,7 @@ pub const Buffer = struct {
stdx.debug.internalAssert(!self.hasShardedAxis(), "TODO: support sharded Buffer -> Host transfer", .{});
const maybe_event = try self._shards.get(0).toHostBuffer(self._api, output);
if (maybe_event) |event| {
try event.await_(self._api);
try event.await(self._api);
}
return HostBuffer.fromBytes(self.shape(), output);
}
@ -341,7 +341,7 @@ pub const Buffer = struct {
stdx.debug.internalAssert(!self.hasShardedAxis(), "TODO: support sharded Buffer -> Host transfer", .{});
const maybe_event = try self._shards.get(0).toHostBuffer(self._api, @constCast(output.bytes()));
if (maybe_event) |event| {
try event.await_(self._api);
try event.await(self._api);
}
return output;
}

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const mlir = @import("mlir");
const pjrt = @import("pjrt");
const stablehlo = @import("mlir/dialects").stablehlo;

View File

@ -261,7 +261,7 @@ pub const BaseExe = struct {
// for (events[0..sharding.num_partitions]) |e| {
// if (e) |ev| {
// ev.await_(self.platform.pjrt_api) catch unreachable;
// ev.await(self.platform.pjrt_api) catch unreachable;
// }
// }
}

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const c = @import("c");
const dialect = @import("mlir/dialects");
const mlir = @import("mlir");
@ -162,7 +162,7 @@ pub const CompilationContext = struct {
var timer = std.time.Timer.start() catch null;
const tensor_args = try self.tensorFromShapes(stdx.meta.FnArgs(func), arena, args);
// Run in a dedicated thread because compilation relies on `threadlocal`.
const f = try asynk.callBlocking(CompilationContext.emitMlir, .{ self, func, &tensor_args, CompilationContext.EmitMlirOpts{ .name = "main", .kind = .main } });
const f = try async.callBlocking(CompilationContext.emitMlir, .{ self, func, &tensor_args, CompilationContext.EmitMlirOpts{ .name = "main", .kind = .main } });
const module = self._module;
module.getBody().appendOperation(f.mlir_fn);

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const dialects = @import("mlir/dialects");
const mlir = @import("mlir");
const pjrt = @import("pjrt");
@ -66,7 +66,7 @@ pub const Client = opaque {
}
pub fn deserializeAndLoad(self: *const Client, api: *const Api, bytes: []const u8) ApiError!*LoadedExecutable {
return @ptrCast(try asynk.callBlocking(pjrt.Client.deserializeAndLoad, .{ self.inner(), api, bytes }));
return @ptrCast(try async.callBlocking(pjrt.Client.deserializeAndLoad, .{ self.inner(), api, bytes }));
}
pub const CreateViewOfDeviceBufferArgs = pjrt.Client.CreateViewOfDeviceBufferArgs;
@ -105,7 +105,7 @@ pub const Client = opaque {
}
pub fn compile(self: *const Client, api: *const Api, allocator: std.mem.Allocator, module: mlir.Module, compile_options_pb: []const u8) CompileError!*LoadedExecutable {
return try asynk.callBlocking(compileSync, .{ self, api, allocator, module, compile_options_pb });
return try async.callBlocking(compileSync, .{ self, api, allocator, module, compile_options_pb });
}
pub fn addressableMemories(self: *const Client, api: *const Api) []*const Memory {
@ -211,10 +211,10 @@ pub const Event = opaque {
if (self.isReady(api)) {
return;
}
try self.inner().await_(api);
try self.inner().await(api);
}
pub fn await_(self: *Event, api: *const Api) ApiError!void {
pub fn await(self: *Event, api: *const Api) ApiError!void {
defer self.deinit(api);
if (self.isReady(api)) {
@ -223,7 +223,7 @@ pub const Event = opaque {
var ctx = struct {
err: ?*pjrt.Error = null,
event: asynk.threading.ResetEventSingle = .{},
event: async.threading.ResetEventSingle = .{},
}{};
try self.inner().onReady(api, &(struct {

View File

@ -3,7 +3,7 @@ const std = @import("std");
const testing = std.testing;
const builtin = @import("builtin");
const asynk = @import("async");
const async = @import("async");
// note: std_options.log_level does not respect testing.log_level
// ref: https://github.com/ziglang/zig/issues/5738
@ -19,7 +19,7 @@ var fba = std.heap.FixedBufferAllocator.init(&fba_buffer);
pub fn main() anyerror!void {
testing.log_level = log_level;
try asynk.AsyncThread.main(testing.allocator, asyncMain);
try async.AsyncThread.main(testing.allocator, asyncMain);
}
pub fn asyncMain() !void {

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const stdx = @import("stdx");
const zml_tokenizer = @import("zml/tokenizer");
@ -14,7 +14,7 @@ const Flags = struct {
};
pub fn main() !void {
try asynk.AsyncThread.main(std.heap.c_allocator, asyncMain);
try async.AsyncThread.main(std.heap.c_allocator, asyncMain);
}
pub fn asyncMain() !void {

View File

@ -1,6 +1,6 @@
const std = @import("std");
const asynk = @import("async");
const async = @import("async");
const hftokenizers = @import("hftokenizers");
const sentencepiece = @import("sentencepiece");
@ -98,15 +98,15 @@ pub const Tokenizer = union(Tokenizers) {
pub fn fromFile(allocator: std.mem.Allocator, model: []const u8) !Tokenizer {
if (std.mem.endsWith(u8, model, ".pb")) {
return .{ .sentencepiece = try asynk.callBlocking(sentencepiece.SentencePieceProcessor.fromFile, .{model}) };
return .{ .sentencepiece = try async.callBlocking(sentencepiece.SentencePieceProcessor.fromFile, .{model}) };
}
if (std.mem.endsWith(u8, model, ".json")) {
return .{ .hftokenizers = try asynk.callBlocking(hftokenizers.HFTokenizer.fromFile, .{model}) };
return .{ .hftokenizers = try async.callBlocking(hftokenizers.HFTokenizer.fromFile, .{model}) };
}
if (std.mem.endsWith(u8, model, ".tinyllama")) {
const tokenizer = try allocator.create(homemade.Tokenizer);
tokenizer.* = try asynk.callBlocking(homemade.fromTinyLlamaFile, .{ allocator, model, 32000 });
tokenizer.* = try async.callBlocking(homemade.fromTinyLlamaFile, .{ allocator, model, 32000 });
return .{ .homemade = tokenizer };
}