Hotfixes for build rule, math utilities, module system, and NN implementation

This commit is contained in:
Tarry Singh 2023-06-29 10:26:54 +00:00
parent 7985716562
commit 63aca9f9c2
4 changed files with 6 additions and 15 deletions

View File

@ -23,7 +23,6 @@ def zig_cc_test(name, env = None, data = [], deps = [], test_runner = None, visi
name = "{}_test_lib".format(name),
kind = BINARY_KIND.test_lib,
test_runner = test_runner,
data = data,
deps = deps + [
"@rules_zig//zig/lib:libc",
],

View File

@ -1,13 +1,5 @@
/// Floored division: converts both operands to `T` via the file's
/// `floatCast` helper, then applies `@divFloor`.
/// NOTE(review): assumes `T` is a float type — confirm against `floatCast`.
pub inline fn divFloor(comptime T: type, numerator: anytype, denominator: anytype) T {
    const num = floatCast(T, numerator);
    const den = floatCast(T, denominator);
    return @divFloor(num, den);
}
/// Exact division: converts both operands to `T` via the file's
/// `floatCast` helper, then applies `@divExact` (safety-checked UB if the
/// division has a remainder — see the `@divExact` language rules).
pub inline fn divExact(comptime T: type, numerator: anytype, denominator: anytype) T {
    const num = floatCast(T, numerator);
    const den = floatCast(T, denominator);
    return @divExact(num, den);
}
pub inline fn divTrunc(comptime T: type, numerator: anytype, denominator: anytype) T {
return @divTrunc(floatCast(T, numerator), floatCast(T, denominator));
/// Plain floating-point division: converts both operands to `T` via the
/// file's `floatCast` helper and divides without flooring or truncation.
pub inline fn divFloat(comptime T: type, numerator: anytype, denominator: anytype) T {
    const num = floatCast(T, numerator);
    const den = floatCast(T, denominator);
    return num / den;
}
pub inline fn floatCast(comptime T: type, x: anytype) T {

View File

@ -943,7 +943,7 @@ fn compileInternal(
if (timer) |*t| {
const time_ms = @divFloor(t.lap(), std.time.ns_per_ms);
if (time_ms > 1000) log.info("Compilation took {d:.3}s", .{stdx.math.divFloor(f32, time_ms, 1000)});
if (time_ms > 1000) log.info("Compilation took {d:.3}s", .{stdx.math.divFloat(f32, time_ms, 1000)});
}
var arena_state_exe = std.heap.ArenaAllocator.init(allocator);

View File

@ -398,7 +398,7 @@ pub fn nearest(input: Tensor, scale_factor: []const f64) Tensor {
var res = input;
for (spatial_dims) |d| {
const n = out_shape.dim(d);
const ratio = stdx.math.divFloor(f32, input.dim(d), n);
const ratio = stdx.math.divFloat(f32, input.dim(d), n);
const offsets = Tensor.arange(.{ .end = n }, .f32).addConstant(0.5).scale(ratio).floor().convert(.i32);
res = res.gatherValues(d, offsets, .{ .indices_are_sorted = true });
}
@ -576,7 +576,7 @@ pub fn resizeLinear1d(image: Tensor, axis: i8, new_len: u63, opt: ResizeOpts) Te
const dtype = opt.precision orelse if (image.dtype().class() == .integer) .f32 else image.dtype();
const og_len = opt.original_len orelse Tensor.scalar(image.dim(axis), dtype);
const ratio = og_len.convert(dtype).scale(stdx.math.divFloor(f32, 1, new_len));
const ratio = og_len.convert(dtype).scale(stdx.math.divFloat(f32, 1, new_len));
const scaled = Tensor.arange(.{ .end = new_len }, dtype).mul(ratio);
const left = scaled.floor();
const right = left.addConstant(1);
@ -638,7 +638,7 @@ pub fn resizeCubic1d(image: Tensor, axis: i8, new_len: u63, opt: ResizeOpts) Ten
const dtype = opt.precision orelse if (image.dtype().class() == .integer) .f32 else image.dtype();
const og_len = opt.original_len orelse Tensor.scalar(image.dim(axis), dtype);
const ratio = og_len.convert(dtype).scale(stdx.math.divFloor(f32, 1, new_len));
const ratio = og_len.convert(dtype).scale(stdx.math.divFloat(f32, 1, new_len));
const scaled = Tensor.arange(.{ .end = new_len }, dtype).mul(ratio);
const t = scaled.sub(scaled.floor());
const pos = Tensor.stack(&.{