Enforce same length arrays

I’m trying to represent vectorNs but having a hard time enforcing the same length.

Closest I’ve come to it is like this:

// OP's first attempt: pass the length as a comptime parameter alongside a
// slice of fixed-length arrays. Works, but `n` must be spelled out at every
// call site, and `[][n]f64` is a *mutable* slice, which rejects the
// `.{ v1, v2, v3 }` literal — see the side-question below.
pub fn add(comptime n: usize, vectors: [][n]f64) [n]f64 {
    var res: [n]f64 = undefined;
    @memset(&res, 0); // zero the accumulator before summing
    for (vectors) |v| {
        for (0..n) |idx| {
            res[idx] += v[idx];
        }
    }
    return res;
}

But I don’t want to specify the length in every function.

Another path I’ve tried is to embed the length into the type:

// Length-in-the-type approach: Vector(n) is a distinct struct type per n,
// so mixing lengths becomes a compile error.
// FIX: the original snippet was missing the `;` terminating the
// `return struct { ... }` expression, which does not compile.
fn Vector(comptime n: usize) type {
    return struct {
        values: [n]f64,

        const Self = @This();

        // In-place element-wise addition: self.values[i] += v.values[i].
        pub fn add(self: *Self, v: Self) void {
            for (0..v.values.len) |idx| {
                self.values[idx] += v.values[idx];
            }
        }
    };
}

But I’d like to keep it as a function rather than a method, which left me back at square one, with the Vector parameter requiring n to be defined.

In fact, a side-question here is: what’s the easiest way to provide any number of vectors to my function? I thought `vectors: [][n]f64` would let me do .{ v1, v2, v3 }, but I was mistaken: type '[][3]f64' does not support array initialization syntax

The side-question first:

You must take the address of the array using `&` to convert it to a pointer or slice.

    const v1: [3]f64 = .{ 0.0, 0.0, 0.0 };
    const v2: [3]f64 = .{ 1.0, 1.0, 1.0 };
    const vectors: []const [3]f64 = &.{ v1, v2 };

const is required in this case because the contents of v1 and v2 are constant.


You can have a function that uses anytype with a little comptime help to call the actual add function:

const std = @import("std");

// Strips one pointer/slice level off T at comptime,
// e.g. `[]const [3]f64` -> `[3]f64`. Compile error if T is not a pointer type.
fn Inner(T: type) type {
    return @typeInfo(T).pointer.child;
}

// anytype front end: derives the element type and length from the argument
// at comptime and forwards to addN, so callers never spell out `n`.
// Return type is the slice's element type (e.g. [3]f64).
pub fn add(vectors: anytype) Inner(@TypeOf(vectors)) {
    const T = Inner(@TypeOf(vectors)); // element type, e.g. [3]f64
    const n = @typeInfo(T).array.len; // comptime-known length
    return addN(n, vectors);
}

// Sums any number of [n]f64 vectors element-wise; n is comptime-known.
// Returns the all-zero vector for an empty slice.
pub fn addN(comptime n: usize, vectors: []const [n]f64) [n]f64 {
    var res: [n]f64 = undefined;
    @memset(&res, 0); // zero the accumulator before summing
    for (vectors) |v| {
        for (0..n) |idx| {
            res[idx] += v[idx];
        }
    }
    return res;
}

pub fn main() void {
    const v1: [3]f64 = .{ 0.0, 0.0, 0.0 };
    const v2: [3]f64 = .{ 1.0, 1.0, 1.0 };
    const v3: [3]f64 = .{ 41.0, 41.0, 41.0 };
    // `&.{ ... }` takes the address of a constant array literal, which
    // coerces to []const [3]f64 — this is the answer to the side-question.
    const vectors: []const [3]f64 = &.{ v1, v2, v3 };

    std.debug.print("{any}\n", .{add(vectors)});
}

And the easiest way to get rid of the ugly @memset is:

// memset-free variant: seed the accumulator with the first vector and sum
// the rest. Requires a non-empty slice, hence the assertion (the EDIT below
// notes this was corrected from `n > 0` to `vectors.len > 0`).
pub fn addN(comptime n: usize, vectors: []const [n]f64) [n]f64 {
    std.debug.assert(vectors.len > 0); // vectors[0] below is illegal on an empty slice
    var res: [n]f64 = vectors[0];
    for (vectors[1..]) |v| {
        for (0..n) |idx| {
            res[idx] += v[idx];
        }
    }
    return res;
}

EDIT: corrected the assertion, from n > 0 to vectors.len > 0, thank you @Zambyte

4 Likes

The assertion here is wrong. n is the length of elements of vectors, not the length of vectors itself. n > 0 can be true and accessing vectors[0] can still be illegal behavior.

It’s better to just @splat(0.0) to get around using memset.

// SIMD variant: accumulate in a @Vector so `res += v` adds all n lanes at
// once; the @Vector(n, f64) result coerces back to [n]f64 on return.
// @splat(0.0) zero-initializes without @memset, and is safe for empty input.
pub fn add(comptime n: usize, vectors: []const [n]f64) [n]f64 {
    var res: @Vector(n, f64) = @splat(0.0);
    for (vectors) |v| {
        res += v; // Inner loop is not necessary if all you are doing is arithmetic.
    }
    return res;
}

test add {
    const v1: [3]f64 = .{ 0.0, 0.0, 0.0 };
    const v2: [3]f64 = .{ 1.0, 1.0, 1.0 };
    // (0,0,0) + (1,1,1) == (1,1,1), lane by lane
    try std.testing.expectEqualSlices(f64, &.{ 1.0, 1.0, 1.0 }, &add(3, &.{ v1, v2 }));
}
3 Likes

[]const is required because &.{...} is a literal, it has not been given a mutable memory location.

If you want a non-const slice you either have to do var vectors = [_][3]f64{ v1, v2 };
or use an allocator.

1 Like

Thanks all! For the side-question I went with

// Final choice for the side-question: @splat zero-initializes the array
// accumulator (no @memset), and an empty slice yields the zero vector.
pub fn addN(comptime n: usize, vectors: []const [n]f64) [n]f64 {
    var res: [n]f64 = @splat(0.0);
    for (vectors) |v| {
        for (0..n) |idx| {
            res[idx] += v[idx];
        }
    }
    return res;
}

For the main one, I’ll keep looking, I can’t say I 100% understand what’s going on here:

// (Re-quoted from above.) Strips one pointer/slice level off T,
// e.g. `[]const [3]f64` -> `[3]f64`.
fn Inner(T: type) type {
    return @typeInfo(T).pointer.child;
}

// (Re-quoted from above.) Unwraps the slice type to recover n, then calls addN.
// The return type is the slice's ELEMENT type ([3]f64), not the slice type —
// this is the point the poster is unsure about.
pub fn add(vectors: anytype) Inner(@TypeOf(vectors)) {
    const T = Inner(@TypeOf(vectors));
    const n = @typeInfo(T).array.len;
    return addN(n, vectors);
}

But as far as I can tell this should return the same type I hand down and not [n]f64, and if I understand correctly it’s exactly what’s happening to v5 according to this test:

test "add vectors" {
    const v1 = [_]f64{ 1, 2, 3 };
    const v2 = [_]f64{ 4, 5, 6 };
    const v3 = [_]f64{ 7, 8, 9 };
    const v4 = addN(3, &.{ v1, v2, v3 }); // v4 is [3]f64 — this comparison compiles
    try std.testing.expectEqual([_]f64{ 12, 15, 18 }, v4);

    // v5 is a slice ([]const [3]f64), NOT a [3]f64; comparing it against an
    // array is a type mismatch, producing the compile error shown below.
    const v5: []const [3]f64 = &.{ v1, v2, v3 };
    try std.testing.expectEqual([_]f64{ 12, 15, 18 }, v5);
}
/usr/lib/zig/std/testing.zig:74:15: error: incompatible types: '[3]f64' and '[]const [3]f64'
    const T = @TypeOf(expected, actual);
              ^~~~~~~~~~~~~~~~~~~~~~~~~
/usr/lib/zig/std/testing.zig:74:23: note: type '[3]f64' here
    const T = @TypeOf(expected, actual);
                      ^~~~~~~~
/usr/lib/zig/std/testing.zig:74:33: note: type '[]const [3]f64' here
    const T = @TypeOf(expected, actual);
                                ^~~~~~
src/vector.zig:32:32: note: called inline here
    try std.testing.expectEqual([_]f64{ 12, 15, 18 }, v5);
        ~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~

I went on to tinker with handing down unnamed structs and grabbing the fields, but at this point it just feels so hacky that I’ll try something else.

Maybe I’m just overthinking this and should just go with slices and assert their length in every function but the length being part of the type feels such an obvious/right solution to me.

This seems like an okay solution to me, what do you all think?

// Namespace-per-length approach: Vector(n) is a comptime-generated namespace
// whose functions close over n, so the call site never repeats the length
// (only the Vector(n) declaration does).
pub fn Vector(comptime n: usize) type {
    return struct {
        // Sums any number of [n]f64 vectors element-wise; zero vector if empty.
        pub fn add(vectors: []const [n]f64) [n]f64 {
            var res: [n]f64 = @splat(0.0); // zero accumulator without @memset
            for (vectors) |v| {
                for (0..n) |idx| {
                    res[idx] += v[idx];
                }
            }
            return res;
        }
    };
}

test "add vectors" {
    const v1 = [_]f64{ 1, 2, 3 };
    const v2 = [_]f64{ 4, 5, 6 };
    const v3 = [_]f64{ 7, 8, 9 };

    // The length is stated once, at the Vector(3) declaration.
    const vec3 = Vector(3);
    const v4 = vec3.add(&.{ v1, v2, v3 });
    try std.testing.expectEqual([_]f64{ 12, 15, 18 }, v4);
}

So essentially I would have to define a “handler” for the vectors I’m currently working with but that’s not the end of the world and this way I’m not hacking with reflection and I only need to define my functions once.

It’s of course up to you and whatever you think makes the most sense for your use case, but this solution does not eliminate the explicit length requirement, it just creates distance between where that requirement is (now in the declaration of vec3) and the call site. In my experience, it seems like leaving the explicit length at the call site is more idiomatic.

I think if this were my project, this is what I would do. Instead of trying to use a comptime int n to specify the array length, just accept the numeric array / vector type as an argument (which includes the length), and then use that as a part of the type of vectors and the return type of the function. Check this out:

// Type-as-parameter approach: the vector type itself (array or @Vector)
// carries the length, so no separate comptime `n` is needed.
pub fn add(Vec: type, vectors: []const Vec) Vec {
    // Map an array type to the matching SIMD @Vector; pass @Vector through;
    // reject anything else at comptime.
    const Vector = switch (@typeInfo(Vec)) {
        .array => |a| @Vector(a.len, a.child),
        .vector => Vec,
        else => @compileError("Invalid vector type"),
    };
    var res: Vector = @splat(0);
    for (vectors) |v| {
        res += v; // Inner loop is not necessary if all you are doing is arithmetic.
    }
    return res;
}
test add {
    const v1: [3]f64 = .{ 0.0, 0.0, 0.0 };
    const v2: [3]f64 = .{ 1.0, 1.0, 1.0 };
    // Same sum computed twice: once via a @Vector type, once via an array type.
    try std.testing.expectEqualSlices(
        f64,
        &.{ 1.0, 1.0, 1.0 },
        &add(@Vector(3, f64), &.{ v1, v2 }),
    );

    try std.testing.expectEqualSlices(
        f64,
        &.{ 1.0, 1.0, 1.0 },
        &add([3]f64, &.{ v1, v2 }),
    );
}

This lets add be used for vectors or arrays of any numeric type that supports SIMD operations. If you don’t care about SIMD and don’t want to support @Vector types, you can keep the inner loop and use the Vec argument as-is.

// Non-SIMD variant: keeps the inner loop and uses Vec as-is, so it accepts
// array types only (the commented-out @Vector case in the test below fails).
pub fn add(Vec: type, vectors: []const Vec) Vec {
    var res: Vec = @splat(0);
    for (vectors) |v| {
        // Parallel for over the accumulator (by pointer, to mutate) and v.
        for (&res, v) |*r, k| {
            r.* += k;
        }
    }
    return res;
}

test add {
    const v1: [3]f64 = .{ 0.0, 0.0, 0.0 };
    const v2: [3]f64 = .{ 1.0, 1.0, 1.0 };
    // Using @Vector doesn't work with this implementation of add
    // try std.testing.expectEqualSlices(
    //     f64,
    //     &.{ 1.0, 1.0, 1.0 },
    //     &add(@Vector(3, f64), &.{ v1, v2 }),
    // );

    // Array types are fine: (0,0,0) + (1,1,1) == (1,1,1).
    try std.testing.expectEqualSlices(
        f64,
        &.{ 1.0, 1.0, 1.0 },
        &add([3]f64, &.{ v1, v2 }),
    );
}
2 Likes

a trivial case like this will probably be auto vectorised (if possible) anyway.

1 Like