Test output using new Io interface

I’m working on a module for parsing PNG files.
For testing I have a set of 166 different .png files that are all set up to test a specific item. What I’m currently doing is using Io.Group with concurrent to loop over a list of filenames and add a concurrent task for each separate file, so far so good.
This works fast but I have the following issues with it:

  • All exceptions are hidden behind Io.Cancelable.Canceled
  • No way of identifying the precise file that failed.

Not sure what a solution could be for the first issue when using concurrent

For the last case I do see 2 solutions.

  • use std.log.err/std.debug.print on exception
  • use a different test "name" block per file

The first one is easy to do but I have a feeling this is not really the correct way of providing context to tests; the second one requires quite a few different test cases and loses the concurrency performance (not the biggest issue though). So far I have avoided using AI in this project but this might be a use case for it: just provide a general test function and have AI write all the test cases calling that function.

Here the code.

test "can parse basic png" {
    const testing = std.testing;
    const io = testing.io;
    const gpa = testing.allocator;

    // Namespace struct so the task functions can reference `io`/`gpa`
    // from the enclosing test scope.
    const Test = struct {
        /// Opens `path`, parses it as a PNG and checks the header dimensions.
        /// NOTE(review): every failure here is collapsed into error.Canceled,
        /// which is exactly what hides the real error and the failing file —
        /// Canceled should only be produced when the task is actually
        /// cancelled (e.g. via group.cancel).
        pub fn testHeader(dir: Io.Dir, path: []const u8, expected_width: u32, expected_height: u32) !void {
            // Not sure yet how to be able to return different error types in this case, as concurrent expects Cancelable!void
            const file = dir.openFile(io, path, .{}) catch return Io.Cancelable.Canceled;
            defer file.close(io);
            var buff: [128]u8 = undefined;
            var fr = file.reader(io, &buff);
            const reader = &fr.interface;

            const image: Image = parse(reader, gpa) catch return Io.Cancelable.Canceled;

            testing.expectEqual(expected_width, image.width) catch return Io.Cancelable.Canceled;
            testing.expectEqual(expected_height, image.height) catch return Io.Cancelable.Canceled;
        }

        /// Expects parsing of `path` to fail with ReadError.InvalidFormat
        /// (the corrupt-file cases). Same Canceled-masking caveat as above.
        pub fn testHeaderCorrupted(dir: Io.Dir, path: []const u8) !void {
            const file = dir.openFile(io, path, .{}) catch return Io.Cancelable.Canceled;
            defer file.close(io);
            var buff: [128]u8 = undefined;
            var fr = file.reader(io, &buff);
            const reader = &fr.interface;

            const image = parse(reader, gpa);
            testing.expectError(ReadError.InvalidFormat, image) catch return Io.Cancelable.Canceled;
        }
    };

    const cwd = Io.Dir.cwd();

    // One concurrent task per file; await propagates the first task error,
    // but by then the per-file context is already lost.
    var group: Io.Group = .init;
    for (primary_pngs) |png| {
        try group.concurrent(io, Test.testHeader, .{ cwd, png.path, png.width, png.height });
    }
    for (primary_check_pngs) |png| {
        try group.concurrent(io, Test.testHeader, .{ cwd, png.path, png.width, png.height });
    }
    for (corrupt_pngs) |png| {
        try group.concurrent(io, Test.testHeaderCorrupted, .{ cwd, png.path });
    }
    try group.await(io);
}

Curious to hear your thoughts on how you would solve this!
Also this is my first time using concurrent with the new IO interface, if you see an issue please let me know :slight_smile:

You can return any error you want, or even no error.

The interface supports cancelling tasks, when that happens it returns the Canceled error. Very useful for expensive/long-running tasks.

You sure? changing the testHeader function to the following:

        /// Revised version: errors are propagated with `try` instead of being
        /// mapped to Canceled. NOTE(review): as quoted, this cannot satisfy
        /// Group.concurrent, whose task signature requires `Cancelable!void` —
        /// see the compile error reproduced below.
        pub fn testHeader(dir: Io.Dir, path: []const u8, expected_width: u32, expected_height: u32) !void {
            // Not sure yet how to be able to return different error types in this case, as concurrent expects Cancelable!void
            const file = try dir.openFile(io, path, .{});
            defer file.close(io);
            var buff: [128]u8 = undefined;
            var fr = file.reader(io, &buff);
            const reader = &fr.interface;

            const image: Image = try parse(reader, gpa);

            try testing.expectEqual(expected_width, image.width);
            try testing.expectEqual(expected_height, image.height);
        }

Won’t compile with:

master/lib/std/Io.zig:1133:24: error: expected type ‘error{Canceled}!void’, found ‘@typeInfo(@typeInfo(@TypeOf(png.test.can parse basic png.Test.testHeader)).@“fn”.return_type.?).error_union.error_set!void’
return @call(.auto, function, args_casted.*);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/master/lib/std/Io.zig:1133:24: note: ‘error.ReadFailed’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.EndOfStream’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.SystemResources’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.IsDir’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.WouldBlock’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.AccessDenied’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.Unexpected’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.InvalidFormat’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.TooBig’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.InvalidEnumTag’ not a member of destination error set
/master/lib/std/Io.zig:1133:24: note: ‘error.TestExpectedEqual’ not a member of destination error set
/master/lib/std/Io.zig:1131:59: note: function return type declared here
fn start(context: *const anyopaque) Cancelable!void {

you caught me in the middle of correcting myself :slight_smile:


I overlooked that you were using a group :sweat_smile:, you don’t need the specific error in this case, it will show up in the trace, and the test comparisons will log the differences which also trigger test failure.

But if you do want to get each error, you do something like (untested)

    // Errors from the std.testing helpers used in the test functions.
    // (expectEqual fails with error.TestExpectedEqual, expectError with
    // error.TestExpectedError — see the compile-error notes above.)
    const TestError = error{
        TestExpectedEqual,
        TestExpectedError,
    };
    // select requires a union; with only one field a tag is unnecessary.
    // you could get away with a struct as it doesn't enforce it, but that could change
    const Result = union {
        r: (Io.Dir.OpenError || TestError)!void,
    };
    var buf: [2]Result = undefined;
    var select: Io.Select(Result) = .init(io, &buf);
    for (primary_pngs) |png| {
        // select only supports async, but you should have been using it anyway
        select.async(.r, Test.testHeader, .{ cwd, png.path, png.width, png.height });
    }
    for (primary_check_pngs) |png| {
        select.async(.r, Test.testHeader, .{ cwd, png.path, png.width, png.height });
    }
    for (corrupt_pngs) |png| {
        select.async(.r, Test.testHeaderCorrupted, .{ cwd, png.path });
    }
    // wait and report errors; await returns Cancelable!Result and the
    // inner `.r` carries the per-task error union
    while (select.outstanding > 0) {
        try (try select.await()).r;
    }

Looking at that, it might be easier to use an array list of the futures — though maybe less efficient, but that doesn't matter here.

3 Likes

You shouldn’t be returning error.Canceled unless your task was actually canceled via group.cancel(io).

You need a different way of communicating results back to the main thread. I’d suggest using an Io.Queue with some form of “Result” structure, something like this sketch:

// Per-file outcome sent back to the main thread through the queue.
const Result = union(enum) {
    success: void,
    failure: []const u8, // filename of the png that failed
};

var result_buffer: [file_count]Result = undefined;
// Io.Queue.init takes the backing buffer (one slot per file).
var result_queue: Io.Queue(Result) = .init(&result_buffer);

// in test case:
// putOne takes `io` and can fail (Canceled / Closed), so `try` it.

if (expected_width != image.width) {
    try queue.putOne(io, .{ .failure = path });
    return;
}

if (expected_height != image.height) {
    try queue.putOne(io, .{ .failure = path });
    return;
}

try queue.putOne(io, .success);

// in main thread:
// NOTE(review): getOne only stops with an error once the queue is closed —
// close it after all tasks have completed, or drain a known count.

while (queue.getOne(io)) |result| {
    try std.testing.expectEqual(.success, result);
} else |err| return err;
3 Likes

that is easier than using a Select :sweat_smile:

You might want to log all the file paths instead of returning on the first failure.

In a code comment you mention I should be using async here instead of concurrent, any specific reason? I get that async basically means “I don’t care about the order of execution” which is the case here. but concurrent works also fine for this right? or is it more that concurrent here means that in order to be correct it must be executed concurrently and that is not the case here?

This looks like a nice solution! Able to log the file as well. Thnx!

concurrent means it must be executed concurrently; if that's not possible it returns an error.

Your code doesn’t need concurrency, and there is a point where it won’t improve performance either.
Each unit of concurrency also needs memory to track things, so you are needlessly increasing your memory usage too.

It is also possible depending on the Io implementation that you hit a limit for concurrency that it will start returning the ConcurrencyUnavailable error, which your code will fail the test on.

async is just more applicable, and easier to use.

1 Like

I think you want async here. There’s no reason why parsing different files need to be simultaneous.
If the tests are supposed to succeed, you could just create a var file_error: Error!void = {}, and pass a pointer to each task. At the end, you just try it. You get one error per test, but you’ll probably just fix one error at a time anyways.

1 Like

I’ve implemented as follows using your sketch:

const Test = struct {
    // One entry per processed file: the path plus the outcome of its checks.
    const Result = struct {
        path: []const u8,
        result: anyerror!void,
    };

    /// Runs the actual checks for one file. Any error is returned to `run`,
    /// which forwards it through the queue instead of failing the task.
    /// (Parsing is temporarily commented out to isolate the deadlock.)
    pub fn testValidPng(io: Io, dir: Io.Dir, path: []const u8, expected_width: u32, expected_height: u32) !void {
        _ = expected_width;
        _ = expected_height;
        // Not sure yet how to be able to return different error types in this case, as concurrent expects Cancelable!void
        const file = try dir.openFile(io, path, .{});
        defer file.close(io);
        // var buff: [128]u8 = undefined;
        // var fr = file.reader(io, &buff);
        // const reader = &fr.interface;

        // const image: Image = try parse(reader, gpa);

        // try testing.expectEqual(expected_width, image.width);
        // try testing.expectEqual(expected_height, image.height);
    }

    /// Task entry point with the `Cancelable!void` signature the Io task
    /// machinery expects: captures the real error into a Result on the queue.
    pub fn run(io: Io, queue: *Io.Queue(Result), dir: Io.Dir, path: []const u8, expected_width: u32, expected_height: u32) Io.Cancelable!void {
        // Do not throw the error here, put it on the queue
        const result = testValidPng(io, dir, path, expected_width, expected_height);
        queue.putOne(io, .{ .path = path, .result = result }) catch |err| switch (err) {
            // We allocated a buffer large enough, should never happen
            // NOTE(review): Closed actually means the queue was closed, not
            // that it is full — putOne blocks while full. Verify this is
            // really unreachable for your Io implementation.
            Io.QueueClosedError.Closed => unreachable,
            Io.Cancelable.Canceled => |er| return er,
        };
    }
};
test "can parse basic png" {
    const testing = std.testing;
    const io = testing.io;
    const gpa = testing.allocator;
    _ = gpa;

    const cwd = Io.Dir.cwd();

    var result_buffer: [primary_pngs.len]Test.Result = undefined;
    var result_queue: Io.Queue(Test.Result) = .init(&result_buffer);

    var group: Io.Group = .init;
    for (primary_pngs) |png| {
        group.async(io, Test.run, .{ io, &result_queue, cwd, png.path, png.width, png.height });
    }

    try group.await(io);

    // Every task puts exactly one Result, so drain a known count.
    // BUG FIX: looping `while (result_queue.getOne(io))` until it errors
    // deadlocks — the queue is never closed, so once the buffer is drained
    // getOne blocks forever waiting for a producer that no longer exists.
    for (0..primary_pngs.len) |_| {
        const result = try result_queue.getOne(io);
        result.result catch |err| {
            // Report which file failed before propagating the error.
            std.debug.print("png test failed for file: {s}\n", .{result.path});
            return err;
        };
    }
}

Instead of returning success or not I just put the actual error union and path on the queue.
This does not seem to work though. Somehow I get into a deadlock (or at least it looks like a deadlock — the test just keeps running). I've already disabled the actual png parsing code to make sure that is not the issue.

1 Like

I think I would try another approach:

  • generate a list of all the files
  • @embedFile that into the testing module
  • then use comptime to iterate over that list and create separate tests for all the different files

Then you can use something like this:

to be able to read which of the tests failed.

That way all the decisions about how to run the tests are delegated to the test runner.

2 Likes

I might actually try this, because as long as I use async Io I won’t be able to see which test failed. I might even go with code generation, I already have a csv with the filename and expected results. Using that I could just embed all the files and generate the test cases.