Inspired by this excellent post, How to enforce Function Call Ordering, I wanted to try to update some of my personal libraries that focus on reading certain file formats like PNG and KTX2, using comptime to enforce that all elements are read or discarded in the correct order. I have a generic OrderedReader type that enforces that all reads or discards happen in the order of the struct fields given to the OrderedReader, the idea being that the given type should represent the file format you are trying to read.
See here the implementation, some types are not yet supported and in order to parse PNG files for example I would need to support slices with runtime length.
const std = @import("std");
const debug = std.debug;
const Allocator = std.mem.Allocator;
const Io = std.Io;
/// Wraps reads from an `std.Io.Reader` so that the fields of `StructT` must be
/// consumed (read or discarded) in declaration order; violations of that order
/// are rejected at compile time via `@compileError`.
pub fn OrderedReader(comptime StructT: type) type {
    return struct {
        const Self = @This();
        /// Enum mirroring `StructT`'s field names, in declaration order.
        pub const FieldName = std.meta.FieldEnum(StructT);
        /// Index of the next field that may legally be consumed.
        next_field_index: comptime_int = 0,
        /// Returns a namespace of reader helpers (`discard`, `discardUntil`,
        /// `read`, `take`) specialized for field `name`.
        /// Emits a compile error when `name` is not the next expected field.
        pub fn field(comptime self: *Self, comptime name: FieldName) type {
            if (comptime @intFromEnum(name) != self.next_field_index) {
                const expected: FieldName = @enumFromInt(self.next_field_index);
                @compileError("." ++ @tagName(expected) ++ " is expected, but received ." ++ @tagName(name));
            }
            self.next_field_index += 1;
            const FT = comptime FieldType(name);
            const next_index = self.next_field_index;
            return switch (@typeInfo(FT)) {
                .bool => struct {
                    pub const discardUntil = ReaderImpl(name, next_index).discardUntil;
                    pub const discard = ReaderImpl(name, next_index).discard;
                    pub const read = ReaderImpl(name, next_index).read;
                    pub const take = ReaderImpl(name, next_index).takeBool;
                },
                .int => |i| struct {
                    pub const discardUntil = ReaderImpl(name, next_index).discardUntil;
                    pub const discard = ReaderImpl(name, next_index).discard;
                    pub const read = ReaderImpl(name, next_index).read;
                    // Fixed: `@divExact(i.bits, 8)` is itself a compile error for
                    // bit widths that are not a multiple of 8; a plain equality
                    // test selects the single-byte fast path without that hazard.
                    pub const take = if (i.bits == 8) ReaderImpl(name, next_index).takeByte else ReaderImpl(name, next_index).takeInt;
                },
                .@"enum" => struct {
                    pub const discardUntil = ReaderImpl(name, next_index).discardUntil;
                    pub const discard = ReaderImpl(name, next_index).discard;
                    pub const read = ReaderImpl(name, next_index).read;
                    pub const take = ReaderImpl(name, next_index).takeEnum;
                },
                .@"struct" => struct {
                    pub const discardUntil = ReaderImpl(name, next_index).discardUntil;
                    pub const discard = ReaderImpl(name, next_index).discard;
                    pub const read = ReaderImpl(name, next_index).read;
                    pub const take = ReaderImpl(name, next_index).takeStruct;
                },
                .array => struct {
                    pub const discardUntil = ReaderImpl(name, next_index).discardUntil;
                    pub const discard = ReaderImpl(name, next_index).discard;
                    pub const read = ReaderImpl(name, next_index).read;
                    pub const take = ReaderImpl(name, next_index).takeArray;
                },
                else => @compileError("Type cannot be read"),
            };
        }
        /// Type of the `StructT` field selected by `name`.
        fn FieldType(comptime name: FieldName) type {
            return @FieldType(StructT, @tagName(name));
        }
        /// Concrete reader operations for field `f`; `next_index` is the
        /// order-state value after `f` has been consumed.
        pub fn ReaderImpl(comptime f: FieldName, comptime next_index: comptime_int) type {
            const T = FieldType(f);
            return struct {
                /// Skips over this field without decoding it.
                inline fn discard(reader: *Io.Reader) !void {
                    try reader.discardAll(@sizeOf(T));
                }
                // TODO: How to handle slice fields?
                /// Skips from the start of field `f` up to (not including) field `name`.
                /// NOTE(review): `@offsetOf` on a non-extern struct uses the
                /// unspecified in-memory layout, which may not match the wire
                /// format — confirm `StructT` is `extern` at call sites.
                /// NOTE(review): this does not advance `next_field_index`, so the
                /// order check and the stream position can diverge after use.
                inline fn discardUntil(reader: *Io.Reader, name: FieldName) !void {
                    const next_field: FieldName = @enumFromInt(next_index);
                    comptime debug.assert(@intFromEnum(name) > @intFromEnum(next_field));
                    const n = @offsetOf(StructT, @tagName(name)) - @offsetOf(StructT, @tagName(f));
                    try reader.discardAll(n);
                }
                /// Reads this field's raw bytes into the front of `buffer`.
                /// Fixed: the buffer was declared `[]const u8`, but
                /// `readSliceAll` writes into it, so it must be mutable.
                inline fn read(reader: *Io.Reader, buffer: []u8) !void {
                    debug.assert(buffer.len >= @sizeOf(T));
                    try reader.readSliceAll(buffer[0..@sizeOf(T)]);
                }
                /// Reads a multi-byte integer with the given endianness.
                inline fn takeInt(reader: *Io.Reader, endian: std.builtin.Endian) !T {
                    return reader.takeInt(T, endian);
                }
                /// Reads the enum's integer tag with the given endianness.
                inline fn takeEnum(reader: *Io.Reader, endian: std.builtin.Endian) !T {
                    return reader.takeEnum(T, endian);
                }
                /// Reads one byte; a value of exactly 1 means `true`.
                /// NOTE(review): any other nonzero byte decodes as `false` —
                /// confirm that is intended for the formats being parsed.
                inline fn takeBool(reader: *Io.Reader) !bool {
                    return (try reader.takeByte()) == 1;
                }
                /// Reads a single-byte integer field.
                /// Fixed: was declared to return `!bool`; it must yield `T`
                /// (`@bitCast` covers signed `i8` as well as `u8`).
                inline fn takeByte(reader: *Io.Reader) !T {
                    return @bitCast(try reader.takeByte());
                }
                /// Reads an entire struct with the given endianness.
                inline fn takeStruct(reader: *Io.Reader, endian: std.builtin.Endian) !T {
                    return reader.takeStruct(T, endian);
                }
                /// Reads a fixed-size array by value.
                inline fn takeArray(reader: *Io.Reader) !T {
                    const array_ptr = try reader.takeArray(@sizeOf(T));
                    return std.mem.bytesAsValue(T, array_ptr).*;
                }
            };
        }
    };
}
// Verifies that reads must follow the declaration order of `TestStruct` and
// that each `take` round-trips the expected value from an in-memory byte view.
test "OrderedReader enforces read's to be in order" {
    const builtin = @import("builtin");
    const testing = std.testing;
    const native_endian = builtin.cpu.arch.endian();
    const data: TestStruct = .{
        .signature = .{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
        .header = .{
            .size = @sizeOf(TestBody),
        },
        .body = .{
            .i = 12,
            .j = 13,
            .k = .i,
            .l = .j,
            .m = true,
        },
    };
    // View the in-memory struct as a byte stream to read back from; native
    // endianness keeps the round-trip faithful.
    const source = std.mem.asBytes(&data);
    var reader: Io.Reader = .fixed(source);
    comptime var ordered_reader: OrderedReader(TestStruct) = .{};
    // If swapped with header a compile time error will occur. The ordered reader enforces
    // the order of reads to be the same as the order of struct fields passed to OrderedReader.
    const signature = try ordered_reader.field(.signature).take(&reader);
    try testing.expectEqualSlices(u8, &data.signature, &signature);
    // Fixed: `take` returns an error union, so both calls below need `try`
    // (the `.signature` call above already had it).
    const header = try ordered_reader.field(.header).take(&reader, native_endian);
    try testing.expectEqualDeep(data.header, header);
    const body = try ordered_reader.field(.body).take(&reader, native_endian);
    try testing.expectEqualDeep(data.body, body);
}
On its own it works well and does what I would expect it to do. But now I want to try to integrate this type into a more specialized reader, and here I’m running into some trouble. Here is one variation I tried:
/// Minimal model of the KTX2 container format: 12-byte identifier,
/// fixed header, and the level/DFD/KVD/SGD index block.
const Ktx2 = struct {
    /// Supercompression schemes from the KTX2 spec; declared non-exhaustive
    /// (`_`) so unknown future scheme values survive decoding.
    pub const SuperCompression = enum(u32) { none, basislz, zstandard, zlib, _ };
    /// Fixed-layout KTX2 header; `extern` so the field order and sizes match
    /// the on-disk format exactly (all fields little-endian per the spec).
    pub const Header = extern struct {
        format: u32,
        type_size: u32,
        width: u32,
        height: u32,
        depth: u32,
        layers: u32,
        faces: u32,
        levels: u32,
        supercompression: SuperCompression,
    };
    /// Byte offsets/lengths of the sections that follow the header.
    pub const Index = extern struct {
        // NOTE(review): likely a typo — the KTX2 spec names this pair
        // dfdByteOffset/dfdByteLength, so `fd_byte_offset` should probably be
        // `dfd_byte_offset`. Confirm no callers depend on it before renaming.
        fd_byte_offset: u32,
        dfd_byte_length: u32,
        kvd_byte_offset: u32,
        kvd_byte_length: u32,
        sgd_byte_offset: u64,
        sgd_byte_length: u64,
    };
    /// File identifier: 0xAB 'K' 'T' 'X' ' ' '2' '0' 0xBB '\r' '\n' 0x1A '\n'.
    // NOTE(review): this is a tuple literal, not a `[12]u8`; it must be
    // coerced to an array before it can be sliced or compared byte-wise.
    pub const signature = .{ '«', 'K', 'T', 'X', ' ', '2', '0', '»', '\r', '\n', '\x1A', '\n' };
    identifier: [12]u8,
    header: Header,
    index: Index,
};
/// A KTX2-specialized reader layered on top of `OrderedReader(Ktx2)`.
/// NOTE(review): taking `order` as a pointer to a `comptime var` is what the
/// compiler rejects at the call site with "captured value contains reference
/// to comptime var" — the returned type would close over mutable comptime
/// state. The ordering state must be passed by value (or threaded through
/// return values) rather than captured by pointer.
pub fn Ktx2Reader(comptime order: *OrderedReader(Ktx2)) type {
    return struct {
        const Self = @This();
        /// Fixed: `EndOfStream` added — `Io.Reader.Error` alone does not cover
        /// truncated input reported by the `take*` helpers.
        pub const TakeHeaderError = error{ InvalidSignature, InvalidLevelCount, EndOfStream } || Io.Reader.Error;
        reader: *Io.Reader,
        /// Fixed: previously declared to return `Ktx2Reader` (the generic
        /// function itself, not a type); it must return `Self`.
        pub fn init(reader: *Io.Reader) Self {
            return .{
                .reader = reader,
            };
        }
        /// Reads and validates the identifier, then decodes the header.
        /// Returns `error.InvalidSignature` when the magic bytes don't match.
        pub fn takeHeader(self: *Self) TakeHeaderError!Ktx2.Header {
            // Fixed: `take` returns an error union, so `try` is required
            // before `sig` can be used as an array.
            const sig = try order.field(.identifier).take(self.reader);
            // Coerce the signature (declared as a tuple literal) to a byte
            // array so it can be sliced for the comparison.
            const expected: [12]u8 = Ktx2.signature;
            if (!std.mem.eql(u8, &sig, &expected)) return error.InvalidSignature;
            // Fixed: struct `take` requires an endianness; KTX2 is little-endian.
            return order.field(.header).take(self.reader, .little);
        }
        /// Decodes the section index that follows the header.
        pub fn takeIndex(self: *Self) !Ktx2.Index {
            // Fixed: endianness argument was missing here as well.
            return order.field(.index).take(self.reader, .little);
        }
    };
}
// Reads real KTX2 fixture files and checks every header field against the
// expected values recorded in `test_ktx2_files` (declared elsewhere in the file).
// NOTE(review): the `Ktx2Reader(&order)` line below is what the compiler
// rejects with "captured value contains reference to comptime var" — the
// instantiated type captures a pointer to the mutable comptime `order`.
test "Can read header" {
    const testing = std.testing;
    const io = testing.io;
    const cwd = Io.Dir.cwd();
    // Table-driven loop over the fixture descriptions.
    for (test_ktx2_files) |f| {
        // NOTE(review): no matching `file.close(io)` — presumably fine for a
        // short-lived test process, but consider a `defer` close.
        const file = try cwd.openFile(io, f.path, .{});
        // Scratch buffer for the buffered file reader.
        var buffer: [128]u8 = undefined;
        var fr = file.reader(io, &buffer);
        const reader = &fr.interface;
        comptime var order: OrderedReader(Ktx2) = .{};
        var ktx2_reader: Ktx2Reader(&order) = .init(reader);
        const header = try ktx2_reader.takeHeader();
        try testing.expectEqual(f.format, header.format);
        try testing.expectEqual(f.type_size, header.type_size);
        try testing.expectEqual(f.width, header.width);
        try testing.expectEqual(f.height, header.height);
        try testing.expectEqual(f.depth, header.depth);
        try testing.expectEqual(f.layers, header.layers);
        try testing.expectEqual(f.faces, header.faces);
        try testing.expectEqual(f.levels, header.levels);
        try testing.expectEqual(f.scomp, header.supercompression);
    }
}
Unfortunately, this results in the following error:
src/root.zig:40:12: error: captured value contains reference to comptime var
return struct {
^~~~~~
src/root.zig:83:18: note: 'order' points to comptime var declared here
comptime var order: OrderedReader(Ktx2) = .{};
~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
src/root.zig:84:36: note: called at comptime here
var ktx2_reader: Ktx2Reader(&order) = .init(reader);
~~~~~~~~~~^~~~~~~~
Some other variations I tried:
- Embedding the ordered reader into the Ktx2Reader
- Passing the ordered reader as a parameter to the takeHeader/takeIndex functions
Both resulted in a similar error.
Is there any way to work around this issue?