I have been poking around trying to code this up and learn a bit of zig on the way. I seem to getting different results based on a print statement. Line 21 seems to impact the test results which is surprising. Is there something I am missing?
const std = @import("std");
const maxInt = std.math.maxInt;
pub fn bitify(comptime I: type, n: I) []const u1 {
var zeroOffset: usize = 0;
if (n == 0) {
const b = [_]u1{0};
return b[zeroOffset..1];
} else if (n == 1) {
const b = [_]u1{1};
return b[zeroOffset..1];
}
const nsize = @bitSizeOf(I);
var bts = [_]u1{0} ** nsize;
var i: usize = 0;
var num = n;
while (i < nsize) : (i += 1) {
const bit = @intCast(u1, num % 2);
// XXX: Comment out this print statement and interesting things happen
std.debug.print("Casting {d} num is: {d} bit is: {d}\n", .{ i, num, bit });
bts[nsize - (i + 1)] = bit;
num = num / 2;
if (bit == 1) {
zeroOffset = nsize - (i + 1);
}
}
std.debug.print("Value {d} set bit: {d} bits are: {d}\n", .{ n, zeroOffset, bts });
// Drop leading 0's
var respBits: []const u1 = bts[zeroOffset..bts.len];
std.debug.print("Response bits are: {d}\n", .{respBits});
return respBits;
}
fn u1SliceEqual(name: []const u8, first: []const u1, second: []const u1) bool {
if (first.len != second.len) {
std.debug.print("Test {s} bit lengths to not match {d} != {d}\n", .{ name, first.len, second.len });
return false;
}
for (first) |b, i| {
if (second[i] != b) {
std.debug.print("For test {s}: index {d} first {d} second {d} not equal first {d} second {d}\n", .{ name, i, first, second, b, second[i] });
return false;
}
}
return true;
}
pub fn bits(comptime I: type, n: I) []const u1 {
const b = switch (I) {
u8, u16, u32, u64, u128 => {
if (n <= maxInt(u8)) {
return bitify(u8, @intCast(u8, n));
} else if (n > (maxInt(u8)) and n <= maxInt(u16)) {
return bitify(u16, @intCast(u16, n));
} else if (n > maxInt(u16) and n <= maxInt(u32)) {
return bitify(u32, @intCast(u32, n));
} else if (n > maxInt(u32) and n <= maxInt(u64)) {
return bitify(u64, @intCast(u64, n));
} else {
return bitify(u128, @intCast(u128, n));
}
},
else => @compileError("Not a number type"),
};
return b;
}
test "test 2^n bits" {
var allocator = std.testing.allocator;
var i: usize = 1;
while (i < 128) {
var array = std.ArrayList(u1).init(allocator); //.initCapacity(allocator, i + 1);
var j: usize = 0;
while (j < i) {
try array.append(1);
j += 1;
}
var v: u128 = @intCast(u128, std.math.pow(u128, 2, i) - 1);
var bitArray = bits(u128, v);
std.debug.print("\nValue {d} bit array {d} length {d} slice {d}\n", .{ v, bitArray, i, array.items });
try std.testing.expect(u1SliceEqual("u128 v", bitArray, array.items));
i += 1;
array.deinit();
}
}
test "bitify number" {
const array12 = [_]u1{ 1, 1, 0, 0, 0, 0 };
var zero: usize = 0;
const slice12 = array12[zero..array12.len];
try std.testing.expect(u1SliceEqual("u8 12", bitify(u8, 12), slice12));
}
I have been wondering if it has something to do with not allocating the bit arrays correctly?