AoC 2024: Day 9

Main thread for Day 9 of the 2024 advent of code. Feel free to discuss the challenge and ask questions. If particular discussions become large, we can break them off into new threads.

Some Rules:

  1. Please try to keep spoilers within spoiler tags. This is done by typing [spoiler]text to be blurred[/spoiler]. If you have longer sections you can also use the [details=“Title”] tags.
  2. Have Fun

Day 9 Challenge

Templates:

Resources:

Previous days discussions

Day 1: AoC 2024: Day 1
Day 2: AoC 2024: Day 2
Day 3: AoC 2024: Day 3
Day 4: AoC 2024: Day 4
Day 5: AoC 2024: Day 5
Day 6: AoC 2024: Day 6
Day 7: AoC 2024: Day 7
Day 8: AoC 2024: Day 8

Once again, quite a bit of potential for cleanup, but it runs.

https://zigbin.io/2ee5c0

Part 2 is quite slow (~170ms compared to ~3ms for p1), but I really don’t feel like optimizing it.

Part 1 & 2
const std = @import("std");

const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("data/day09.txt");

pub fn main() !void {
    // Parse the dense disk map: digits at even indices are file lengths
    // (stored as the file id per block), odd indices are free-space lengths
    // (stored as the sentinel -1 per block).
    var files_list = std.ArrayList(i64).init(gpa);
    defer files_list.deinit(); // was leaked in the original

    const files_raw = std.mem.trim(u8, data, &std.ascii.whitespace);

    var file_id: i64 = 0;
    for (files_raw, 0..) |c, i| {
        const n = c - '0';

        if (i % 2 != 0) {
            // Free space: one -1 sentinel per block.
            try files_list.appendNTimes(-1, n);
            continue;
        }

        try files_list.appendNTimes(file_id, n);
        file_id += 1;
    }

    // Both parts mutate their input in place, so each gets its own copy.
    // `dupe` replaces the previous alloc + @memcpy pair.
    var p1_files = try gpa.dupe(i64, files_list.items);
    defer gpa.free(p1_files);
    var p2_files = try gpa.dupe(i64, files_list.items);
    defer gpa.free(p2_files);

    std.debug.print("part1: {}\n", .{part1(&p1_files)});
    std.debug.print("part2: {}\n", .{part2(&p2_files)});
}

/// Part 1: fragmenting compaction. Individual blocks from the end of the
/// disk are moved into the leftmost free slots (-1) until the two cursors
/// meet, then the filesystem checksum of the result is returned.
/// Mutates the slice in place.
fn part1(files_in: *[]i64) i64 {
    var disk = files_in.*;
    var left: usize = 0;
    var right: usize = disk.len - 1;

    // `left` seeks the next gap from the front; `right` seeks the next
    // occupied block from the back. They converge until no gap remains
    // before an occupied block.
    while (left != right) {
        if (disk[left] != -1) {
            left += 1;
        } else if (disk[right] == -1) {
            right -= 1;
        } else {
            disk[left] = disk[right];
            disk[right] = -1;
            left += 1;
            right -= 1;
        }
    }

    return checksum(&disk);
}

/// Part 2: whole-file compaction. Scanning right-to-left, each file run
/// files[k..l] is moved (at most once) into the leftmost run of free blocks
/// (-1) to its left that is large enough. Mutates the slice in place and
/// returns the filesystem checksum.
fn part2(files_in: *[]i64) i64 {
    var files = files_in.*;
    // Remembers file ids already considered so each file moves at most once.
    var visited = std.AutoHashMap(i64, bool).init(gpa);
    defer visited.deinit(); // fix: the map was previously leaked
    // [k, l] brackets the current candidate file run, right-to-left.
    var k: usize = files.len - 1;
    var l: usize = files.len - 1;

    while (k > 0) {
        if (files[l] == -1) {
            // Skip free space under the cursor.
            l -= 1;
            k = l;
            continue;
        }

        if (files[k - 1] == files[k]) {
            // Extend the run leftwards while the id matches.
            k -= 1;
            continue;
        }

        // getOrPut may allocate; the i64 return type cannot carry an error,
        // so OOM aborts here rather than propagating.
        const visited_entry = visited.getOrPut(files[k]) catch unreachable;

        if (!visited_entry.found_existing) {
            visited_entry.value_ptr.* = true;

            // [i, j] brackets a candidate free run strictly left of the file.
            var i: usize = 0;
            var j: usize = 0;
            while (j < k) {
                if (files[i] != -1) {
                    i += 1;
                    j = i;
                    continue;
                }

                if (files[j + 1] == -1) {
                    // Grow the free run rightwards.
                    j += 1;
                    continue;
                }

                if (j - i >= l - k) {
                    // Free run is big enough: move the whole file.
                    for (0..l - k + 1) |m| {
                        files[i + m] = files[k + m];
                        files[k + m] = -1;
                    }

                    break;
                }

                // Too small; jump past this free run and keep searching.
                i = i + (j - i) + 1;
                j = i;
            }
        }

        // Step the cursors to just before the run we examined.
        l = l - (l - k) - 1;
        k = l;
    }

    return checksum(&files);
}

/// Filesystem checksum: the sum of (block position * file id) over every
/// occupied block; free blocks (-1) contribute nothing.
fn checksum(files: *[]i64) i64 {
    var total: i64 = 0;

    for (files.*, 0..) |file, idx| {
        if (file == -1) continue;
        total += file * @as(i64, @intCast(idx));
    }

    return total;
}

Part 1 ~2ms, Part 2 ~96ms

Part 1 & 2
const std = @import("std");
const mem = std.mem;

input: []const u8,
allocator: mem.Allocator,

//const in = "2333133121414131402";
//const in = "2333133121414131402";
/// Part 1: block-level compaction computed as a running checksum without
/// materialising the compacted disk. File regions consume file ids from the
/// front of `blocks`; free regions consume them from the back.
/// Returns the checksum, or an allocation error.
pub fn part1(this: *const @This()) !?i64 {
    var blocks = std.ArrayList(usize).init(this.allocator);
    defer blocks.deinit();
    const in = this.input;

    // Collect the file id of every occupied block, in disk order.
    // Even digits are file lengths; odd digits (free space) add no blocks.
    // (The original kept an unused free-space counter `f`; removed.)
    var n: usize = 0;
    for (0..in.len) |i| {
        if ('0' > in[i] or in[i] > '9') continue;
        if (i % 2 != 0) continue;
        const nb = in[i] - '0';
        try blocks.appendNTimes(n, nb);
        n += 1;
    }

    var checksum: usize = 0;
    var id: usize = 0; // absolute block position on the compacted disk
    var first: usize = 0; // front cursor into `blocks`

    outer: for (0..in.len) |i| {
        if ('0' > in[i] or in[i] > '9') continue;
        const nb = in[i] - '0';
        if (i % 2 == 0) { // block
            for (0..nb) |_| {
                // Front and back cursors have met: everything is scored.
                if (first >= blocks.items.len) break :outer;
                checksum += id * blocks.items[first];
                first += 1;
                id += 1;
            }
        } else { // free space
            for (0..nb) |_| {
                // Fill the gap with the rightmost remaining block.
                const b = blocks.popOrNull();
                if (b == null or first > blocks.items.len) break :outer;
                checksum += id * b.?;
                id += 1;
            }
        }
    }

    return @intCast(checksum);
}

/// A run of identical blocks belonging to one file: file id `num`
/// repeated `quantity` times. `quantity` is set to 0 once the file
/// has been moved elsewhere.
const Block = struct {
    num: usize,
    quantity: u8,
};

/// Part 2: whole-file compaction computed directly as a running checksum,
/// without materialising the disk. Walks the dense map once; file regions
/// score their own file unless it was already moved, and free regions
/// greedily pull the rightmost not-yet-moved files that fit.
/// NOTE(review): gaps pull files greedily per-gap rather than iterating
/// files by strictly decreasing id as the puzzle states; the orders appear
/// to coincide for real inputs -- verify equivalence.
pub fn part2(this: *const @This()) !?i64 {
    const in = this.input;
    // const in = "2333133121414131402";

    // One entry per file, in disk order (index order == file id order).
    var blocks = std.ArrayList(Block).init(this.allocator);
    defer blocks.deinit();

    var n: usize = 0;
    for (0..in.len) |i| {
        if ('0' > in[i] or in[i] > '9') continue;
        if (i % 2 == 0) {
            const nb = in[i] - '0';
            try blocks.append(.{ .num = n, .quantity = nb });
            n += 1;
        }
    }
    var checksum: usize = 0;
    // `last` scans files right-to-left when filling gaps; `index` is the
    // file whose home region is being walked; `id` is the absolute block
    // position on disk.
    var last = blocks.items.len - 1;
    var index: usize = 0;
    var id: usize = 0;

    for (0..in.len) |i| {
        if ('0' > in[i] or in[i] > '9') continue;
        var nb = in[i] - '0';
        if (i % 2 == 0) {
            // File region: score the file in place if it has not been moved.
            const qty = blocks.items[index].quantity;
            const num = blocks.items[index].num;
            for (0..qty) |_| {
                checksum += id * num;
                id += 1;
            }
            if (qty == 0) { // if it was moved
                // The vacated region stays empty; just advance the position
                // by the region's original length.
                id += nb;
            }
            index += 1;
        } else {
            // Free region: take whole files from the right that fit, never
            // from left of the current position (hence `last >= index`).
            while (last >= index and nb > 0) {
                const qty = blocks.items[last].quantity;
                const num = blocks.items[last].num;
                if (qty != 0 and qty <= nb) {
                    for (0..qty) |_| {
                        checksum += id * num;
                        id += 1;
                    }
                    nb -= qty;
                    // Mark the file as moved so its home region scores 0.
                    blocks.items[last].quantity = 0;
                }
                last -= 1;
            }
            // Whatever free space could not be filled stays empty.
            id += nb;
            // Restart the right-to-left scan for the next free region.
            last = blocks.items.len - 1;
        }
    }

    return @intCast(checksum);
}

Oh, I wish I knew about “appendNTimes” ^^

Somehow part 2 came out feeling much more aligned with my construction than part 1. My part 1 runs in about 1ms and my part 2 runs in about 300ms.

Part 1 and Part 2

full code on GitHub
I’ll just paste the relevant function from each part without the setup.

Here’s part 1:

/// One run of equal blocks in the dense disk map: either a span of free
/// space or `len` blocks belonging to a single file.
const Block = struct {
    id: Id,
    len: u8,

    /// Discriminates free space from file content; file runs carry their id.
    const Id = union(enum) {
        free: void,
        file: u16,
    };
};

/// Part 1: parse the dense disk map into alternating file/free `Block` runs,
/// build the block-compacted disk (rightmost file blocks drain into the
/// leftmost free spans), and return its filesystem checksum.
fn process(allocator: std.mem.Allocator, input: []const u8) !usize {
    var fragged: std.ArrayListUnmanaged(Block) = .{};
    defer fragged.deinit(allocator);
    var free = false;
    var id: u16 = 0;
    try fragged.ensureTotalCapacity(allocator, input.len);

    for (input) |byte| {
        // Parity alternates file/free on every iteration.
        // NOTE(review): skipped non-digit bytes (e.g. a trailing newline)
        // also flip `free` via this defer -- harmless only if they appear at
        // the very end of the input; confirm input shape.
        defer free = !free;
        const digit = switch (byte) {
            '0'...'9' => byte - '0',
            else => continue,
        };
        // Files get sequential ids; free runs carry no payload.
        const file: Block.Id = if (free) .free else id: {
            defer id += 1;
            break :id .{ .file = id };
        };
        fragged.appendAssumeCapacity(.{ .id = file, .len = digit });
    }

    // Compacted disk as a flat list of file ids, one entry per block.
    var defragged: std.ArrayListUnmanaged(u16) = .{};
    defer defragged.deinit(allocator);
    var read_head: usize = 0;
    // `copy_head` walks runs right-to-left; `copy`/`copy_len` is the tail
    // file currently being drained into free space. Skip a trailing free run.
    var copy_head: usize = fragged.items.len - 1;
    var copy = if (fragged.items[copy_head].id == .free) copy: {
        copy_head -= 1;
        break :copy fragged.items[copy_head];
    } else fragged.items[copy_head];
    var copy_len = copy.len;
    while (read_head < copy_head) {
        const read = fragged.items[read_head];
        switch (read.id) {
            .free => {
                // Fill this free span from the tail file(s).
                var slice = try defragged.addManyAsSlice(allocator, read.len);
                var len = @min(copy_len, slice.len);
                @memset(slice[0..len], copy.id.file);
                while (len < slice.len) {
                    // we finished reading a file
                    slice = slice[len..];
                    // Runs alternate file/free, so stepping back two lands
                    // on the previous file run.
                    copy_head -= 2;
                    copy = fragged.items[copy_head];
                    copy_len = copy.len;
                    len = @min(copy_len, slice.len);
                    @memset(slice[0..len], copy.id.file);
                }
                // we have leftover length in copy_len
                copy_len -= len;
                read_head += 1;
            },
            .file => |file_id| {
                // File still left of the drain point: emit it verbatim.
                const slice = try defragged.addManyAsSlice(allocator, read.len);
                @memset(slice, file_id);
                read_head += 1;
            },
        }
    }
    // Flush whatever remains of the partially drained tail file.
    if (read_head == copy_head and copy_len > 0) {
        const slice = try defragged.addManyAsSlice(allocator, copy_len);
        @memset(slice, copy.id.file);
    }
    var checksum: usize = 0;
    for (defragged.items, 0..) |file_id, pos| checksum += file_id * pos;
    return checksum;
}

Part 2 uses the same Block struct


/// Part 2: whole-file compaction on the run-length representation. Each
/// file run (visited right-to-left) moves into the leftmost free run to its
/// left that can hold it; the checksum is computed from the final layout.
fn process(allocator: std.mem.Allocator, input: []const u8) !usize {
    var fragged: std.ArrayListUnmanaged(Block) = .{};
    defer fragged.deinit(allocator);
    var free = false;
    var id: u16 = 0;
    try fragged.ensureTotalCapacity(allocator, input.len);

    for (input) |byte| {
        // NOTE(review): skipped non-digit bytes also flip `free` via this
        // defer -- harmless only for a single trailing newline; confirm.
        defer free = !free;
        const digit = switch (byte) {
            '0'...'9' => byte - '0',
            else => continue,
        };
        const file: Block.Id = if (free) .free else id: {
            defer id += 1;
            break :id .{ .file = id };
        };
        fragged.appendAssumeCapacity(.{ .id = file, .len = digit });
    }

    // `pos` tracks the absolute block offset of run `idx`: start past the
    // end and subtract run lengths as we walk backwards.
    var idx = fragged.items.len - 1;
    var pos: usize = 0;
    for (fragged.items) |block| {
        pos += block.len;
    }

    while (idx > 0) {
        const block = &fragged.items[idx];
        pos -= block.len;
        if (block.id == .free) {
            idx -= 1;
            continue;
        }
        // Search left-to-right for a free run that fits, stopping at the
        // block's own position (files only ever move left).
        var new_pos: usize = 0;
        var i: usize = 0;
        while (new_pos < pos) : (i += 1) {
            const item = &fragged.items[i];
            // Capture the length now: a fit rewrites `item` below, but the
            // deferred position advance must use the pre-move length.
            const item_len = item.len;
            defer new_pos += item_len;
            if (item.id != .free or item.len < block.len) continue;
            if (item.len == block.len) {
                // perfect fit
                item.id = block.id;
                block.id = .free;
                idx -= 1;
                break;
            }
            // Partial fit: move the file in, then insert the leftover free
            // space right after it. The insert shifts everything at >= i+1
            // one slot right, so NOT decrementing idx still lands on the
            // next unvisited run on the following iteration.
            const rem = item.len - block.len;
            item.* = block.*;
            block.id = .free;
            try fragged.insert(allocator, i + 1, .{ .id = .free, .len = rem });
            break;
        } else {
            // No fitting gap: the file stays where it is.
            idx -= 1;
        }
    }

    // Checksum the final layout directly from the runs.
    var checksum: usize = 0;
    pos = 0;
    for (fragged.items) |block| {
        switch (block.id) {
            .free => pos += block.len,
            .file => |file_id| {
                for (0..block.len) |i| {
                    checksum += file_id * (pos + i);
                }
                pos += block.len;
            },
        }
    }
    return checksum;
}

Oh that was an intense night. Got it under 1ms. My trick was to keep lists of holes for each hole size in decreasing index order.

https://zigbin.io/4445f7