Be kind…
herewith a noob class as per the suggestions (@vulpesx's _arg) that, at first look, I can definitely follow myself.
- Data members start with m_ then snake case
- Methods, camelCase (as per reference)
- Struct names, PascalCase (as per reference)
- Constant members, camelCase
- Parameters, snake case (as per reference) with _arg appended (as per @vulpesx - quite like it)
- Local variables, start with _ then snake case (as per reference)
res (short for result) and self are “special”
I think I can live with this. Especially since:
- All locals come up when I press underscore.
- All parameters are “near natural”.
- Even though self is involved, the m_ prefix for members not only avoids collisions but is also clear on its own.
Not all methods are tested or finalized, but here is a rough draft example.
const std = @import("std");
const print = std.debug.print;
const math = std.math;
pub const PascalString = struct {
pub const Self = @This();
pub const maxSize: u8 = 255;
m_size: u8,
m_buffer: [maxSize]u8,
m_overflow: bool,
pub fn new(source_arg: []const u8) PascalString {
// Clamp before the cast: @intCast on a length over 255 would trip a
// safety panic before the old overflow check ever ran.
var res = PascalString{
.m_overflow = source_arg.len > maxSize,
.m_size = @intCast(@min(source_arg.len, maxSize)),
.m_buffer = undefined,
};
@memcpy(
res.m_buffer[0..res.m_size],
source_arg[0..res.m_size],
);
return res;
}
pub fn toString(self: *Self) []u8 {
return self.m_buffer[0..self.m_size];
}
fn availableSlice(self: *Self) []u8 {
return self.m_buffer[self.m_size..];
}
pub fn available(self: *Self) u8 {
return maxSize - self.m_size;
}
pub fn indexOf(self: *Self, find_arg: []const u8) isize {
// Search only the used part of the buffer, not all 255 bytes of it.
if (std.mem.indexOf(
u8,
self.toString(),
find_arg,
)) |pos| {
return @bitCast(pos);
} else {
return -1;
}
}
pub fn count(self: *Self, find_arg: []const u8) usize {
// Count only within the used part of the buffer.
const res = std.mem.count(
u8,
self.toString(),
find_arg,
);
return res;
}
pub fn reset(self: *Self) *Self {
self.m_size = 0;
self.m_overflow = false;
return self;
}
pub fn dump(self: *Self) void {
print("\n*size={}", .{&self.m_size});
print("\n*buffer={}", .{&self.m_buffer[0]});
print("\noverflow={}", .{self.m_overflow});
print("\n", .{});
}
// pub fn addAny(self: @This(), other: anytype) !*Self {
// var buffer: [maxSize]u8 = undefined;
//
// switch (@TypeOf(other)) {
// // note: the arm must be `u32` itself (`@TypeOf(u32)` is just `type`)
// // and bufPrint's result has to be consumed
// u32 => _ = try std.fmt.bufPrint(&buffer, "{d}", .{other}),
// else => std.debug.panic("Failed", .{}),
// }
//
// return self;
// }
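// Untested sketch of what addAny above might become (my addition, not part
// of the draft): the switch on @TypeOf runs at comptime, and routing through
// sprintf/addStr keeps the size and overflow bookkeeping in one place. The
// exact type list is my assumption.
pub fn addAny(self: *Self, other_arg: anytype) !*Self {
return switch (@TypeOf(other_arg)) {
// integers are formatted into the free tail of the buffer
u8, u16, u32, u64, usize, comptime_int => self.sprintf("{d}", .{other_arg}),
// anything that coerces to []const u8 (slices, string literals) is appended as-is
else => self.addStr(other_arg),
};
}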
pub fn addPascal(self: *Self, append_arg: *Self) *Self {
return self.addStr(append_arg.toString());
}
pub fn addStr(self: *Self, append_arg: []const u8) *Self {
var _count: u8 = @truncate(
if (append_arg.len > self.m_buffer.len) self.m_buffer.len else append_arg.len,
);
// Make a backup of current size before ruining it
const _target = self.m_size;
if (self.m_size + append_arg.len > self.m_buffer.len) {
_count = @as(u8, self.m_buffer.len) - self.m_size;
self.m_overflow = true;
self.m_size = @intCast(self.m_buffer.len);
} else {
self.m_size += @as(u8, @intCast(append_arg.len));
}
@memcpy(
self.m_buffer[_target..self.m_size],
append_arg[0.._count],
);
return self;
}
// Cases:
// 1. index > size, just append the string
// 2. other overflows self from the index
// 3. other overflows maxSize
pub fn overtype(self: *Self, overtype_arg: []const u8, index_arg: u8) *Self {
if (index_arg > self.m_size) {
return self.addStr(overtype_arg);
}
const _end_offset = @as(usize, index_arg) + overtype_arg.len;
var _copy_size: u8 = @truncate(overtype_arg.len);
if (_end_offset > self.m_size) {
if (_end_offset < maxSize) {
self.m_size = @truncate(_end_offset);
} else {
_copy_size = maxSize - index_arg;
if (self.m_size < maxSize) {
self.m_overflow = true;
self.m_size += _copy_size;
}
}
}
@memcpy(
self.m_buffer[index_arg .. index_arg + _copy_size],
overtype_arg[0.._copy_size],
);
return self;
}
/// Insert source_arg at index_arg, shifting the tail right; truncates and
/// sets m_overflow when the result exceeds maxSize.
pub fn insertStr(self: *Self, source_arg: []const u8, index_arg: u8) *Self {
if (index_arg > self.m_size) {
return self.addStr(source_arg);
}
var _buffer: [maxSize]u8 = undefined;
// This is size safe
@memcpy(
_buffer[0..index_arg],
self.m_buffer[0..index_arg],
);
// other copy should not go over right side
// Clamp: the original `maxSize - index_arg - source_arg.len` would
// underflow a usize whenever the insert overflows.
const _length = if (index_arg + source_arg.len > maxSize)
maxSize - index_arg
else
source_arg.len;
@memcpy(
_buffer[index_arg .. index_arg + _length],
source_arg[0.._length],
);
// Tail copy
const _tail_offset = index_arg + _length;
if (_tail_offset < maxSize) {
const _available = maxSize - _tail_offset;
const _residual = self.m_size - index_arg;
const _current = index_arg + _length;
const _tail_size = if (_residual > _available) _available else _residual;
@memcpy(
_buffer[_current .. _current + _tail_size],
self.m_buffer[index_arg .. index_arg + _tail_size],
);
}
if (@as(usize, self.m_size) + source_arg.len > maxSize) {
self.m_overflow = true;
self.m_size = maxSize;
} else {
self.m_size += @truncate(source_arg.len);
}
// Copy back the temp buffer to self.buffer
@memcpy(
self.m_buffer[0..self.m_size],
_buffer[0..self.m_size],
);
return self;
}
pub fn left(self: *Self, len_arg: u8) []u8 {
const res = self.m_buffer[0..if (len_arg > self.m_size) self.m_size else len_arg];
return res;
}
pub fn right(self: *Self, len_arg: u8) []u8 {
const l = if (len_arg > self.m_size) self.m_size else len_arg;
const res = self.m_buffer[self.m_size - l .. self.m_size];
return res;
}
pub fn deleteIter(self: *Self, index_arg: u8, count_arg: u8) *Self {
if (index_arg >= self.m_size) return self;
// Clamp the start: an unclamped index_arg + count_arg could overflow u8
// or produce a backwards range, and both panic in safe builds.
const _from = @min(@as(usize, index_arg) + count_arg, self.m_size);
var _offset: u8 = 0;
for (_from..self.m_size) |source| {
self.m_buffer[index_arg + _offset] = self.m_buffer[source];
_offset += 1;
}
self.m_size = index_arg + _offset;
return self;
}
/// 1234567890 delete(5, 3)
/// HEAD: copy(temp, self.buffer[0..5]
/// tail_from = 5+3 = 8
/// tail_to = 10 (self.size)
/// TAIL: copy(temp, self.buffer[tail_from..tail_to]
/// tail_size = 10 - 8 = 2
/// 1234567890 delete(5, 8)
/// HEAD: copy(temp, self.buffer[0..5]
/// tail_from = 5+8 = 13
/// tail_to = 10 (self.size)
/// TAIL: No Copy as tail_from > tail_to
/// tail_size = 0
pub fn delete(self: *Self, index_arg: u8, count_arg: u8) *Self {
if (index_arg > self.m_size or count_arg == 0)
return self;
var temp: [maxSize]u8 = undefined;
// Copy Head
if (index_arg > 0) {
@memcpy(temp[0..index_arg], self.m_buffer[0..index_arg]);
}
const tail_from = @as(u16, index_arg) + @as(u16, count_arg);
const tail_to = self.m_size;
var tail_size: u8 = undefined;
if (tail_from < tail_to) {
tail_size = @truncate(tail_to - tail_from);
@memcpy(
temp[index_arg .. index_arg + tail_size],
self.m_buffer[tail_from..tail_to],
);
} else {
tail_size = 0;
}
// Copy the assembled head+tail back; only index_arg + tail_size bytes
// of temp hold defined data, so don't copy the old m_size worth.
const _new_size = index_arg + tail_size;
@memcpy(
self.m_buffer[0.._new_size],
temp[0.._new_size],
);
self.m_size = _new_size;
return self;
}
pub fn clear(self: *Self) *Self {
self.m_size = 0;
return self;
}
/// Unlike addStr, a string that does not fit is not truncated: bufPrint
/// returns error.NoSpaceLeft and m_overflow stays untouched.
pub fn sprintf(self: *Self, comptime format_arg: []const u8, any_arg: anytype) !*Self {
const _str = try std.fmt.bufPrint(self.availableSlice(), format_arg, any_arg);
self.m_size += @truncate(_str.len);
return self;
}
};
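// Untested usage sketch (my addition, not part of the draft): exercises the
// methods above with `zig test`; expected values follow the semantics as written.
test "PascalString rough draft" {
const expect = std.testing.expect;
var ps = PascalString.new("Hello");
try expect(std.mem.eql(u8, ps.toString(), "Hello"));
try expect(ps.available() == PascalString.maxSize - 5);
_ = ps.addStr(", world");
try expect(std.mem.eql(u8, ps.toString(), "Hello, world"));
try expect(ps.indexOf("world") == 7);
try expect(ps.count("l") == 3);
_ = ps.delete(5, 7);
try expect(std.mem.eql(u8, ps.toString(), "Hello"));
_ = try ps.sprintf(" #{d}", .{1});
try expect(std.mem.eql(u8, ps.toString(), "Hello #1"));
}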