This is my code for a Tensor struct in Zig, where the data is stored in a 1D linear array (`[]T`). I would like to create a function `toArray` that uses the `shape` to return the data as a multidimensional array. The issue I am facing is that the return type varies depending on the shape: it could be `[][]T`, `[][][]T`, or an even higher-dimensional type, and I am unsure how to handle this dynamic return type given the multiple possible array types.
/// Generic N-dimensional tensor over element type `T`.
/// Elements are stored flattened in row-major order in `data`; `shape`
/// holds the size of each dimension. Both slices are owned by `allocator`
/// and released by `deinit()`.
pub fn Tensor(comptime T: type) type {
    return struct {
        /// Flattened element storage (row-major); owned by `allocator`.
        data: []T,
        /// Total element count (product of `shape` entries).
        size: usize,
        /// Per-dimension sizes; owned by `allocator`.
        shape: []usize,
        allocator: *const std.mem.Allocator,

        /// Builds a tensor by flattening `inputArray` (any nesting depth,
        /// handled by `flattenArray`) into newly allocated storage described
        /// by `shape`. `shape` is copied; the caller keeps ownership of both
        /// inputs. Caller must `deinit()` the returned tensor.
        pub fn fromArray(allocator: *const std.mem.Allocator, inputArray: anytype, shape: []usize) !@This() {
            var total_size: usize = 1;
            for (shape) |dim| {
                total_size *= dim;
            }
            const tensorShape = try allocator.alloc(usize, shape.len);
            // Don't leak the shape copy if the data allocation below fails.
            errdefer allocator.free(tensorShape);
            @memcpy(tensorShape, shape);
            const tensorData = try allocator.alloc(T, total_size);
            _ = flattenArray(T, inputArray, tensorData, 0);
            return @This(){
                .data = tensorData,
                .size = total_size,
                .shape = tensorShape,
                .allocator = allocator,
            };
        }

        /// Returns an empty tensor (no heap allocations); safe to `deinit()`.
        pub fn init(allocator: *const std.mem.Allocator) !@This() {
            return @This(){
                .data = &[_]T{},
                .size = 0,
                .shape = &[_]usize{},
                .allocator = allocator,
            };
        }

        /// Deep-copies `self` into a new, independently owned tensor.
        pub fn copy(self: *@This()) !Tensor(T) {
            return try Tensor(T).fromArray(self.allocator, self.data, self.shape);
        }

        /// Returns an all-zero tensor with the given shape.
        /// Allocates and zeroes the storage directly instead of round-tripping
        /// through fromArray(), which leaked the temporary zero buffer.
        pub fn fromShape(allocator: *const std.mem.Allocator, shape: []usize) !@This() {
            var total_size: usize = 1;
            for (shape) |dim| {
                total_size *= dim;
            }
            const tensorShape = try allocator.alloc(usize, shape.len);
            errdefer allocator.free(tensorShape);
            @memcpy(tensorShape, shape);
            const tensorData = try allocator.alloc(T, total_size);
            @memset(tensorData, 0);
            return @This(){
                .data = tensorData,
                .size = total_size,
                .shape = tensorShape,
                .allocator = allocator,
            };
        }

        /// Replaces the tensor's contents with `inputArray`, possibly with a
        /// new shape (i.e. fill() can also reshape). On allocation failure the
        /// tensor is left unchanged.
        pub fn fill(self: *@This(), inputArray: anytype, shape: []usize) !void {
            var total_size: usize = 1;
            for (shape) |dim| {
                total_size *= dim;
            }
            // Allocate the new buffers BEFORE freeing the old ones, so a
            // failed allocation cannot leave the tensor half-destroyed.
            const tensorShape = try self.allocator.alloc(usize, shape.len);
            errdefer self.allocator.free(tensorShape);
            @memcpy(tensorShape, shape);
            const tensorData = try self.allocator.alloc(T, total_size);
            _ = flattenArray(T, inputArray, tensorData, 0);
            self.deinit(); // no-op on a freshly init()-ed tensor
            self.data = tensorData;
            self.size = total_size;
            self.shape = tensorShape;
        }

        /// Releases owned storage and resets both slices to empty.
        /// Idempotent: calling it twice (or on an init()-ed tensor) is safe.
        pub fn deinit(self: *@This()) void {
            if (self.data.len > 0) {
                self.allocator.free(self.data);
                self.data = &[_]T{};
            }
            if (self.shape.len > 0) {
                self.allocator.free(self.shape);
                self.shape = &[_]usize{};
            }
        }

        /// Replaces the shape metadata and recomputes `size`.
        /// Copies `shape` into allocator-owned memory: the old code stored the
        /// caller's slice directly, so a later deinit() would free() memory
        /// this allocator does not own (undefined behavior), and the previous
        /// shape allocation leaked.
        /// NOTE(review): does not reallocate `data`; presumably callers keep
        /// the element count consistent with `data.len` — confirm.
        pub fn setShape(self: *@This(), shape: []usize) !void {
            var total_size: usize = 1;
            for (shape) |dim| {
                total_size *= dim;
            }
            const tensorShape = try self.allocator.alloc(usize, shape.len);
            @memcpy(tensorShape, shape);
            if (self.shape.len > 0) self.allocator.free(self.shape);
            self.shape = tensorShape;
            self.size = total_size;
        }

        /// Total number of elements.
        pub fn getSize(self: *@This()) usize {
            return self.size;
        }

        /// Returns the element at flat index `idx`, or
        /// error.IndexOutOfBounds if `idx` is out of range.
        pub fn get(self: *const @This(), idx: usize) !T {
            if (idx >= self.data.len) {
                return error.IndexOutOfBounds;
            }
            return self.data[idx];
        }

        /// Sets the element at flat index `idx`, or returns
        /// error.IndexOutOfBounds if `idx` is out of range.
        pub fn set(self: *@This(), idx: usize, value: T) !void {
            if (idx >= self.data.len) {
                return error.IndexOutOfBounds;
            }
            self.data[idx] = value;
        }

        /// Converts multidimensional `indices` into a flat row-major index.
        /// Returns error.InvalidIndices when `indices.len` does not match the
        /// rank (the old code read `indices` out of bounds in that case).
        pub fn flatten_index(self: *const @This(), indices: []const usize) !usize {
            if (indices.len != self.shape.len) return error.InvalidIndices;
            var idx: usize = 0;
            var stride: usize = 1;
            // Walk dimensions from innermost (last) to outermost.
            for (0..self.shape.len) |i| {
                const d = self.shape.len - 1 - i;
                idx += indices[d] * stride;
                stride *= self.shape[d];
            }
            return idx;
        }

        /// Returns the element at the given multidimensional indices.
        pub fn get_at(self: *const @This(), indices: []const usize) !T {
            const idx = try self.flatten_index(indices);
            return self.get(idx);
        }

        /// Sets the element at the given multidimensional indices.
        pub fn set_at(self: *@This(), indices: []const usize, value: T) !void {
            const idx = try self.flatten_index(indices);
            return self.set(idx, value);
        }

        /// Debug aid: prints element type, size, shape, and then the data.
        pub fn info(self: *@This()) void {
            std.debug.print("\ntensor infos: ", .{});
            // Use T directly: @TypeOf(self.data[0]) is the same type but
            // implies indexing a possibly-empty slice.
            std.debug.print("\n data type:{}", .{T});
            std.debug.print("\n size:{}", .{self.size});
            std.debug.print("\n shape.len:{} shape: [ ", .{self.shape.len});
            for (self.shape) |dim| {
                std.debug.print("{} ", .{dim});
            }
            std.debug.print("] ", .{});
            self.print();
        }

        /// Debug aid: prints the flattened data.
        pub fn print(self: *@This()) void {
            std.debug.print("\n tensor data: ", .{});
            for (self.data) |elem| {
                std.debug.print("{} ", .{elem});
            }
            std.debug.print("\n", .{});
        }
    };
}
I would appreciate any advice on how to handle the return type dynamically based on the shape. Thanks for your help!