63 changes: 22 additions & 41 deletions src/block_entity.zig
@@ -1,7 +1,6 @@
const std = @import("std");

const main = @import("main.zig");
const List = main.List;
const Vec3i = main.vec.Vec3i;
const Block = main.blocks.Block;
const Chunk = main.chunk.Chunk;
@@ -11,7 +10,7 @@ const server = main.server;
const User = server.User;
const mesh_storage = main.renderer.mesh_storage;

pub const BlockEntityIndex = u32;
pub const BlockEntityIndex = main.utils.DenseId(u32);

pub const BlockEntityType = struct {
id: []const u8,
@@ -77,31 +76,40 @@ pub const EventStatus = enum {
ignored,
};

fn BlockEntityDataStorage(comptime side: enum {client, server}, T: type) type {
fn BlockEntityDataStorage(T: type) type {
return struct {
pub const DataT = T;
pub const EntryT = struct {
absoluteBlockPosition: Vec3i,
data: DataT,
};
var storage: List(EntryT) = undefined;
var freeIndexList: main.ListUnmanaged(BlockEntityIndex) = .{};
var nextIndex: BlockEntityIndex = @enumFromInt(0);
var storage: main.utils.SparseSet(EntryT, BlockEntityIndex) = .{};
pub var mutex: std.Thread.Mutex = .{};

pub fn init() void {
storage = .init(main.globalAllocator);
storage = .{};
freeIndexList = .{};
}
pub fn deinit() void {
storage.deinit();
storage.deinit(main.globalAllocator);
freeIndexList.deinit(main.globalAllocator);
nextIndex = @enumFromInt(0);
}
pub fn reset() void {
storage.clearRetainingCapacity();
storage.clear();
freeIndexList.clearRetainingCapacity();
}
pub fn add(pos: Vec3i, value: DataT, chunk: *Chunk) void {
mutex.lock();
defer mutex.unlock();

const dataIndex = storage.items.len;
storage.append(.{.absoluteBlockPosition = pos, .data = value});
const dataIndex: BlockEntityIndex = freeIndexList.popOrNull() orelse blk: {
defer nextIndex = @enumFromInt(@intFromEnum(nextIndex) + 1);
break :blk nextIndex;
};
storage.set(main.globalAllocator, dataIndex, value);

const blockIndex = chunk.getLocalBlockIndex(pos);

@@ -120,41 +128,15 @@ fn BlockEntityDataStorage(comptime side: enum {client, server}, T: type) type {
chunk.blockPosToEntityDataMapMutex.unlock();

const entry = entityNullable orelse {
std.log.warn("Couldn't remove entity data of block at position {}", .{pos});
std.log.err("Couldn't remove entity data of block at position {}", .{pos});
return;
};

const dataIndex = entry.value;
_ = storage.swapRemove(dataIndex);
if(dataIndex == storage.items.len) {
return;
}

const movedEntry = storage.items[dataIndex];
Collaborator: Okay, cool, so what's the plan for unloading block entities when the chunk they come from gets unloaded? Do you just iterate all the block entities whenever a chunk is unloaded?

Member Author: I'd just iterate over the chunk hashmap.

Collaborator: Alright I guess, why did I remove it the way I did...? Was it because there is no direct external access for deleting? So the caller will have to explicitly call the proper callback and separately clear the hash map entry.

Member Author:

> Alright I guess, why did I remove it the way I did...?

To keep the list sparse, which is no longer needed due to using the SparseSet.

switch(side) {
.server => propagateRemoveServer(movedEntry.absoluteBlockPosition, dataIndex),
.client => propagateRemoveClient(movedEntry.absoluteBlockPosition, dataIndex),
}
}
fn propagateRemoveServer(pos: Vec3i, index: BlockEntityIndex) void {
const severChunk = server.world.?.getChunkFromCacheAndIncreaseRefCount(ChunkPosition.initFromWorldPos(pos, 1)).?;
defer severChunk.decreaseRefCount();

severChunk.super.blockPosToEntityDataMapMutex.lock();
defer severChunk.super.blockPosToEntityDataMapMutex.unlock();

const otherDataIndex = severChunk.super.getLocalBlockIndex(pos);
severChunk.super.blockPosToEntityDataMap.put(main.globalAllocator.allocator, otherDataIndex, index) catch unreachable;
}
fn propagateRemoveClient(pos: Vec3i, index: BlockEntityIndex) void {
const mesh = mesh_storage.getMeshAndIncreaseRefCount(ChunkPosition.initFromWorldPos(pos, 1)).?;
defer mesh.decreaseRefCount();

mesh.chunk.blockPosToEntityDataMapMutex.lock();
defer mesh.chunk.blockPosToEntityDataMapMutex.unlock();

const otherDataIndex = mesh.chunk.getLocalBlockIndex(pos);
mesh.chunk.blockPosToEntityDataMap.put(main.globalAllocator.allocator, otherDataIndex, index) catch unreachable;
freeIndexList.append(main.globalAllocator, dataIndex);
storage.remove(dataIndex) catch |err| {
std.log.err("Error while remvoing block entity at position {}: {s}", .{pos, @errorName(err)});
};
}
pub fn get(pos: Vec3i, chunk: *Chunk) ?*DataT {
main.utils.assertLocked(&mutex);
@@ -176,7 +158,6 @@ fn BlockEntityDataStorage(comptime side: enum {client, server}, T: type) type {
pub const BlockEntityTypes = struct {
pub const Chest = struct {
const StorageServer = BlockEntityDataStorage(
.server,
struct {
id: ?u32,
},
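The review thread above settles on the core idea of this change: because the SparseSet hands out stable ids (recycled through `freeIndexList`), removing an entry no longer shifts anyone else's index, so the old swap-remove bookkeeping and the `propagateRemoveServer`/`propagateRemoveClient` fix-ups can be dropped. Below is a minimal, self-contained sketch of that pattern — it is not the repo's `main.utils.SparseSet` or `ListUnmanaged`; names like `SketchStore`, `freeIds`, and `nextId` are made up for illustration, built on plain `std.ArrayListUnmanaged`:

```zig
const std = @import("std");

/// Simplified sketch of the sparse-set + free-id pattern used by this PR
/// (illustration only, not the repo's main.utils.SparseSet).
fn SketchStore(comptime T: type) type {
    return struct {
        const Self = @This();
        const invalid: u32 = std.math.maxInt(u32);

        dense: std.ArrayListUnmanaged(T) = .{},
        denseToSparse: std.ArrayListUnmanaged(u32) = .{},
        sparseToDense: std.ArrayListUnmanaged(u32) = .{},
        freeIds: std.ArrayListUnmanaged(u32) = .{},
        nextId: u32 = 0,

        pub fn deinit(self: *Self, gpa: std.mem.Allocator) void {
            self.dense.deinit(gpa);
            self.denseToSparse.deinit(gpa);
            self.sparseToDense.deinit(gpa);
            self.freeIds.deinit(gpa);
        }

        /// Hands out a stable id: reuse a freed one, otherwise mint a new one.
        pub fn add(self: *Self, gpa: std.mem.Allocator, value: T) !u32 {
            const id = if (self.freeIds.items.len > 0)
                self.freeIds.swapRemove(self.freeIds.items.len - 1)
            else blk: {
                defer self.nextId += 1;
                break :blk self.nextId;
            };
            if (id >= self.sparseToDense.items.len) {
                const missing = id + 1 - self.sparseToDense.items.len;
                try self.sparseToDense.appendNTimes(gpa, invalid, missing);
            }
            self.sparseToDense.items[id] = @intCast(self.dense.items.len);
            try self.dense.append(gpa, value);
            try self.denseToSparse.append(gpa, id);
            return id;
        }

        /// O(1) removal: swap-remove inside the dense array and patch the
        /// mapping of the element that got moved. The freed id goes back on
        /// the free list; ids held by outside code (e.g. the per-chunk
        /// blockPosToEntityDataMap) stay valid, which is why the old
        /// propagateRemove* fix-ups are no longer needed.
        pub fn remove(self: *Self, gpa: std.mem.Allocator, id: u32) !void {
            const denseIndex = self.sparseToDense.items[id];
            const movedId = self.denseToSparse.items[self.denseToSparse.items.len - 1];
            _ = self.dense.swapRemove(denseIndex);
            _ = self.denseToSparse.swapRemove(denseIndex);
            self.sparseToDense.items[movedId] = denseIndex;
            self.sparseToDense.items[id] = invalid;
            try self.freeIds.append(gpa, id);
        }
    };
}
```

In this sketch, `add` returns the id the caller stores elsewhere (in the PR, the per-chunk map), and `remove` only touches the store itself — mirroring how `remove()` in this diff just appends the index to `freeIndexList` and calls `storage.remove()`.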
6 changes: 6 additions & 0 deletions src/utils.zig
@@ -1927,6 +1927,12 @@ pub fn SparseSet(comptime T: type, comptime IdType: type) type { // MARK: SparseSet
denseToSparseIndex: main.ListUnmanaged(IdType) = .{},
sparseToDenseIndex: main.ListUnmanaged(IdType) = .{},

pub fn clear(self: *Self) void {
self.dense.clearRetainingCapacity();
self.denseToSparseIndex.clearRetainingCapacity();
self.sparseToDenseIndex.clearRetainingCapacity();
}

pub fn deinit(self: *Self, allocator: NeverFailingAllocator) void {
self.dense.deinit(allocator);
self.denseToSparseIndex.deinit(allocator);
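For the utils.zig hunk: the new `clear()` resets all three internal lists with `clearRetainingCapacity()`, keeping their allocations, while `deinit()` frees them — which is what lets `BlockEntityDataStorage.reset()` above wipe the storage between worlds without giving memory back. A small standalone sketch of that behaviour using plain `std.ArrayListUnmanaged` (the repo's `ListUnmanaged`/`NeverFailingAllocator` are not used here):

```zig
const std = @import("std");

// Minimal illustration of what SparseSet.clear() relies on:
// clearRetainingCapacity() drops the length but keeps the allocation,
// so repeated reset()/refill cycles don't have to reallocate.
pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var list: std.ArrayListUnmanaged(u32) = .{};
    defer list.deinit(gpa);

    try list.append(gpa, 1);
    try list.append(gpa, 2);
    const capacityBefore = list.capacity;

    list.clearRetainingCapacity(); // the call clear() makes on all three internal lists
    std.debug.assert(list.items.len == 0);
    std.debug.assert(list.capacity == capacityBefore);
}
```

Clearing all three lists together also keeps `dense`, `denseToSparseIndex`, and `sparseToDenseIndex` consistent with each other, since the set's invariants assume they describe the same elements.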