std: remove meta.trait
In general, I don't like the idea of std.meta.trait, and so I am
providing some guidance by deleting the entire namespace from the
standard library and compiler codebase.

My main criticism is that it's overcomplicated machinery that bloats
compile times and is ultimately unnecessary given the existence of Zig's
strong type system and reference traces.

Users who want this can create a third party package that provides this
functionality.

closes #18051
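As a point of reference for that last suggestion, here is a minimal sketch of the kind of helper such a third-party package could provide: a hypothetical isNumber check written directly against @typeInfo. It assumes the capitalized std.builtin.Type tag names in use at the time of this commit and is not part of the change itself.

const std = @import("std");

/// Hypothetical stand-in for the removed std.meta.trait.isNumber,
/// implemented directly on top of @typeInfo.
pub fn isNumber(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .Int, .Float, .ComptimeInt, .ComptimeFloat => true,
        else => false,
    };
}

test "isNumber" {
    try std.testing.expect(isNumber(u32));
    try std.testing.expect(isNumber(f64));
    try std.testing.expect(!isNumber([]const u8));
}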
andrewrk committed Nov 22, 2023
1 parent 994e191 commit d5e21a4
Showing 23 changed files with 380 additions and 965 deletions.
1 change: 0 additions & 1 deletion CMakeLists.txt
@@ -290,7 +290,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/mem/Allocator.zig"
"${CMAKE_SOURCE_DIR}/lib/std/meta.zig"
"${CMAKE_SOURCE_DIR}/lib/std/meta/trailer_flags.zig"
"${CMAKE_SOURCE_DIR}/lib/std/meta/trait.zig"
"${CMAKE_SOURCE_DIR}/lib/std/multi_array_list.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux.zig"
18 changes: 8 additions & 10 deletions lib/std/array_hash_map.zig
@@ -4,8 +4,6 @@ const assert = debug.assert;
const testing = std.testing;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const trait = meta.trait;
const autoHash = std.hash.autoHash;
const Wyhash = std.hash.Wyhash;
const Allocator = mem.Allocator;
@@ -2341,13 +2339,13 @@ test "reIndex" {
test "auto store_hash" {
const HasCheapEql = AutoArrayHashMap(i32, i32);
const HasExpensiveEql = AutoArrayHashMap([32]i32, i32);
try testing.expect(meta.fieldInfo(HasCheapEql.Data, .hash).type == void);
try testing.expect(meta.fieldInfo(HasExpensiveEql.Data, .hash).type != void);
try testing.expect(std.meta.fieldInfo(HasCheapEql.Data, .hash).type == void);
try testing.expect(std.meta.fieldInfo(HasExpensiveEql.Data, .hash).type != void);

const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32);
const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32);
try testing.expect(meta.fieldInfo(HasCheapEqlUn.Data, .hash).type == void);
try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).type != void);
try testing.expect(std.meta.fieldInfo(HasCheapEqlUn.Data, .hash).type == void);
try testing.expect(std.meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).type != void);
}

test "sort" {
@@ -2434,12 +2432,12 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
return struct {
fn hash(ctx: Context, key: K) u32 {
_ = ctx;
if (comptime trait.hasUniqueRepresentation(K)) {
return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key))));
if (std.meta.hasUniqueRepresentation(K)) {
return @truncate(Wyhash.hash(0, std.mem.asBytes(&key)));
} else {
var hasher = Wyhash.init(0);
autoHash(&hasher, key);
return @as(u32, @truncate(hasher.final()));
return @truncate(hasher.final());
}
}
}.hash;
@@ -2450,7 +2448,7 @@ pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K
fn eql(ctx: Context, a: K, b: K, b_index: usize) bool {
_ = b_index;
_ = ctx;
return meta.eql(a, b);
return std.meta.eql(a, b);
}
}.eql;
}
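The hashing path above now calls std.meta.hasUniqueRepresentation, which this commit moves out of the trait namespace. The sketch below shows the distinction it draws, assuming the semantics are unchanged from trait.hasUniqueRepresentation: when every byte of the key is significant, the raw bytes are hashed directly; otherwise the key falls back to autoHash.

const std = @import("std");

test "hasUniqueRepresentation selects the byte-hashing fast path" {
    // Every byte of a u32 is significant, so hashing its raw bytes
    // with Wyhash is safe and cheap.
    try std.testing.expect(std.meta.hasUniqueRepresentation(u32));

    // A bool uses only one bit of its byte, so its raw bytes are not a
    // canonical representation; such keys go through autoHash instead.
    try std.testing.expect(!std.meta.hasUniqueRepresentation(bool));
}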
268 changes: 130 additions & 138 deletions lib/std/atomic/Atomic.zig
@@ -153,163 +153,155 @@ pub fn Atomic(comptime T: type) type {
return @atomicRmw(T, &self.value, op, value, ordering);
}

fn exportWhen(comptime condition: bool, comptime functions: type) type {
return if (condition) functions else struct {};
pub inline fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Add, value, ordering);
}

pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct {
pub inline fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Add, value, ordering);
}

pub inline fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Sub, value, ordering);
}
pub inline fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Sub, value, ordering);
}

pub inline fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Min, value, ordering);
}
pub inline fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Min, value, ordering);
}

pub inline fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Max, value, ordering);
}
});
pub inline fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Max, value, ordering);
}

pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct {
pub inline fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.And, value, ordering);
}
pub inline fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.And, value, ordering);
}

pub inline fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Nand, value, ordering);
}
pub inline fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Nand, value, ordering);
}

pub inline fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Or, value, ordering);
}
pub inline fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Or, value, ordering);
}

pub inline fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Xor, value, ordering);
}
pub inline fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) T {
return self.rmw(.Xor, value, ordering);
}

const Bit = std.math.Log2Int(T);
const BitRmwOp = enum {
Set,
Reset,
Toggle,
};
const Bit = std.math.Log2Int(T);
const BitRmwOp = enum {
Set,
Reset,
Toggle,
};

pub inline fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Set, bit, ordering);
}
pub inline fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Set, bit, ordering);
}

pub inline fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Reset, bit, ordering);
}
pub inline fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Reset, bit, ordering);
}

pub inline fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Toggle, bit, ordering);
}
pub inline fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
return bitRmw(self, .Toggle, bit, ordering);
}

inline fn bitRmw(self: *Self, comptime op: BitRmwOp, bit: Bit, comptime ordering: Ordering) u1 {
// x86 supports dedicated bitwise instructions
if (comptime builtin.target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
// TODO: this causes std lib test failures when enabled
if (false) {
return x86BitRmw(self, op, bit, ordering);
}
inline fn bitRmw(self: *Self, comptime op: BitRmwOp, bit: Bit, comptime ordering: Ordering) u1 {
// x86 supports dedicated bitwise instructions
if (comptime builtin.target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
// TODO: this causes std lib test failures when enabled
if (false) {
return x86BitRmw(self, op, bit, ordering);
}
}

const mask = @as(T, 1) << bit;
const value = switch (op) {
.Set => self.fetchOr(mask, ordering),
.Reset => self.fetchAnd(~mask, ordering),
.Toggle => self.fetchXor(mask, ordering),
};
const mask = @as(T, 1) << bit;
const value = switch (op) {
.Set => self.fetchOr(mask, ordering),
.Reset => self.fetchAnd(~mask, ordering),
.Toggle => self.fetchXor(mask, ordering),
};

return @intFromBool(value & mask != 0);
}
return @intFromBool(value & mask != 0);
}

inline fn x86BitRmw(self: *Self, comptime op: BitRmwOp, bit: Bit, comptime ordering: Ordering) u1 {
const old_bit: u8 = switch (@sizeOf(T)) {
2 => switch (op) {
.Set => asm volatile ("lock btsw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
4 => switch (op) {
.Set => asm volatile ("lock btsl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
8 => switch (op) {
.Set => asm volatile ("lock btsq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
else => @compileError("Invalid atomic type " ++ @typeName(T)),
};
inline fn x86BitRmw(self: *Self, comptime op: BitRmwOp, bit: Bit, comptime ordering: Ordering) u1 {
const old_bit: u8 = switch (@sizeOf(T)) {
2 => switch (op) {
.Set => asm volatile ("lock btsw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcw %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
4 => switch (op) {
.Set => asm volatile ("lock btsl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcl %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
8 => switch (op) {
.Set => asm volatile ("lock btsq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Reset => asm volatile ("lock btrq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
.Toggle => asm volatile ("lock btcq %[bit], %[ptr]"
// LLVM doesn't support u1 flag register return values
: [result] "={@ccc}" (-> u8),
: [ptr] "*m" (&self.value),
[bit] "X" (@as(T, bit)),
: "cc", "memory"
),
},
else => @compileError("Invalid atomic type " ++ @typeName(T)),
};

// TODO: emit appropriate tsan fence if compiling with tsan
_ = ordering;
// TODO: emit appropriate tsan fence if compiling with tsan
_ = ordering;

return @as(u1, @intCast(old_bit));
}
});
return @intCast(old_bit);
}
};
}

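With exportWhen and the usingnamespace wrappers gone, the fetch*/bit* methods become plain declarations on Atomic(T). A small usage sketch, assuming the std.atomic.Atomic API and Ordering tag names (.SeqCst and friends) as they existed at the time of this commit:

const std = @import("std");

test "fetchAdd on Atomic(u32)" {
    var counter = std.atomic.Atomic(u32).init(0);
    // fetchAdd returns the previous value and atomically adds 1.
    try std.testing.expectEqual(@as(u32, 0), counter.fetchAdd(1, .SeqCst));
    try std.testing.expectEqual(@as(u32, 1), counter.load(.SeqCst));
}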
2 changes: 1 addition & 1 deletion lib/std/enums.zig
@@ -201,7 +201,7 @@ pub fn nameCast(comptime E: type, comptime value: anytype) E {
if (V == E) break :blk value;
const name: ?[]const u8 = switch (@typeInfo(V)) {
.EnumLiteral, .Enum => @tagName(value),
.Pointer => if (std.meta.trait.isZigString(V)) value else null,
.Pointer => value,
else => null,
};
if (name) |n| {
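After this change, nameCast accepts any pointer value where it previously insisted on trait.isZigString; in practice the argument is still an enum value, an enum literal, or a comptime-known string used as a field name. A usage sketch with invented enum types:

const std = @import("std");

const Color = enum { red, green, blue };
const Paint = enum { red, green, blue, yellow };

test "nameCast maps values across enums by field name" {
    try std.testing.expect(std.enums.nameCast(Paint, Color.red) == .red);
    try std.testing.expect(std.enums.nameCast(Paint, .green) == .green);
    try std.testing.expect(std.enums.nameCast(Paint, "blue") == .blue);
}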
15 changes: 6 additions & 9 deletions lib/std/fmt.zig
@@ -478,7 +478,7 @@ pub fn formatType(
return formatAddress(value, options, writer);
}

if (comptime std.meta.trait.hasFn("format")(T)) {
if (std.meta.hasFn(T, "format")) {
return try value.format(actual_fmt, options, writer);
}

@@ -611,15 +611,12 @@ pub fn formatType(
else => {},
}
}
if (comptime std.meta.trait.isZigString(info.child)) {
for (value, 0..) |item, i| {
comptime checkTextFmt(actual_fmt);
if (i != 0) try formatBuf(", ", options, writer);
try formatBuf(item, options, writer);
}
return;
for (value, 0..) |item, i| {
comptime checkTextFmt(actual_fmt);
if (i != 0) try formatBuf(", ", options, writer);
try formatBuf(item, options, writer);
}
invalidFmtError(fmt, value);
return;
},
.Enum, .Union, .Struct => {
return formatType(value.*, actual_fmt, options, writer, max_depth);
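The hasFn("format") check above is what lets user types opt into custom formatting. A self-contained sketch of the calling convention it detects follows; the Point type is an invented example, not part of this commit.

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub fn format(
        self: Point,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        try writer.print("({d}, {d})", .{ self.x, self.y });
    }
};

test "std.fmt dispatches to a declared format method" {
    var buf: [32]u8 = undefined;
    const rendered = try std.fmt.bufPrint(&buf, "{}", .{Point{ .x = 3, .y = -7 }});
    try std.testing.expectEqualStrings("(3, -7)", rendered);
}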
(Diffs for the remaining 18 changed files are not shown.)