From 5b523d04690d8a01cb5d97e4f5a35443cb0cbde8 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 22 Jun 2024 00:29:38 +0100 Subject: [PATCH 001/152] Zir: make `src_line` absolute for `declaration` instructions We need special logic for updating line numbers anyway, so it's fine to just use absolute numbers here. This eliminates a field from `Decl`. --- lib/std/zig/AstGen.zig | 14 +++++++------- lib/std/zig/Zir.zig | 4 +--- src/InternPool.zig | 1 - src/Sema.zig | 37 ++++++++++--------------------------- src/Zcu.zig | 39 +++++++++++++++++++++++++++------------ src/codegen/llvm.zig | 18 +++++++++--------- src/codegen/spirv.zig | 4 ++-- src/link/Dwarf.zig | 12 ++++++------ src/print_zir.zig | 4 ++-- src/type.zig | 30 +++++++++++++++++++++++++++++- 10 files changed, 93 insertions(+), 70 deletions(-) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index eecda7603123..febd6b60bbd3 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -4368,7 +4368,7 @@ fn fnDecl( decl_inst, std.zig.hashSrc(tree.getNodeSource(decl_node)), .{ .named = fn_name_token }, - decl_gz.decl_line - gz.decl_line, + decl_gz.decl_line, is_pub, is_export, doc_comment_index, @@ -4529,7 +4529,7 @@ fn globalVarDecl( decl_inst, std.zig.hashSrc(tree.getNodeSource(node)), .{ .named = name_token }, - block_scope.decl_line - gz.decl_line, + block_scope.decl_line, is_pub, is_export, doc_comment_index, @@ -4579,7 +4579,7 @@ fn comptimeDecl( decl_inst, std.zig.hashSrc(tree.getNodeSource(node)), .@"comptime", - decl_block.decl_line - gz.decl_line, + decl_block.decl_line, false, false, .empty, @@ -4629,7 +4629,7 @@ fn usingnamespaceDecl( decl_inst, std.zig.hashSrc(tree.getNodeSource(node)), .@"usingnamespace", - decl_block.decl_line - gz.decl_line, + decl_block.decl_line, is_pub, false, .empty, @@ -4818,7 +4818,7 @@ fn testDecl( decl_inst, std.zig.hashSrc(tree.getNodeSource(node)), test_name, - decl_block.decl_line - gz.decl_line, + decl_block.decl_line, false, false, .empty, @@ -13861,7 +13861,7 @@ fn setDeclaration( decl_inst: Zir.Inst.Index, src_hash: std.zig.SrcHash, name: DeclarationName, - line_offset: u32, + src_line: u32, is_pub: bool, is_export: bool, doc_comment: Zir.NullTerminatedString, @@ -13913,7 +13913,7 @@ fn setDeclaration( .@"comptime" => .@"comptime", .@"usingnamespace" => .@"usingnamespace", }, - .line_offset = line_offset, + .src_line = src_line, .flags = .{ .value_body_len = @intCast(value_len), .is_pub = is_pub, diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index 5dd69f9923de..be462e5edbd4 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -2598,9 +2598,7 @@ pub const Inst = struct { src_hash_3: u32, /// The name of this `Decl`. Also indicates whether it is a test, comptime block, etc. name: Name, - /// This Decl's line number relative to that of its parent. - /// TODO: column must be encoded similarly to respect non-formatted code! 
- line_offset: u32, + src_line: u32, flags: Flags, pub const Flags = packed struct(u32) { diff --git a/src/InternPool.zig b/src/InternPool.zig index e297a8ea782c..4268a84f37ff 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -6958,7 +6958,6 @@ fn finishFuncInstance( const decl_index = try ip.createDecl(gpa, .{ .name = undefined, .src_namespace = fn_owner_decl.src_namespace, - .src_line = fn_owner_decl.src_line, .has_tv = true, .owns_tv = true, .val = @import("Value.zig").fromInterned(func_index), diff --git a/src/Sema.zig b/src/Sema.zig index a59768686b74..931799828870 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2827,7 +2827,6 @@ fn zirStructDecl( small.name_strategy, "struct", inst, - extra.data.src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -2864,7 +2863,6 @@ fn createAnonymousDeclTypeNamed( name_strategy: Zir.Inst.NameStrategy, anon_prefix: []const u8, inst: ?Zir.Inst.Index, - src_line: u32, ) !InternPool.DeclIndex { const zcu = sema.mod; const ip = &zcu.intern_pool; @@ -2876,7 +2874,7 @@ fn createAnonymousDeclTypeNamed( switch (name_strategy) { .anon => {}, // handled after switch .parent => { - try zcu.initNewAnonDecl(new_decl_index, src_line, val, block.type_name_ctx); + try zcu.initNewAnonDecl(new_decl_index, val, block.type_name_ctx); return new_decl_index; }, .func => func_strat: { @@ -2921,7 +2919,7 @@ fn createAnonymousDeclTypeNamed( try writer.writeByte(')'); const name = try ip.getOrPutString(gpa, buf.items, .no_embedded_nulls); - try zcu.initNewAnonDecl(new_decl_index, src_line, val, name); + try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; }, .dbg_var => { @@ -2935,7 +2933,7 @@ fn createAnonymousDeclTypeNamed( const name = try ip.getOrPutStringFmt(gpa, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); - try zcu.initNewAnonDecl(new_decl_index, src_line, val, name); + try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; }, else => {}, @@ -2956,7 +2954,7 @@ fn createAnonymousDeclTypeNamed( const name = ip.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), }, .no_embedded_nulls) catch unreachable; - try zcu.initNewAnonDecl(new_decl_index, src_line, val, name); + try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; } @@ -3062,7 +3060,6 @@ fn zirEnumDecl( small.name_strategy, "enum", inst, - extra.data.src_line, ); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -3330,7 +3327,6 @@ fn zirUnionDecl( small.name_strategy, "union", inst, - extra.data.src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -3419,7 +3415,6 @@ fn zirOpaqueDecl( small.name_strategy, "opaque", inst, - extra.data.src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -21546,7 +21541,7 @@ fn zirReify( .needed_comptime_reason = "struct fields must be comptime-known", }); - return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool(), extra.src_line); + return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool()); }, .Enum => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); @@ -21575,7 +21570,7 @@ fn zirReify( .needed_comptime_reason = "enum fields must be comptime-known", }); - return 
sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_arr, name_strategy, extra.src_line); + return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_arr, name_strategy); }, .Opaque => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); @@ -21606,7 +21601,6 @@ fn zirReify( name_strategy, "opaque", inst, - extra.src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -21643,7 +21637,7 @@ fn zirReify( .needed_comptime_reason = "union fields must be comptime-known", }); - return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_arr, name_strategy, extra.src_line); + return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_arr, name_strategy); }, .Fn => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); @@ -21745,7 +21739,6 @@ fn reifyEnum( is_exhaustive: bool, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, - src_line: u32, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; @@ -21807,7 +21800,6 @@ fn reifyEnum( name_strategy, "enum", inst, - src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -21871,7 +21863,6 @@ fn reifyUnion( opt_tag_type_val: Value, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, - src_line: u32, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; @@ -21955,7 +21946,6 @@ fn reifyUnion( name_strategy, "union", inst, - src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -22051,7 +22041,7 @@ fn reifyUnion( } } - const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index), src_line); + const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index)); break :tag_ty .{ enum_tag_ty, false }; }; errdefer if (!has_explicit_tag) ip.remove(enum_tag_ty); // remove generated tag type on error @@ -22112,7 +22102,6 @@ fn reifyStruct( fields_val: Value, name_strategy: Zir.Inst.NameStrategy, is_tuple: bool, - src_line: u32, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; @@ -22213,7 +22202,6 @@ fn reifyStruct( name_strategy, "struct", inst, - src_line, ); mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); @@ -26347,7 +26335,6 @@ fn zirBuiltinExtern( const new_decl = mod.declPtr(new_decl_index); try mod.initNewAnonDecl( new_decl_index, - sema.owner_decl.src_line, Value.fromInterned( if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) try ip.getExternFunc(sema.gpa, .{ @@ -36745,10 +36732,10 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded return sema.failWithOwnedErrorMsg(&block_scope, msg); } } else if (enum_field_vals.count() > 0) { - const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), mod.declPtr(union_type.decl), extra.data.src_line); + const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), mod.declPtr(union_type.decl)); union_type.tagTypePtr(ip).* = enum_ty; } else { - const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl), extra.data.src_line); + const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl)); 
union_type.tagTypePtr(ip).* = enum_ty; } } @@ -36766,7 +36753,6 @@ fn generateUnionTagTypeNumbered( enum_field_names: []const InternPool.NullTerminatedString, enum_field_vals: []const InternPool.Index, union_owner_decl: *Module.Decl, - src_line: u32, ) !InternPool.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -36783,7 +36769,6 @@ fn generateUnionTagTypeNumbered( ); try mod.initNewAnonDecl( new_decl_index, - src_line, Value.@"unreachable", name, ); @@ -36816,7 +36801,6 @@ fn generateUnionTagTypeSimple( block: *Block, enum_field_names: []const InternPool.NullTerminatedString, union_owner_decl: *Module.Decl, - src_line: u32, ) !InternPool.Index { const mod = sema.mod; const ip = &mod.intern_pool; @@ -36834,7 +36818,6 @@ fn generateUnionTagTypeSimple( ); try mod.initNewAnonDecl( new_decl_index, - src_line, Value.@"unreachable", name, ); diff --git a/src/Zcu.zig b/src/Zcu.zig index 3faa1b878047..059aece30949 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -347,10 +347,6 @@ pub const Decl = struct { /// there is no parent. src_namespace: Namespace.Index, - /// Line number corresponding to `src_node`. Stored separately so that source files - /// do not need to be loaded into memory in order to compute debug line numbers. - /// This value is absolute. - src_line: u32, /// Index of the ZIR `declaration` instruction from which this `Decl` was created. /// For the root `Decl` of a `File` and legacy anonymous decls, this is `.none`. zir_decl_index: InternPool.TrackedInst.Index.Optional, @@ -564,6 +560,33 @@ pub const Decl = struct { .offset = LazySrcLoc.Offset.nodeOffset(0), }; } + + pub fn navSrcLine(decl: Decl, zcu: *Zcu) u32 { + const tracked = decl.zir_decl_index.unwrap() orelse inst: { + // generic instantiation + assert(decl.has_tv); + assert(decl.owns_tv); + const generic_owner_func = switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) { + .func => |func| func.generic_owner, + else => return 0, // TODO: this is probably a `variable` or something; figure this out when we finish sorting out `Decl`. + }; + const generic_owner_decl = zcu.declPtr(zcu.funcInfo(generic_owner_func).owner_decl); + break :inst generic_owner_decl.zir_decl_index.unwrap().?; + }; + const info = tracked.resolveFull(&zcu.intern_pool); + const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?]; + assert(file.zir_loaded); + const zir = file.zir; + const inst = zir.instructions.get(@intFromEnum(info.inst)); + assert(inst.tag == .declaration); + return zir.extraData(Zir.Inst.Declaration, inst.data.declaration.payload_index).data.src_line; + } + + pub fn typeSrcLine(decl: Decl, zcu: *Zcu) u32 { + assert(decl.has_tv); + assert(decl.owns_tv); + return decl.val.toType().typeDeclSrcLine(zcu).?; + } }; /// This state is attached to every Decl when Module emit_h is non-null. @@ -3944,7 +3967,6 @@ fn semaFile(mod: *Module, file: *File) SemaError!void { new_decl.name = try file.fullyQualifiedName(mod); new_decl.name_fully_qualified = true; - new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.alignment = .none; @@ -4762,8 +4784,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); const declaration = extra.data; - const line = iter.parent_decl.src_line + declaration.line_offset; - // Every Decl needs a name. 
const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) { .@"comptime" => info: { @@ -4850,7 +4870,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void const was_exported = decl.is_exported; assert(decl.kind == kind); // ZIR tracking should preserve this decl.name = decl_name; - decl.src_line = line; decl.is_pub = declaration.flags.is_pub; decl.is_exported = declaration.flags.is_export; break :decl_index .{ was_exported, decl_index }; @@ -4860,7 +4879,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void const new_decl = zcu.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - new_decl.src_line = line; new_decl.is_pub = declaration.flags.is_pub; new_decl.is_exported = declaration.flags.is_export; new_decl.zir_decl_index = tracked_inst.toOptional(); @@ -5263,7 +5281,6 @@ pub fn allocateNewDecl(zcu: *Zcu, namespace: Namespace.Index) !Decl.Index { const decl_index = try zcu.intern_pool.createDecl(gpa, .{ .name = undefined, .src_namespace = namespace, - .src_line = undefined, .has_tv = false, .owns_tv = false, .val = undefined, @@ -5311,14 +5328,12 @@ pub fn errorSetBits(mod: *Module) u16 { pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, - src_line: u32, val: Value, name: InternPool.NullTerminatedString, ) Allocator.Error!void { const new_decl = mod.declPtr(new_decl_index); new_decl.name = name; - new_decl.src_line = src_line; new_decl.val = val; new_decl.alignment = .none; new_decl.@"linksection" = .none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1e29d2cbe57c..00cfd4404ae0 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1697,7 +1697,7 @@ pub const Object = struct { const file, const subprogram = if (!wip.strip) debug_info: { const file = try o.getDebugFile(namespace.file_scope); - const line_number = decl.src_line + 1; + const line_number = decl.navSrcLine(zcu) + 1; const is_internal_linkage = decl.val.getExternFunc(zcu) == null and !zcu.decl_exports.contains(decl_index); const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu)); @@ -1741,7 +1741,7 @@ pub const Object = struct { .sync_scope = if (owner_mod.single_threaded) .singlethread else .system, .file = file, .scope = subprogram, - .base_line = dg.decl.src_line, + .base_line = dg.decl.navSrcLine(zcu), .prev_dbg_line = 0, .prev_dbg_column = 0, .err_ret_trace = err_ret_trace, @@ -2067,7 +2067,7 @@ pub const Object = struct { try o.builder.metadataString(name), file, scope, - owner_decl.src_line + 1, // Line + owner_decl.typeSrcLine(mod) + 1, // Line try o.lowerDebugType(int_ty), ty.abiSize(mod) * 8, (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, @@ -2237,7 +2237,7 @@ pub const Object = struct { try o.builder.metadataString(name), try o.getDebugFile(mod.namespacePtr(owner_decl.src_namespace).file_scope), try o.namespaceToDebugScope(owner_decl.src_namespace), - owner_decl.src_line + 1, // Line + owner_decl.typeSrcLine(mod) + 1, // Line .none, // Underlying type 0, // Size 0, // Align @@ -2867,7 +2867,7 @@ pub const Object = struct { try o.builder.metadataString(decl.name.toSlice(&mod.intern_pool)), // TODO use fully qualified name try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope), try o.namespaceToDebugScope(decl.src_namespace), - decl.src_line + 1, + decl.typeSrcLine(mod) + 1, .none, 0, 0, @@ -4762,7 +4762,7 @@ pub const DeclGen = struct { else => try o.lowerValue(init_val), }, &o.builder); - const 
line_number = decl.src_line + 1; + const line_number = decl.navSrcLine(zcu) + 1; const is_internal_linkage = !o.module.decl_exports.contains(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); @@ -5188,7 +5188,7 @@ pub const FuncGen = struct { self.file = try o.getDebugFile(namespace.file_scope); - const line_number = decl.src_line + 1; + const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; const fqn = try decl.fullyQualifiedName(zcu); @@ -5217,7 +5217,7 @@ pub const FuncGen = struct { o.debug_compile_unit, ); - self.base_line = decl.src_line; + self.base_line = decl.navSrcLine(zcu); const inlined_at_location = try self.wip.debug_location.toMetadata(&o.builder); self.wip.debug_location = .{ .location = .{ @@ -8857,7 +8857,7 @@ pub const FuncGen = struct { const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; const func_index = self.dg.decl.getOwnedFunctionIndex(); const func = mod.funcInfo(func_index); - const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; + const lbrace_line = mod.declPtr(func.owner_decl).navSrcLine(mod) + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const debug_parameter = try o.builder.debugParameter( diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 8168cce99693..ee163c31543b 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -212,7 +212,7 @@ pub const Object = struct { false => .{ .unstructured = .{} }, }, .current_block_label = undefined, - .base_line = decl.src_line, + .base_line = decl.navSrcLine(mod), }; defer decl_gen.deinit(); @@ -6345,7 +6345,7 @@ const DeclGen = struct { const decl = mod.funcOwnerDeclPtr(extra.data.func); const old_base_line = self.base_line; defer self.base_line = old_base_line; - self.base_line = decl.src_line; + self.base_line = decl.navSrcLine(mod); return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index ccfdfd0dbfde..7d576abbb4fc 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -948,8 +948,8 @@ pub const DeclState = struct { leb128.writeUnsignedFixed(4, self.dbg_line.addManyAsArrayAssumeCapacity(4), new_file); } - const old_src_line: i33 = self.mod.declPtr(old_func_info.owner_decl).src_line; - const new_src_line: i33 = self.mod.declPtr(new_func_info.owner_decl).src_line; + const old_src_line: i33 = self.mod.declPtr(old_func_info.owner_decl).navSrcLine(self.mod); + const new_src_line: i33 = self.mod.declPtr(new_func_info.owner_decl).navSrcLine(self.mod); if (new_src_line != old_src_line) { self.dbg_line.appendAssumeCapacity(DW.LNS.advance_line); leb128.writeSignedFixed(5, self.dbg_line.addManyAsArrayAssumeCapacity(5), new_src_line - old_src_line); @@ -1116,11 +1116,11 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde decl_state.dbg_line_func = decl.val.toIntern(); const func = decl.val.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.src_line, + decl.navSrcLine(mod), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.src_line + func.lbrace_line); + const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line); dbg_line_buffer.appendSliceAssumeCapacity(&.{ DW.LNS.extended_op, @@ -1702,11 +1702,11 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.D const decl = mod.declPtr(decl_index); const func = decl.val.getFunction(mod).?; 
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.src_line, + decl.navSrcLine(mod), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.src_line + func.lbrace_line); + const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line); var data: [4]u8 = undefined; leb128.writeUnsignedFixed(4, &data, line); diff --git a/src/print_zir.zig b/src/print_zir.zig index 6caf6a5d916a..d064f02a8b30 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -583,7 +583,7 @@ const Writer = struct { .reify => { const inst_data = self.code.extraData(Zir.Inst.Reify, extended.operand).data; - try stream.print("{d}, ", .{inst_data.src_line}); + try stream.print("line({d}), ", .{inst_data.src_line}); try self.writeInstRef(stream, inst_data.operand); try stream.writeAll(")) "); const prev_parent_decl_node = self.parent_decl_node; @@ -2749,7 +2749,7 @@ const Writer = struct { extra.data.src_hash_3, }; const src_hash_bytes: [16]u8 = @bitCast(src_hash_arr); - try stream.print(" line(+{d}) hash({})", .{ extra.data.line_offset, std.fmt.fmtSliceHexLower(&src_hash_bytes) }); + try stream.print(" line({d}) hash({})", .{ extra.data.src_line, std.fmt.fmtSliceHexLower(&src_hash_bytes) }); { const bodies = extra.data.getBodies(@intCast(extra.end), self.code); diff --git a/src/type.zig b/src/type.zig index 480db4e5b7ba..df9382227357 100644 --- a/src/type.zig +++ b/src/type.zig @@ -11,6 +11,7 @@ const target_util = @import("target.zig"); const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; +const Zir = std.zig.Zir; /// Both types and values are canonically represented by a single 32-bit integer /// which is an index into an `InternPool` data structure. @@ -3340,7 +3341,7 @@ pub const Type = struct { .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { .declared => |d| d.zir_index, .reified => |r| r.zir_index, - .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, // must be declared since we can't generate tags when reifying + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, .empty_struct => return null, }, else => return null, @@ -3440,6 +3441,33 @@ pub const Type = struct { }; } + pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 { + const ip = &zcu.intern_pool; + const tracked = switch (ip.indexToKey(ty.toIntern())) { + .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { + .declared => |d| d.zir_index, + .reified => |r| r.zir_index, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => return null, + }, + else => return null, + }; + const info = tracked.resolveFull(&zcu.intern_pool); + const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?]; + assert(file.zir_loaded); + const zir = file.zir; + const inst = zir.instructions.get(@intFromEnum(info.inst)); + assert(inst.tag == .extended); + return switch (inst.data.extended.opcode) { + .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line, + .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line, + .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line, + .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line, + .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line, + else => unreachable, + }; + } + /// Given a namespace type, 
returns its list of captured values.
    pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
        const ip = &zcu.intern_pool;

From 4cb5318088b2eb66891f0d83b76a2740644b4156 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Mon, 24 Jun 2024 15:40:23 +0100
Subject: [PATCH 002/152] InternPool: rename `Depender` to `AnalSubject`

This is essentially just a rename. I also changed the representation of
`AnalSubject` to use a `packed struct` rather than a non-exhaustive enum,
but that change is relatively trivial.
---
 src/Compilation.zig |  2 +-
 src/InternPool.zig  | 50 ++++++++++++++++----------------
 src/Sema.zig        | 14 ++++-----
 src/Zcu.zig         | 70 ++++++++++++++++++++++-----------------
 4 files changed, 68 insertions(+), 68 deletions(-)

diff --git a/src/Compilation.zig b/src/Compilation.zig
index de8f5542e3c3..b30f65ad1146 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3494,7 +3494,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                     .{@errorName(err)},
                 ));
                 decl.analysis = .codegen_failure;
-                try module.retryable_failures.append(gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
+                try module.retryable_failures.append(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
             };
         },
         .analyze_mod => |pkg| {
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 4268a84f37ff..cf56550c258c 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -81,7 +81,7 @@ namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.In
 /// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
 /// matches. The `next_dependee` field can be used to iterate all such entries
 /// and remove them from the corresponding lists.
-first_dependency: std.AutoArrayHashMapUnmanaged(Depender, DepEntry.Index) = .{},
+first_dependency: std.AutoArrayHashMapUnmanaged(AnalSubject, DepEntry.Index) = .{},
 
 /// Stores dependency information. The hashmaps declared above are used to look
 /// up entries in this list as required. This is not stored in `extra` so that
@@ -132,39 +132,39 @@ pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.I
     return @enumFromInt(gop.index);
 }
 
-/// Reperesents the "source" of a dependency edge, i.e. either a Decl or a
-/// runtime function (represented as an InternPool index).
-/// MSB is 0 for a Decl, 1 for a function.
-pub const Depender = enum(u32) {
-    _,
+/// Analysis Subject. Represents a single entity which undergoes semantic analysis.
+/// This is either a `Decl` (in future `Cau`) or a runtime function.
+/// The LSB is used as a tag bit.
+/// This is the "source" of an incremental dependency edge.
+pub const AnalSubject = packed struct(u32) { + kind: enum(u1) { decl, func }, + index: u31, pub const Unwrapped = union(enum) { decl: DeclIndex, func: InternPool.Index, }; - pub fn unwrap(dep: Depender) Unwrapped { - const tag: u1 = @truncate(@intFromEnum(dep) >> 31); - const val: u31 = @truncate(@intFromEnum(dep)); - return switch (tag) { - 0 => .{ .decl = @enumFromInt(val) }, - 1 => .{ .func = @enumFromInt(val) }, + pub fn unwrap(as: AnalSubject) Unwrapped { + return switch (as.kind) { + .decl => .{ .decl = @enumFromInt(as.index) }, + .func => .{ .func = @enumFromInt(as.index) }, }; } - pub fn wrap(raw: Unwrapped) Depender { - return @enumFromInt(switch (raw) { - .decl => |decl| @intFromEnum(decl), - .func => |func| (1 << 31) | @intFromEnum(func), - }); + pub fn wrap(raw: Unwrapped) AnalSubject { + return switch (raw) { + .decl => |decl| .{ .kind = .decl, .index = @intCast(@intFromEnum(decl)) }, + .func => |func| .{ .kind = .func, .index = @intCast(@intFromEnum(func)) }, + }; } - pub fn toOptional(dep: Depender) Optional { - return @enumFromInt(@intFromEnum(dep)); + pub fn toOptional(as: AnalSubject) Optional { + return @enumFromInt(@as(u32, @bitCast(as))); } pub const Optional = enum(u32) { none = std.math.maxInt(u32), _, - pub fn unwrap(opt: Optional) ?Depender { + pub fn unwrap(opt: Optional) ?AnalSubject { return switch (opt) { .none => null, - _ => @enumFromInt(@intFromEnum(opt)), + _ => @bitCast(@intFromEnum(opt)), }; } }; @@ -178,7 +178,7 @@ pub const Dependee = union(enum) { namespace_name: NamespaceNameKey, }; -pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: Depender) void { +pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalSubject) void { var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional(); while (opt_idx.unwrap()) |idx| { @@ -207,7 +207,7 @@ pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: pub const DependencyIterator = struct { ip: *const InternPool, next_entry: DepEntry.Index.Optional, - pub fn next(it: *DependencyIterator) ?Depender { + pub fn next(it: *DependencyIterator) ?AnalSubject { const idx = it.next_entry.unwrap() orelse return null; const entry = it.ip.dep_entries.items[@intFromEnum(idx)]; it.next_entry = entry.next; @@ -236,7 +236,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI }; } -pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: Depender, dependee: Dependee) Allocator.Error!void { +pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalSubject, dependee: Dependee) Allocator.Error!void { const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: { // The entry already exists, so there is capacity to overwrite it later. break :dep idx.toOptional(); @@ -300,7 +300,7 @@ pub const DepEntry = extern struct { /// the first and only entry in one of `intern_pool.*_deps`, and does not /// appear in any list by `first_dependency`, but is not in /// `free_dep_entries` since `*_deps` stores a reference to it. - depender: Depender.Optional, + depender: AnalSubject.Optional, /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. /// Used to iterate all dependers for a given dependee during an update. /// null if this is the end of the list. 
diff --git a/src/Sema.zig b/src/Sema.zig index 931799828870..db520f5789bf 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2735,12 +2735,12 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!zcu.comp.debug_incremental) return false; const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu); - const decl_as_depender = InternPool.Depender.wrap(.{ .decl = decl_index }); + const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index }); const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or zcu.potentially_outdated.swapRemove(decl_as_depender); if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); @@ -2834,7 +2834,7 @@ fn zirStructDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), + InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3068,7 +3068,7 @@ fn zirEnumDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), + InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3334,7 +3334,7 @@ fn zirUnionDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), + InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3422,7 +3422,7 @@ fn zirOpaqueDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), + InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) }, ); } @@ -38362,7 +38362,7 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { return; } - const depender = InternPool.Depender.wrap( + const depender = InternPool.AnalSubject.wrap( if (sema.owner_func_index != .none) .{ .func = sema.owner_func_index } else diff --git a/src/Zcu.zig b/src/Zcu.zig index 059aece30949..a490990cf3f9 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -139,26 +139,26 @@ global_error_set: GlobalErrorSet = .{}, /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, -/// Value is the number of PO or outdated Decls which this Depender depends on. -potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, -/// Value is the number of PO or outdated Decls which this Depender depends on. -/// Once this value drops to 0, the Depender is a candidate for re-analysis. -outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, -/// This contains all `Depender`s in `outdated` whose PO dependency count is 0. -/// Such `Depender`s are ready for immediate re-analysis. +/// Value is the number of PO or outdated Decls which this AnalSubject depends on. 
+potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{},
+/// Value is the number of PO or outdated Decls which this AnalSubject depends on.
+/// Once this value drops to 0, the AnalSubject is a candidate for re-analysis.
+outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{},
+/// This contains all `AnalSubject`s in `outdated` whose PO dependency count is 0.
+/// Such `AnalSubject`s are ready for immediate re-analysis.
 /// See `findOutdatedToAnalyze` for details.
-outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{},
+outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, void) = .{},
 /// This contains a set of Decls which may not be in `outdated`, but are the
 /// root Decls of files which have updated source and thus must be re-analyzed.
 /// If such a Decl is only in this set, the struct type index may be preserved
 /// (only the namespace might change). If such a Decl is also `outdated`, the
 /// struct type index must be recreated.
 outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
-/// This contains a list of Dependers whose analysis or codegen failed, but the
+/// This contains a list of AnalSubjects whose analysis or codegen failed, but the
 /// failure was something like running out of disk space, and trying again may
 /// succeed. On the next update, we will flush this list, marking all members of
 /// it as outdated.
-retryable_failures: std.ArrayListUnmanaged(InternPool.Depender) = .{},
+retryable_failures: std.ArrayListUnmanaged(InternPool.AnalSubject) = .{},
 
 stage1_flags: packed struct {
     have_winmain: bool = false,
@@ -3137,9 +3137,9 @@ fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
     }
 }
 
-/// Given a Depender which is newly outdated or PO, mark all Dependers which may
-/// in turn be PO, due to a dependency on the original Depender's tyval or IES.
-fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.Depender) !void {
+/// Given an AnalSubject which is newly outdated or PO, mark all AnalSubjects which may
+/// in turn be PO, due to a dependency on the original AnalSubject's tyval or IES.
+fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.AnalSubject) !void {
     var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) {
         .decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced
         .func => |func_index| .{ .func_ies = func_index },
@@ -3161,12 +3161,12 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternP
             continue;
         }
         try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
-        // This Depender was not already PO, so we must recursively mark its dependers as also PO.
+        // This AnalSubject was not already PO, so we must recursively mark its dependers as also PO.
         try zcu.markTransitiveDependersPotentiallyOutdated(po);
     }
 }
 
-pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
+pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject {
     if (!zcu.comp.debug_incremental) return null;
 
     if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) {
@@ -3174,8 +3174,8 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
         return null;
    }
 
-    // Our goal is to find an outdated Depender which itself has no outdated or
-    // PO dependencies. 
Most of the time, such a Depender will exist - we track
+    // Our goal is to find an outdated AnalSubject which itself has no outdated or
+    // PO dependencies. Most of the time, such an AnalSubject will exist - we track
     // them in the `outdated_ready` set for efficiency. However, this is not
     // necessarily the case, since the Decl dependency graph may contain loops
     // via mutually recursive definitions:
@@ -3197,7 +3197,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
     // `outdated`. This set will be small (number of files changed in this
     // update), so it's alright for us to just iterate here.
     for (zcu.outdated_file_root.keys()) |file_decl| {
-        const decl_depender = InternPool.Depender.wrap(.{ .decl = file_decl });
+        const decl_depender = InternPool.AnalSubject.wrap(.{ .decl = file_decl });
         if (zcu.outdated.contains(decl_depender)) {
             // Since we didn't hit this in the first loop, this Decl must have
             // pending dependencies, so is ineligible.
@@ -3213,7 +3213,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
         return decl_depender;
     }
 
-    // There is no single Depender which is ready for re-analysis. Instead, we
+    // There is no single AnalSubject which is ready for re-analysis. Instead, we
     // must assume that some Decl with PO dependencies is outdated - e.g. in the
     // above example we arbitrarily pick one of A or B. We should select a Decl,
     // since a Decl is definitely responsible for the loop in the dependency
@@ -3221,7 +3221,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
 
     // The choice of this Decl could have a big impact on how much total
     // analysis we perform, since if analysis concludes its tyval is unchanged,
-    // then other PO Dependers may be resolved as up-to-date. To hopefully avoid
+    // then other PO AnalSubjects may be resolved as up-to-date. To hopefully avoid
     // doing too much work, let's find a Decl which the most things depend on -
     // the idea is that this will resolve a lot of loops (but this is only a
     // heuristic).
@@ -3271,7 +3271,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
         chosen_decl_dependers,
     });
 
-    return InternPool.Depender.wrap(.{ .decl = chosen_decl_idx.? });
+    return InternPool.AnalSubject.wrap(.{ .decl = chosen_decl_idx.? });
 }
 
 /// During an incremental update, before semantic analysis, call this to flush all values from
@@ -3281,12 +3281,12 @@ pub fn flushRetryableFailures(zcu: *Zcu) !void {
     for (zcu.retryable_failures.items) |depender| {
         if (zcu.outdated.contains(depender)) continue;
         if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
-            // This Depender was already PO, but we now consider it outdated.
+            // This AnalSubject was already PO, but we now consider it outdated.
             // Any transitive dependencies are already marked PO.
             try zcu.outdated.put(gpa, depender, kv.value);
             continue;
         }
-        // This Depender was not marked PO, but is now outdated. Mark it as
+        // This AnalSubject was not marked PO, but is now outdated. Mark it as
         // such, then recursively mark transitive dependencies as PO.
         try zcu.outdated.put(gpa, depender, 0);
         try zcu.markTransitiveDependersPotentiallyOutdated(depender);
@@ -3456,7 +3456,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
     // which tries to limit re-analysis to Decls whose previously listed
     // dependencies are all up-to-date. 
- const decl_as_depender = InternPool.Depender.wrap(.{ .decl = decl_index }); + const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index }); const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or mod.potentially_outdated.swapRemove(decl_as_depender); @@ -3522,7 +3522,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { else => |e| { decl.analysis = .sema_failure; try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); - try mod.retryable_failures.append(mod.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + try mod.retryable_failures.append(mod.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, decl.navSrcLoc(mod).upgrade(mod), @@ -3581,7 +3581,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In // that's the case, we should remove this function from the binary. if (decl.val.ip_index != func_index) { try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - ip.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); + ip.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); ip.remove(func_index); @panic("TODO: remove orphaned function from binary"); } @@ -3607,7 +3607,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .complete => {}, } - const func_as_depender = InternPool.Depender.wrap(.{ .func = func_index }); + const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index }); const was_outdated = zcu.outdated.swapRemove(func_as_depender) or zcu.potentially_outdated.swapRemove(func_as_depender); @@ -3728,7 +3728,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .{@errorName(err)}, )); func.analysis(ip).state = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.Depender.wrap(.{ .func = func_index })); + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -3773,7 +3773,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) assert(decl.has_tv); - const func_as_depender = InternPool.Depender.wrap(.{ .func = func_index }); + const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index }); const is_outdated = mod.outdated.contains(func_as_depender) or mod.potentially_outdated.contains(func_as_depender); @@ -3857,7 +3857,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa if (zcu.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.Depender.wrap(.{ .decl = decl_index }), + InternPool.AnalSubject.wrap(.{ .decl = decl_index }), .{ .src_hash = tracked_inst }, ); } @@ -3906,7 +3906,7 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { if (type_outdated) { // Invalidate the existing type, reusing the decl and namespace. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = file.root_decl.unwrap().? })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = file.root_decl.unwrap().? 
})); zcu.intern_pool.remove(decl.val.toIntern()); decl.val = undefined; _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file); @@ -4097,7 +4097,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { break :ip_index .none; }; - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); decl.analysis = .in_progress; @@ -4323,7 +4323,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { // with a new Decl. // // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(decl.val.toIntern()); decl.analysis = .dependency_failure; return .{ @@ -5026,7 +5026,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); defer decl_prog_node.end(); - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -5627,7 +5627,7 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { .{@errorName(err)}, )); decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { From 6e78642d5158bdf6240c446fee5775c2888ad82e Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 26 Jun 2024 07:38:36 +0200 Subject: [PATCH 003/152] macho: link in TSAN if requested --- src/link/MachO.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 9cec01c43789..5ec5cd764c56 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -411,6 +411,11 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) if (module_obj_path) |path| try positionals.append(.{ .path = path }); + // TSAN + if (comp.config.any_sanitize_thread) { + try positionals.append(.{ .path = comp.tsan_static_lib.?.full_object_path }); + } + for (positionals.items) |obj| { self.parsePositional(obj.path, obj.must_link) catch |err| switch (err) { error.MalformedObject, From e9309036b28d402afd5f9f77d2d32e7d7b902b9f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 26 Jun 2024 08:43:46 +0200 Subject: [PATCH 004/152] macho: print libtsan ref when dumping argv --- src/link/MachO.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 5ec5cd764c56..75b2ab4d74aa 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -830,6 +830,10 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { try argv.append(p); } + if (comp.config.any_sanitize_thread) { + try argv.append(comp.tsan_static_lib.?.full_object_path); + } + for (self.lib_dirs) |lib_dir| { const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir}); try argv.append(arg); From 
766e281a72bb915ebee21cafd862d6b7a2a298ca Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 26 Jun 2024 22:01:00 +0200 Subject: [PATCH 005/152] macho: set allow_shlib_undefined to true when TSAN was requested --- src/link/MachO.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 75b2ab4d74aa..dd185fcaec81 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -192,7 +192,7 @@ pub fn createEmpty( null else try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path}); - const allow_shlib_undefined = options.allow_shlib_undefined orelse false; + const allow_shlib_undefined = options.allow_shlib_undefined orelse comp.config.any_sanitize_thread; const self = try arena.create(MachO); self.* = .{ From cb308ba3ac2d7e3735d1cb42ef085edb1e6db723 Mon Sep 17 00:00:00 2001 From: Wayne Wu <11427457+wyw@users.noreply.github.com> Date: Fri, 28 Jun 2024 03:37:44 +0800 Subject: [PATCH 006/152] langref: correct test runner path --- doc/langref.html.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 9ef36919914d..d20c9028687d 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -800,7 +800,7 @@

The shell output shown above displays two lines after the zig test command. These lines are From 604a332e21f9aaff13c7b13331d9c6926c6c9d52 Mon Sep 17 00:00:00 2001 From: Sashko <2179872-misanthrop@users.noreply.gitlab.com> Date: Tue, 12 Mar 2024 09:08:03 +0100 Subject: [PATCH 007/152] Extract getZigArgs function in std.Build.Step.Compile --- lib/std/Build/Step/Compile.zig | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index d18d8de41328..504025e3d404 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -989,10 +989,10 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking return path; } -fn make(step: *Step, prog_node: std.Progress.Node) !void { +fn getZigArgs(compile: *Compile) ![][]const u8 { + const step = &compile.step; const b = step.owner; const arena = b.allocator; - const compile: *Compile = @fieldParentPtr("step", step); var zig_args = ArrayList([]const u8).init(arena); defer zig_args.deinit(); @@ -1298,6 +1298,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { // We need to emit the --mod argument here so that the above link objects // have the correct parent module, but only if the module is part of // this compilation. + if (!my_responsibility) continue; if (cli_named_modules.modules.getIndex(dep.module)) |module_cli_index| { const module_cli_name = cli_named_modules.names.keys()[module_cli_index]; try dep.module.appendZigProcessFlags(&zig_args, step); @@ -1724,7 +1725,16 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { try zig_args.append(resolved_args_file); } - const maybe_output_bin_path = step.evalZigProcess(zig_args.items, prog_node) catch |err| switch (err) { + return try zig_args.toOwnedSlice(); +} + +fn make(step: *Step, prog_node: std.Progress.Node) !void { + const b = step.owner; + const compile: *Compile = @fieldParentPtr("step", step); + + const zig_args = try getZigArgs(compile); + + const maybe_output_bin_path = step.evalZigProcess(zig_args, prog_node) catch |err| switch (err) { error.NeedCompileErrorCheck => { assert(compile.expect_errors != null); try checkCompileErrors(compile); From a717ac0340b6d177e4f02d96dff5418e6961391c Mon Sep 17 00:00:00 2001 From: Sashko <2179872-misanthrop@users.noreply.gitlab.com> Date: Tue, 12 Mar 2024 09:46:21 +0100 Subject: [PATCH 008/152] Add a standalone test to cover the duplicate module bug --- test/standalone/build.zig.zon | 3 ++ .../standalone/dep_duplicate_module/build.zig | 32 +++++++++++++++++++ test/standalone/dep_duplicate_module/lib.zig | 6 ++++ test/standalone/dep_duplicate_module/main.zig | 8 +++++ test/standalone/dep_duplicate_module/mod.zig | 7 ++++ 5 files changed, 56 insertions(+) create mode 100644 test/standalone/dep_duplicate_module/build.zig create mode 100644 test/standalone/dep_duplicate_module/lib.zig create mode 100644 test/standalone/dep_duplicate_module/main.zig create mode 100644 test/standalone/dep_duplicate_module/mod.zig diff --git a/test/standalone/build.zig.zon b/test/standalone/build.zig.zon index 1e79a547e928..80e9ba046c76 100644 --- a/test/standalone/build.zig.zon +++ b/test/standalone/build.zig.zon @@ -86,6 +86,9 @@ .dirname = .{ .path = "dirname", }, + .dep_duplicate_module = .{ + .path = "dep_duplicate_module", + }, .empty_env = .{ .path = "empty_env", }, diff --git a/test/standalone/dep_duplicate_module/build.zig b/test/standalone/dep_duplicate_module/build.zig new file mode 100644 index 
000000000000..9148bf2c8fd2 --- /dev/null +++ b/test/standalone/dep_duplicate_module/build.zig @@ -0,0 +1,32 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + const mod = b.addModule("mod", .{ + .root_source_file = .{ .path = "mod.zig" }, + .target = target, + .optimize = optimize, + }); + + const lib = b.addStaticLibrary(.{ + .name = "lib", + .root_source_file = .{ .path = "lib.zig" }, + .target = target, + .optimize = optimize, + }); + lib.root_module.addImport("mod", mod); + + const exe = b.addExecutable(.{ + .name = "app", + .root_source_file = .{ .path = "main.zig" }, + .target = target, + .optimize = optimize, + }); + + exe.root_module.addImport("mod", mod); + exe.root_module.linkLibrary(lib); + + b.installArtifact(exe); +} diff --git a/test/standalone/dep_duplicate_module/lib.zig b/test/standalone/dep_duplicate_module/lib.zig new file mode 100644 index 000000000000..0a44f1a8ce4f --- /dev/null +++ b/test/standalone/dep_duplicate_module/lib.zig @@ -0,0 +1,6 @@ +const std = @import("std"); +const mod = @import("mod"); + +export fn work(x: u32) u32 { + return mod.double(x); +} diff --git a/test/standalone/dep_duplicate_module/main.zig b/test/standalone/dep_duplicate_module/main.zig new file mode 100644 index 000000000000..0ff26699c34a --- /dev/null +++ b/test/standalone/dep_duplicate_module/main.zig @@ -0,0 +1,8 @@ +const std = @import("std"); +const mod = @import("mod"); + +extern fn work(x: u32) u32; + +pub fn main() !void { + _ = work(mod.half(25)); +} diff --git a/test/standalone/dep_duplicate_module/mod.zig b/test/standalone/dep_duplicate_module/mod.zig new file mode 100644 index 000000000000..019ae3be9744 --- /dev/null +++ b/test/standalone/dep_duplicate_module/mod.zig @@ -0,0 +1,7 @@ +pub fn double(v: u32) u32 { + return v * 2; +} + +pub fn half(v: u32) u32 { + return v / 2; +} From 8db1490b8a36155d85a2d5741f97984ff583cfde Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 1 Jul 2024 16:38:02 -0700 Subject: [PATCH 009/152] update test build script to latest API --- test/standalone/dep_duplicate_module/build.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/standalone/dep_duplicate_module/build.zig b/test/standalone/dep_duplicate_module/build.zig index 9148bf2c8fd2..733d49b91c93 100644 --- a/test/standalone/dep_duplicate_module/build.zig +++ b/test/standalone/dep_duplicate_module/build.zig @@ -5,14 +5,14 @@ pub fn build(b: *std.Build) void { const optimize = b.standardOptimizeOption(.{}); const mod = b.addModule("mod", .{ - .root_source_file = .{ .path = "mod.zig" }, + .root_source_file = b.path("mod.zig"), .target = target, .optimize = optimize, }); const lib = b.addStaticLibrary(.{ .name = "lib", - .root_source_file = .{ .path = "lib.zig" }, + .root_source_file = b.path("lib.zig"), .target = target, .optimize = optimize, }); @@ -20,7 +20,7 @@ pub fn build(b: *std.Build) void { const exe = b.addExecutable(.{ .name = "app", - .root_source_file = .{ .path = "main.zig" }, + .root_source_file = b.path("main.zig"), .target = target, .optimize = optimize, }); From 8ef24461a0016062e0ca20d1a152dd82fac511ab Mon Sep 17 00:00:00 2001 From: Sashko <2179872-misanthrop@users.noreply.gitlab.com> Date: Sat, 30 Mar 2024 17:36:21 +0100 Subject: [PATCH 010/152] DynLib: fix a typo in DynLib.openZ --- lib/std/dynamic_library.zig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/std/dynamic_library.zig 
b/lib/std/dynamic_library.zig index f0e8ae0f117f..a1db48b470cc 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -31,7 +31,7 @@ pub const DynLib = struct { /// Trusts the file. Malicious file will be able to execute arbitrary code. pub fn openZ(path_c: [*:0]const u8) Error!DynLib { - return .{ .inner = try InnerType.open(path_c) }; + return .{ .inner = try InnerType.openZ(path_c) }; } /// Trusts the file. @@ -376,7 +376,7 @@ pub const WindowsDynLib = struct { /// WindowsDynLib specific /// Opens dynamic library with specified library loading flags. pub fn openExZ(path_c: [*:0]const u8, flags: windows.LoadLibraryFlags) Error!WindowsDynLib { - const path_w = try windows.cStrToPrefixedFileW(null, path_c); + const path_w = windows.cStrToPrefixedFileW(null, path_c) catch return error.InvalidPath; return openExW(path_w.span().ptr, flags); } @@ -466,4 +466,5 @@ test "dynamic_library" { }; try testing.expectError(error.FileNotFound, DynLib.open(libname)); + try testing.expectError(error.FileNotFound, DynLib.openZ(libname.ptr)); } From da7c48324b1caf119fb183303deedc84a62c3106 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 1 Jul 2024 22:09:02 -0700 Subject: [PATCH 011/152] CI: update macOS runner to 12 Apple has already dropped support for macOS 11. GitHub Actions is dropping macOS 11 support now. The Zig project is also dropping macOS 11 support now. --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 580a76f598fd..04be33bb6bc9 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -43,7 +43,7 @@ jobs: - name: Build and Test run: sh ci/aarch64-linux-release.sh x86_64-macos-release: - runs-on: "macos-11" + runs-on: "macos-12" env: ARCH: "x86_64" steps: From e4447c54eaae4d416cb3027d9cefad11196f9f6d Mon Sep 17 00:00:00 2001 From: David Rubin Date: Mon, 1 Jul 2024 15:08:18 -0700 Subject: [PATCH 012/152] add `ppc64` reloc definitions --- lib/std/elf.zig | 119 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 118 insertions(+), 1 deletion(-) diff --git a/lib/std/elf.zig b/lib/std/elf.zig index bdb84f51719a..2ba0783bee19 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -1896,7 +1896,7 @@ pub const R_X86_64 = enum(u32) { _, }; -/// AArch64 relocs. +/// AArch64 relocations. pub const R_AARCH64 = enum(u32) { /// No relocation. NONE = 0, @@ -2233,6 +2233,123 @@ pub const R_RISCV = enum(u32) { _, }; +/// PowerPC64 relocations. 
+pub const R_PPC64 = enum(u32) { + NONE = 0, + ADDR32 = 1, + ADDR24 = 2, + ADDR16 = 3, + ADDR16_LO = 4, + ADDR16_HI = 5, + ADDR16_HA = 6, + ADDR14 = 7, + ADDR14_BRTAKEN = 8, + ADDR14_BRNTAKEN = 9, + REL24 = 10, + REL14 = 11, + REL14_BRTAKEN = 12, + REL14_BRNTAKEN = 13, + GOT16 = 14, + GOT16_LO = 15, + GOT16_HI = 16, + GOT16_HA = 17, + COPY = 19, + GLOB_DAT = 20, + JMP_SLOT = 21, + RELATIVE = 22, + REL32 = 26, + PLT16_LO = 29, + PLT16_HI = 30, + PLT16_HA = 31, + ADDR64 = 38, + ADDR16_HIGHER = 39, + ADDR16_HIGHERA = 40, + ADDR16_HIGHEST = 41, + ADDR16_HIGHESTA = 42, + REL64 = 44, + TOC16 = 47, + TOC16_LO = 48, + TOC16_HI = 49, + TOC16_HA = 50, + TOC = 51, + ADDR16_DS = 56, + ADDR16_LO_DS = 57, + GOT16_DS = 58, + GOT16_LO_DS = 59, + PLT16_LO_DS = 60, + TOC16_DS = 63, + TOC16_LO_DS = 64, + TLS = 67, + DTPMOD64 = 68, + TPREL16 = 69, + TPREL16_LO = 70, + TPREL16_HI = 71, + TPREL16_HA = 72, + TPREL64 = 73, + DTPREL16 = 74, + DTPREL16_LO = 75, + DTPREL16_HI = 76, + DTPREL16_HA = 77, + DTPREL64 = 78, + GOT_TLSGD16 = 79, + GOT_TLSGD16_LO = 80, + GOT_TLSGD16_HI = 81, + GOT_TLSGD16_HA = 82, + GOT_TLSLD16 = 83, + GOT_TLSLD16_LO = 84, + GOT_TLSLD16_HI = 85, + GOT_TLSLD16_HA = 86, + GOT_TPREL16_DS = 87, + GOT_TPREL16_LO_DS = 88, + GOT_TPREL16_HI = 89, + GOT_TPREL16_HA = 90, + GOT_DTPREL16_DS = 91, + GOT_DTPREL16_LO_DS = 92, + GOT_DTPREL16_HI = 93, + GOT_DTPREL16_HA = 94, + TPREL16_DS = 95, + TPREL16_LO_DS = 96, + TPREL16_HIGHER = 97, + TPREL16_HIGHERA = 98, + TPREL16_HIGHEST = 99, + TPREL16_HIGHESTA = 100, + DTPREL16_DS = 101, + DTPREL16_LO_DS = 102, + DTPREL16_HIGHER = 103, + DTPREL16_HIGHERA = 104, + DTPREL16_HIGHEST = 105, + DTPREL16_HIGHESTA = 106, + TLSGD = 107, + TLSLD = 108, + ADDR16_HIGH = 110, + ADDR16_HIGHA = 111, + TPREL16_HIGH = 112, + TPREL16_HIGHA = 113, + DTPREL16_HIGH = 114, + DTPREL16_HIGHA = 115, + REL24_NOTOC = 116, + PLTSEQ = 119, + PLTCALL = 120, + PLTSEQ_NOTOC = 121, + PLTCALL_NOTOC = 122, + PCREL_OPT = 123, + PCREL34 = 132, + GOT_PCREL34 = 133, + PLT_PCREL34 = 134, + PLT_PCREL34_NOTOC = 135, + TPREL34 = 146, + DTPREL34 = 147, + GOT_TLSGD_PCREL34 = 148, + GOT_TLSLD_PCREL34 = 149, + GOT_TPREL_PCREL34 = 150, + IRELATIVE = 248, + REL16 = 249, + REL16_LO = 250, + REL16_HI = 251, + REL16_HA = 252, + _, +}; + pub const STV = enum(u2) { DEFAULT = 0, INTERNAL = 1, From 4b9d327f123df935adcde9dadcb8561f7a84ef95 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Fri, 28 Jun 2024 00:04:18 +0200 Subject: [PATCH 013/152] stage2-wasm: sign extend strategy --- src/arch/wasm/CodeGen.zig | 503 +++++++++++--------------------------- 1 file changed, 148 insertions(+), 355 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bfadbf0a794e..c0e7d029609e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2325,14 +2325,19 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void WValue{ .imm32 = @as(u32, @truncate(mask)) } else WValue{ .imm64 = mask }; + const wrap_mask_val = if (ptr_info.packed_offset.host_size <= 4) + WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod))) } + else + WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod)) }; try func.emitWValue(lhs); const loaded = try func.load(lhs, int_elem_ty, 0); const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and"); const extended_value = try func.intcast(rhs, ty, int_elem_ty); + const masked_value = try func.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and"); const shifted_value = if (ptr_info.packed_offset.bit_offset > 
0) shifted: { - break :shifted try func.binOp(extended_value, shift_val, int_elem_ty, .shl); - } else extended_value; + break :shifted try func.binOp(masked_value, shift_val, int_elem_ty, .shl); + } else masked_value; const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or"); // lhs is still on the stack try func.store(.stack, result, int_elem_ty, lhs.offset()); @@ -2515,7 +2520,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, - .signedness = .unsigned, + .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addMemArg( @@ -2800,10 +2805,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (wasm_bits) { 32 => { try func.emitWValue(operand); - if (wasm_bits != int_bits) { - try func.addImm32(wasm_bits - int_bits); - try func.addTag(.i32_shl); - } + try func.addImm32(31); try func.addTag(.i32_shr_s); @@ -2815,15 +2817,10 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_xor); try func.emitWValue(tmp); try func.addTag(.i32_sub); - - _ = try func.wrapOperand(.stack, ty); }, 64 => { try func.emitWValue(operand); - if (wasm_bits != int_bits) { - try func.addImm64(wasm_bits - int_bits); - try func.addTag(.i64_shl); - } + try func.addImm64(63); try func.addTag(.i64_shr_s); @@ -2835,8 +2832,6 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i64_xor); try func.emitWValue(tmp); try func.addTag(.i64_sub); - - _ = try func.wrapOperand(.stack, ty); }, 128 => { const mask = try func.allocStack(Type.u128); @@ -2844,10 +2839,6 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(mask); _ = try func.load(operand, Type.u64, 8); - if (int_bits != 128) { - try func.addImm64(128 - int_bits); - try func.addTag(.i64_shl); - } try func.addImm64(63); try func.addTag(.i64_shr_s); @@ -2860,9 +2851,8 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const a = try func.binOpBigInt(operand, mask, Type.u128, .xor); const b = try func.binOpBigInt(a, mask, Type.u128, .sub); - const result = try func.wrapOperand(b, ty); - func.finishAir(inst, result, &.{ty_op.operand}); + func.finishAir(inst, b, &.{ty_op.operand}); return; }, else => unreachable, @@ -3058,14 +3048,28 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { switch (wasm_bits) { 32 => { try func.emitWValue(operand); - try func.addImm32((@as(u32, 1) << @intCast(int_bits)) - 1); - try func.addTag(.i32_and); + if (ty.isSignedInt(mod)) { + try func.addImm32(32 - int_bits); + try func.addTag(.i32_shl); + try func.addImm32(32 - int_bits); + try func.addTag(.i32_shr_s); + } else { + try func.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits)); + try func.addTag(.i32_and); + } return .stack; }, 64 => { try func.emitWValue(operand); - try func.addImm64((@as(u64, 1) << @intCast(int_bits)) - 1); - try func.addTag(.i64_and); + if (ty.isSignedInt(mod)) { + try func.addImm64(64 - int_bits); + try func.addTag(.i64_shl); + try func.addImm64(64 - int_bits); + try func.addTag(.i64_shr_s); + } else { + try func.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits)); + try func.addTag(.i64_and); + } return .stack; }, 128 => { @@ -3078,8 +3082,15 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { try func.emitWValue(result); _ = try func.load(operand, Type.u64, 8); - try func.addImm64((@as(u64, 1) << @intCast(int_bits - 64)) - 1); - try 
func.addTag(.i64_and); + if (ty.isSignedInt(mod)) { + try func.addImm64(128 - int_bits); + try func.addTag(.i64_shl); + try func.addImm64(128 - int_bits); + try func.addTag(.i64_shr_s); + } else { + try func.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits)); + try func.addTag(.i64_and); + } try func.store(.stack, .stack, Type.u64, result.offset() + 8); return result; @@ -3201,22 +3212,6 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u } else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; } -/// Converts a signed integer to its 2's complement form and returns -/// an unsigned integer instead. -/// Asserts bitsize <= 64 -fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(@TypeOf(value)).Int.bits) { - const T = @TypeOf(value); - comptime assert(@typeInfo(T) == .Int); - comptime assert(@typeInfo(T).Int.signedness == .signed); - assert(bits <= 64); - const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - if (value >= 0) return @as(WantedT, @bitCast(value)); - const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1)); - const flipped = @as(T, @intCast((~-@as(i65, value)) + 1)); - const result = @as(WantedT, @bitCast(flipped)) & max_value; - return @as(WantedT, @intCast(result)); -} - /// Asserts that `isByRef` returns `false` for `ty`. fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const mod = func.bin_file.base.comp.module.?; @@ -3268,18 +3263,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement( - val.toSignedInt(mod), - @as(u6, @intCast(int_info.bits)), - ))) }, - 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(mod), - @as(u7, @intCast(int_info.bits)), - ) }, + 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(mod)))) }, + 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(mod)) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) }, + 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(mod)) }, 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, @@ -3618,29 +3607,11 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO // incase of an actual integer, we emit the correct signedness break :blk ty.intInfo(mod).signedness; }; - const extend_sign = blk: { - // do we need to extend the sign bit? - if (signedness != .signed) break :blk false; - if (op == .eq or op == .neq) break :blk false; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse unreachable; - break :blk (wasm_bits != int_bits); - }; - - const lhs_wasm = if (extend_sign) - try func.signExtendInt(lhs, ty) - else - lhs; - - const rhs_wasm = if (extend_sign) - try func.signExtendInt(rhs, ty) - else - rhs; // ensure that when we compare pointers, we emit // the true pointer of a stack value, rather than the stack pointer. 
- try func.lowerToStack(lhs_wasm); - try func.lowerToStack(rhs_wasm); + try func.lowerToStack(lhs); + try func.lowerToStack(rhs); const opcode: wasm.Opcode = buildOpcode(.{ .valtype1 = typeToValtype(ty, mod), @@ -3812,25 +3783,44 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.comp.module.?; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try func.resolveInst(ty_op.operand); + const wanted_ty = func.typeOfIndex(inst); + const given_ty = func.typeOf(ty_op.operand); + + const bit_size = given_ty.bitSize(mod); + const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and + bit_size != 32 and bit_size != 64 and bit_size != 128; + const result = result: { - const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.typeOfIndex(inst); - const given_ty = func.typeOf(ty_op.operand); if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand); break :result try bitcast_result.toLocal(func, wanted_ty); } - const mod = func.bin_file.base.comp.module.?; + if (isByRef(given_ty, mod) and !isByRef(wanted_ty, mod)) { const loaded_memory = try func.load(operand, wanted_ty, 0); - break :result try loaded_memory.toLocal(func, wanted_ty); + if (needs_wrapping) { + break :result try (try func.wrapOperand(loaded_memory, wanted_ty)).toLocal(func, wanted_ty); + } else { + break :result try loaded_memory.toLocal(func, wanted_ty); + } } if (!isByRef(given_ty, mod) and isByRef(wanted_ty, mod)) { const stack_memory = try func.allocStack(wanted_ty); try func.store(stack_memory, operand, given_ty, 0); - break :result stack_memory; + if (needs_wrapping) { + break :result try (try func.wrapOperand(stack_memory, wanted_ty)).toLocal(func, wanted_ty); + } else { + break :result stack_memory; + } } + + if (needs_wrapping) { + break :result try (try func.wrapOperand(operand, wanted_ty)).toLocal(func, wanted_ty); + } + break :result func.reuseOperand(ty_op.operand, operand); }; func.finishAir(inst, result, &.{ty_op.operand}); @@ -4355,7 +4345,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?; const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; - const result = if (op_bits == wanted_bits and !ty.isSignedInt(mod)) + const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty); @@ -4377,37 +4367,17 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro const op_bits = toWasmBits(given_bitsize).?; const wanted_bits = toWasmBits(wanted_bitsize).?; if (op_bits == wanted_bits) { - if (given.isSignedInt(mod)) { - if (given_bitsize < wanted_bitsize) { - // signed integers are stored as two's complement, - // when we upcast from a smaller integer to larger - // integers, we must get its absolute value similar to - // i64_extend_i32_s instruction. 
- return func.signExtendInt(operand, given); - } - return func.wrapOperand(operand, wanted); - } return operand; } - if (op_bits > 32 and op_bits <= 64 and wanted_bits == 32) { + if (op_bits == 64 and wanted_bits == 32) { try func.emitWValue(operand); try func.addTag(.i32_wrap_i64); - if (given.isSignedInt(mod) and wanted_bitsize < 32) - return func.wrapOperand(.{ .stack = {} }, wanted) - else - return WValue{ .stack = {} }; - } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { - const operand32 = if (given_bitsize < 32 and wanted.isSignedInt(mod)) - try func.signExtendInt(operand, given) - else - operand; - try func.emitWValue(operand32); + return .stack; + } else if (op_bits == 32 and wanted_bits == 64) { + try func.emitWValue(operand); try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); - if (given.isSignedInt(mod) and wanted_bitsize < 64) - return func.wrapOperand(.{ .stack = {} }, wanted) - else - return WValue{ .stack = {} }; + return .stack; } else if (wanted_bits == 128) { // for 128bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4416,17 +4386,18 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it // meaning less store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); + const sign_ty = if (wanted.isSignedInt(mod)) Type.i64 else Type.u64; + break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty); } else operand; // store msb first - try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); + try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); - try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); + try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset()); } else { // Ensure memory of lsb is zero'd try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8); @@ -5823,25 +5794,34 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; switch (wasm_bits) { + 32 => { + try func.emitWValue(operand); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits)); + } + try func.addTag(.i32_popcnt); + }, + 64 => { + try func.emitWValue(operand); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits)); + } + try func.addTag(.i64_popcnt); + try func.addTag(.i32_wrap_i64); + try func.emitWValue(operand); + }, 128 => { _ = try func.load(operand, Type.u64, 0); try func.addTag(.i64_popcnt); _ = try func.load(operand, Type.u64, 8); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits - 64)); + } try func.addTag(.i64_popcnt); try func.addTag(.i64_add); try func.addTag(.i32_wrap_i64); }, - else => { - try func.emitWValue(operand); - switch (wasm_bits) { - 32 => try func.addTag(.i32_popcnt), - 64 => { - try func.addTag(.i64_popcnt); - try func.addTag(.i32_wrap_i64); - }, - else => unreachable, - } - }, + else => unreachable, } 
const result = try func.allocLocal(result_ty); @@ -5877,7 +5857,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const reversed = if (bits == 32) intrin_ret else - try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, Type.u32, .shr); + try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr); const result = try reversed.toLocal(func, ty); func.finishAir(inst, result, &.{ty_op.operand}); }, @@ -5891,7 +5871,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const reversed = if (bits == 64) intrin_ret else - try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, Type.u64, .shr); + try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr); const result = try reversed.toLocal(func, ty); func.finishAir(inst, result, &.{ty_op.operand}); }, @@ -5928,7 +5908,11 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { defer tmp.free(func); try func.addLabel(.local_tee, tmp.local.value); try func.emitWValue(.{ .imm64 = 128 - bits }); - try func.addTag(.i64_shr_u); + if (ty.isSignedInt(mod)) { + try func.addTag(.i64_shr_s); + } else { + try func.addTag(.i64_shr_u); + } try func.store(.stack, .stack, Type.u64, result.offset() + 8); try func.addLabel(.local_get, tmp.local.value); try func.emitWValue(.{ .imm64 = bits - 64 }); @@ -5996,8 +5980,8 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_op = try func.resolveInst(extra.lhs); - const rhs_op = try func.resolveInst(extra.rhs); + const lhs = try func.resolveInst(extra.lhs); + const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.comp.module.?; @@ -6012,7 +5996,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro }; if (wasm_bits == 128) { - const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op); + const result = try func.addSubWithOverflowBigInt(lhs, rhs, lhs_ty, func.typeOfIndex(inst), op); return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } @@ -6022,24 +6006,6 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro else => unreachable, }; - // for signed integers, we first apply signed shifts by the difference in bits - // to get the signed value, as we store it internally as 2's complement. - var lhs = if (wasm_bits != int_info.bits and is_signed) blk: { - break :blk try (try func.signExtendInt(lhs_op, lhs_ty)).toLocal(func, lhs_ty); - } else lhs_op; - var rhs = if (wasm_bits != int_info.bits and is_signed) blk: { - break :blk try (try func.signExtendInt(rhs_op, lhs_ty)).toLocal(func, lhs_ty); - } else rhs_op; - - // in this case, we performed a signExtendInt which created a temporary local - // so let's free this so it can be re-used instead. - // In the other case we do not want to free it, because that would free the - // resolved instructions which may be referenced by other instructions. 
- defer if (wasm_bits != int_info.bits and is_signed) { - lhs.free(func); - rhs.free(func); - }; - const bin_op = try (try func.binOp(lhs, rhs, lhs_ty, op)).toLocal(func, lhs_ty); var result = if (wasm_bits != int_info.bits) blk: { break :blk try (try func.wrapOperand(bin_op, lhs_ty)).toLocal(func, lhs_ty); @@ -6053,8 +6019,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lt = try func.cmp(bin_op, lhs, lhs_ty, .lt); break :blk try func.binOp(cmp_zero, lt, Type.u32, .xor); } - const abs = try func.signExtendInt(bin_op, lhs_ty); - break :blk try func.cmp(abs, bin_op, lhs_ty, .neq); + break :blk try func.cmp(bin_op, bin_op, lhs_ty, .neq); } else if (wasm_bits == int_info.bits) try func.cmp(bin_op, lhs, lhs_ty, cmp_op) else @@ -6150,7 +6115,6 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const int_info = lhs_ty.intInfo(mod); - const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); }; @@ -6170,13 +6134,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } else shl; defer result.free(func); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits) - const overflow_bit = if (wasm_bits != int_info.bits and is_signed) blk: { - // emit lhs to stack to we can keep 'wrapped' on the stack also - try func.emitWValue(lhs); - const abs = try func.signExtendInt(shl, lhs_ty); - const wrapped = try func.wrapBinOp(abs, rhs_final, lhs_ty, .shr); - break :blk try func.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq); - } else blk: { + const overflow_bit = blk: { try func.emitWValue(lhs); const shr = try func.binOp(result, rhs_final, lhs_ty, .shr); break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq); @@ -6245,10 +6203,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :blk down_cast; } } else if (int_info.signedness == .signed and wasm_bits == 32) blk: { - const lhs_abs = try func.signExtendInt(lhs, lhs_ty); - const rhs_abs = try func.signExtendInt(rhs, lhs_ty); - const bin_op = try (try func.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(func, lhs_ty); - const mul_abs = try func.signExtendInt(bin_op, lhs_ty); + const bin_op = try (try func.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(func, lhs_ty); + const mul_abs = try func.wrapOperand(bin_op, lhs_ty); _ = try func.cmp(mul_abs, bin_op, lhs_ty, .neq); try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); @@ -6697,6 +6653,9 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: @byteSwap for vectors", .{}); } const int_info = ty.intInfo(mod); + const wasm_bits = toWasmBits(int_info.bits) orelse { + return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}); + }; // bytes are no-op if (int_info.bits == 8) { @@ -6704,73 +6663,34 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const result = result: { - switch (int_info.bits) { - 16 => { - const shl_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shl); - const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF00 }, ty, .@"and"); - const shr_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shr); - const res = if (int_info.signedness == .signed) blk: { - break :blk try func.wrapOperand(shr_res, Type.u8); - } else shr_res; - break :result try (try 
func.binOp(lhs, res, ty, .@"or")).toLocal(func, ty); - }, - 24 => { - var msb = try (try func.wrapOperand(operand, Type.u16)).toLocal(func, Type.u16); - defer msb.free(func); - - const shl_res = try func.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl); - const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and"); - const shr_res = try func.binOp(msb, .{ .imm32 = 8 }, ty, .shr); - - const res = if (int_info.signedness == .signed) blk: { - break :blk try func.wrapOperand(shr_res, Type.u8); - } else shr_res; - const lhs_tmp = try func.binOp(lhs, res, ty, .@"or"); - const lhs_result = try func.binOp(lhs_tmp, .{ .imm32 = 8 }, ty, .shr); - const rhs_wrap = try func.wrapOperand(msb, Type.u8); - const rhs_result = try func.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl); - - const lsb = try func.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr); - const tmp = try func.binOp(lhs_result, rhs_result, ty, .@"or"); - break :result try (try func.binOp(tmp, lsb, ty, .@"or")).toLocal(func, ty); - }, + switch (wasm_bits) { 32 => { - const shl_tmp = try func.binOp(operand, .{ .imm32 = 8 }, Type.u32, .shl); - const lhs = try func.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, Type.u32, .@"and"); - const shr_tmp = try func.binOp(operand, .{ .imm32 = 8 }, Type.u32, .shr); - const rhs = try func.binOp(shr_tmp, .{ .imm32 = 0x00FF00FF }, Type.u32, .@"and"); - var tmp_or = try (try func.binOp(lhs, rhs, Type.u32, .@"or")).toLocal(func, Type.u32); - - const shl = try func.binOp(tmp_or, .{ .imm32 = 16 }, Type.u32, .shl); - const shr = try func.binOp(tmp_or, .{ .imm32 = 16 }, Type.u32, .shr); - - tmp_or.free(func); + const intrin_ret = try func.callIntrinsic( + "__bswapsi2", + &.{.u32_type}, + Type.u32, + &.{operand}, + ); + const swapped = if (int_info.bits == 32) + intrin_ret + else + try func.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr); - break :result try (try func.binOp(shl, shr, Type.u32, .@"or")).toLocal(func, Type.u32); + break :result try swapped.toLocal(func, ty); }, 64 => { - const shl_tmp_1 = try func.binOp(operand, .{ .imm64 = 8 }, Type.u64, .shl); - const lhs_1 = try func.binOp(shl_tmp_1, .{ .imm64 = 0xFF00FF00FF00FF00 }, Type.u64, .@"and"); - - const shr_tmp_1 = try func.binOp(operand, .{ .imm64 = 8 }, Type.u64, .shr); - const rhs_1 = try func.binOp(shr_tmp_1, .{ .imm64 = 0x00FF00FF00FF00FF }, Type.u64, .@"and"); - - var tmp_or_1 = try (try func.binOp(lhs_1, rhs_1, Type.u64, .@"or")).toLocal(func, Type.u64); - - const shl_tmp_2 = try func.binOp(tmp_or_1, .{ .imm64 = 16 }, Type.u64, .shl); - const lhs_2 = try func.binOp(shl_tmp_2, .{ .imm64 = 0xFFFF0000FFFF0000 }, Type.u64, .@"and"); - - const shr_tmp_2 = try func.binOp(tmp_or_1, .{ .imm64 = 16 }, Type.u64, .shr); - tmp_or_1.free(func); - const rhs_2 = try func.binOp(shr_tmp_2, .{ .imm64 = 0x0000FFFF0000FFFF }, Type.u64, .@"and"); - - var tmp_or_2 = try (try func.binOp(lhs_2, rhs_2, Type.u64, .@"or")).toLocal(func, Type.u64); - - const shl = try func.binOp(tmp_or_2, .{ .imm64 = 32 }, Type.u64, .shl); - const shr = try func.binOp(tmp_or_2, .{ .imm64 = 32 }, Type.u64, .shr); - tmp_or_2.free(func); + const intrin_ret = try func.callIntrinsic( + "__bswapdi2", + &.{.u64_type}, + Type.u64, + &.{operand}, + ); + const swapped = if (int_info.bits == 64) + intrin_ret + else + try func.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr); - break :result try (try func.binOp(shl, shr, Type.u64, .@"or")).toLocal(func, Type.u64); + break :result try swapped.toLocal(func, ty); }, else => return func.fail("TODO: @byteSwap for integers 
with bitsize {d}", .{int_info.bits}), } @@ -6779,32 +6699,24 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt(mod)) - try func.divSigned(lhs, rhs, ty) - else - try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); + const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const div_result = if (ty.isSignedInt(mod)) - try func.divSigned(lhs, rhs, ty) - else - try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); + const div_result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); if (ty.isAnyFloat()) { const trunc_result = try (try func.floatOp(.trunc, ty, &.{div_result})).toLocal(func, ty); @@ -6834,16 +6746,6 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - const zero = switch (wasm_bits) { 32 => WValue{ .imm32 = 0 }, 64 => WValue{ .imm64 = 0 }, @@ -6852,7 +6754,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // tee leaves the value on the stack and stores it in a local. const quotient = try func.allocLocal(ty); - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .div); + _ = try func.binOp(lhs, rhs, ty, .div); try func.addLabel(.local_tee, quotient.local.value); // select takes a 32 bit value as the condition, so in the 64 bit case we use eqz to narrow @@ -6864,7 +6766,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // 0 if the signs of rhs_wasm and lhs_wasm are the same, 1 otherwise. 
- _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .xor); + _ = try func.binOp(lhs, rhs, ty, .xor); _ = try func.cmp(.stack, zero, ty, .lt); switch (wasm_bits) { @@ -6879,7 +6781,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); + _ = try func.binOp(lhs, rhs, ty, .rem); if (wasm_bits == 64) { try func.addTag(.i64_eqz); @@ -6929,68 +6831,14 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits}); - }; - - if (wasm_bits == 128) { - return func.fail("TODO: Implement signed division for 128-bit integerrs", .{}); - } - - if (wasm_bits != int_bits) { - // Leave both values on the stack - _ = try func.signExtendInt(lhs, ty); - _ = try func.signExtendInt(rhs, ty); - } else { - try func.emitWValue(lhs); - try func.emitWValue(rhs); - } - switch (wasm_bits) { - 32 => try func.addTag(.i32_div_s), - 64 => try func.addTag(.i64_div_s), - else => unreachable, - } - _ = try func.wrapOperand(.stack, ty); - - const result = try func.allocLocal(ty); - try func.addLabel(.local_set, result.local.value); - return result; -} - fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const mod = func.bin_file.base.comp.module.?; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt(mod)) result: { - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: `@rem` for signed integers larger than 128 bits ({d} bits requested)", .{int_bits}); - }; - - if (wasm_bits > 64) { - return func.fail("TODO: `@rem` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); - } - - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); - break :result try func.wrapOperand(.stack, ty); - } else try func.binOp(lhs, rhs, ty, .rem); + const result = try func.binOp(lhs, rhs, ty, .rem); const return_local = try result.toLocal(func, ty); func.finishAir(inst, return_local, &.{ bin_op.lhs, bin_op.rhs }); @@ -7022,19 +6870,9 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); - _ = try func.binOp(.stack, rhs_wasm, ty, .add); - _ = try func.binOp(.stack, rhs_wasm, ty, .rem); + _ = try func.binOp(lhs, rhs, ty, .rem); + _ = try func.binOp(.stack, rhs, ty, .add); + _ = try func.binOp(.stack, rhs, ty, .rem); } else { return func.fail("TODO: 
implement `@mod` on floating point types for {}", .{func.target.cpu.arch}); } @@ -7044,42 +6882,6 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -/// Sign extends an N bit signed integer and pushes the result to the stack. -/// The result will be sign extended to 32 bits if N <= 32 or 64 bits if N <= 64. -/// Support for integers wider than 64 bits has not yet been implemented. -fn signExtendInt(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: signExtendInt for signed integers larger than '{d}' bits", .{int_bits}); - }; - - const shift_val = switch (wasm_bits) { - 32 => WValue{ .imm32 = wasm_bits - int_bits }, - 64 => WValue{ .imm64 = wasm_bits - int_bits }, - else => return func.fail("TODO: signExtendInt for i128", .{}), - }; - - try func.emitWValue(operand); - switch (wasm_bits) { - 32 => { - try func.emitWValue(shift_val); - try func.addTag(.i32_shl); - try func.emitWValue(shift_val); - try func.addTag(.i32_shr_s); - }, - 64 => { - try func.emitWValue(shift_val); - try func.addTag(.i64_shl); - try func.emitWValue(shift_val); - try func.addTag(.i64_shr_s); - }, - else => unreachable, - } - - return WValue{ .stack = {} }; -} - fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -7131,20 +6933,13 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue { +fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { const mod = func.bin_file.base.comp.module.?; const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits).?; const is_wasm_bits = wasm_bits == int_info.bits; const ext_ty = if (!is_wasm_bits) try mod.intType(int_info.signedness, wasm_bits) else ty; - var lhs = if (!is_wasm_bits) lhs: { - break :lhs try (try func.signExtendInt(lhs_operand, ty)).toLocal(func, ext_ty); - } else lhs_operand; - var rhs = if (!is_wasm_bits) rhs: { - break :rhs try (try func.signExtendInt(rhs_operand, ty)).toLocal(func, ext_ty); - } else rhs_operand; - const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1)); const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1; const max_wvalue = switch (wasm_bits) { @@ -7161,8 +6956,6 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, var bin_result = try (try func.binOp(lhs, rhs, ext_ty, op)).toLocal(func, ext_ty); if (!is_wasm_bits) { defer bin_result.free(func); // not returned in this branch - defer lhs.free(func); // uses temporary local for absvalue - defer rhs.free(func); // uses temporary local for absvalue try func.emitWValue(bin_result); try func.emitWValue(max_wvalue); _ = try func.cmp(bin_result, max_wvalue, ext_ty, .lt); From 5a9495002fb8de2b70462c40dd27938c2fef6271 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Fri, 28 Jun 2024 00:13:11 +0200 Subject: [PATCH 014/152] stage2-wasm: Zcu renaming --- src/arch/wasm/CodeGen.zig | 32 +++++++++++++++----------------- src/arch/wasm/Emit.zig | 6 ++---- 
src/arch/wasm/abi.zig | 6 ++---- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index c0e7d029609e..9effa759675c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,10 +11,8 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../../InternPool.zig"); -const Decl = Module.Decl; +const Decl = Zcu.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); const Compilation = @import("../../Compilation.zig"); @@ -674,7 +672,7 @@ local_index: u32 = 0, /// Used to track which argument is being referenced in `airArg`. arg_index: u32 = 0, /// If codegen fails, an error messages will be allocated and saved in `err_msg` -err_msg: *Module.ErrorMsg, +err_msg: *Zcu.ErrorMsg, /// List of all locals' types generated throughout this declaration /// used to emit locals count at start of 'code' section. locals: std.ArrayListUnmanaged(u8), @@ -768,7 +766,7 @@ pub fn deinit(func: *CodeGen) void { fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { const mod = func.bin_file.base.comp.module.?; const src_loc = func.decl.navSrcLoc(mod).upgrade(mod); - func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); + func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -992,7 +990,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding type -fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { +fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype { const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (ty.zigTypeTag(mod)) { @@ -1032,14 +1030,14 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, mod: *Module) u8 { +fn genValtype(ty: Type, mod: *Zcu) u8 { return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, mod: *Module) u8 { +fn genBlockType(ty: Type, mod: *Zcu) u8 { return switch (ty.ip_index) { .void_type, .noreturn_type => wasm.block_empty, else => genValtype(ty, mod), @@ -1149,7 +1147,7 @@ fn genFunctype( cc: std.builtin.CallingConvention, params: []const InternPool.Index, return_type: Type, - mod: *Module, + mod: *Zcu, ) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); @@ -1204,7 +1202,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Zcu.SrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -1405,7 +1403,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Zcu) bool { switch (cc) { .Unspecified, .Inline => return isByRef(return_type, mod), .C => { @@ -1713,7 +1711,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn 
isByRef(ty: Type, mod: *Module) bool { +fn isByRef(ty: Type, mod: *Zcu) bool { const ip = &mod.intern_pool; const target = mod.getTarget(); switch (ty.zigTypeTag(mod)) { @@ -1785,7 +1783,7 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy { +fn determineSimdStoreStrategy(ty: Type, mod: *Zcu) SimdStoreStrategy { std.debug.assert(ty.zigTypeTag(mod) == .Vector); if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; @@ -3436,7 +3434,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { assert(ptr.base_addr == .int); return @intCast(ptr.byte_offset); }, - .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), + .err => |err| @as(i32, @bitCast(@as(Zcu.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), else => unreachable, }, } @@ -3447,11 +3445,11 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { }; } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Zcu) i32 { return intStorageAsI32(ip.indexToKey(int).int.storage, mod); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Zcu) i32 { return switch (storage) { .i64 => |x| @as(i32, @intCast(x)), .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), @@ -7340,7 +7338,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; for (0..names.len) |name_index| { - const err_int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?); + const err_int: Zcu.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 716e6e3b94f9..c41ea9ec5565 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -6,8 +6,6 @@ const std = @import("std"); const Mir = @import("Mir.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const codegen = @import("../../codegen.zig"); const leb128 = std.leb; @@ -18,7 +16,7 @@ mir: Mir, bin_file: *link.File.Wasm, /// Possible error message. When set, the value is allocated and /// must be freed manually. -error_msg: ?*Module.ErrorMsg = null, +error_msg: ?*Zcu.ErrorMsg = null, /// The binary representation that will be emit by this module. code: *std.ArrayList(u8), /// List of allocated locals. 
@@ -259,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { const comp = emit.bin_file.base.comp; const zcu = comp.module.?; const gpa = comp.gpa; - emit.error_msg = try Module.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); + emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); return error.EmitFail; } diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index c4d49d51b5ac..03c68daa8533 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -10,8 +10,6 @@ const assert = std.debug.assert; const Type = @import("../../type.zig").Type; const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; /// Defines how to pass a type as part of a function signature, /// both for parameters as well as return values. @@ -24,7 +22,7 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. -pub fn classifyType(ty: Type, mod: *Module) [2]Class { +pub fn classifyType(ty: Type, mod: *Zcu) [2]Class { const ip = &mod.intern_pool; const target = mod.getTarget(); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; @@ -102,7 +100,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. -pub fn scalarType(ty: Type, mod: *Module) Type { +pub fn scalarType(ty: Type, mod: *Zcu) Type { const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Struct => { From 1a951b49af90d04832ec7e928eb2f87630499d59 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Fri, 28 Jun 2024 00:39:49 +0200 Subject: [PATCH 015/152] stage2-wasm: not op for <= 128 bits ints --- src/arch/wasm/CodeGen.zig | 53 +++++++++++++++++++++----------- test/behavior/math.zig | 64 ++++++++++++++++++++++++++++++--------- 2 files changed, 85 insertions(+), 32 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 9effa759675c..b1ebf9126dcf 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3729,32 +3729,49 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.local_set, not_tmp.local.value); break :result not_tmp; } else { - const operand_bits = operand_ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(operand_bits) orelse { - return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits}); + const int_info = operand_ty.intInfo(mod); + const wasm_bits = toWasmBits(int_info.bits) orelse { + return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(mod)}); }; switch (wasm_bits) { 32 => { - const bin_op = try func.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor); - break :result try (try func.wrapOperand(bin_op, operand_ty)).toLocal(func, operand_ty); + try func.emitWValue(operand); + try func.addImm32(switch (int_info.signedness) { + .unsigned => ~@as(u32, 0) >> @intCast(32 - int_info.bits), + .signed => ~@as(u32, 0), + }); + try func.addTag(.i32_xor); + break :result try @as(WValue, .stack).toLocal(func, operand_ty); }, 64 => { - const bin_op = try func.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor); - break :result try (try func.wrapOperand(bin_op, 
operand_ty)).toLocal(func, operand_ty); + try func.emitWValue(operand); + try func.addImm64(switch (int_info.signedness) { + .unsigned => ~@as(u64, 0) >> @intCast(64 - int_info.bits), + .signed => ~@as(u64, 0), + }); + try func.addTag(.i64_xor); + break :result try @as(WValue, .stack).toLocal(func, operand_ty); }, 128 => { - const result_ptr = try func.allocStack(operand_ty); - try func.emitWValue(result_ptr); - const msb = try func.load(operand, Type.u64, 0); - const msb_xor = try func.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); - try func.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset()); - - try func.emitWValue(result_ptr); - const lsb = try func.load(operand, Type.u64, 8); - const lsb_xor = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); - try func.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset()); - break :result result_ptr; + const ptr = try func.allocStack(operand_ty); + + try func.emitWValue(ptr); + _ = try func.load(operand, Type.u64, 0); + try func.addImm64(~@as(u64, 0)); + try func.addTag(.i64_xor); + try func.store(.stack, .stack, Type.u64, ptr.offset()); + + try func.emitWValue(ptr); + _ = try func.load(operand, Type.u64, 8); + try func.addImm64(switch (int_info.signedness) { + .unsigned => ~@as(u64, 0) >> @intCast(128 - int_info.bits), + .signed => ~@as(u64, 0), + }); + try func.addTag(.i64_xor); + try func.store(.stack, .stack, Type.u64, ptr.offset() + 8); + + break :result ptr; }, else => unreachable, } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 6ff4380a44db..44daec9ed58a 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -393,9 +393,39 @@ fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int return a + b; } +fn not(comptime T: type, a: T) T { + return ~a; +} + test "binary not" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(not(u0, 0) == 0); + try expect(not(u1, 0) == 1); + try expect(not(u1, 1) == 0); + try expect(not(u5, 0b01001) == 0b10110); + try expect(not(u5, 0b10110) == 0b01001); + try expect(not(u16, 0b10101010_10101010) == 0b01010101_01010101); + try expect(not(u16, 0b01010101_01010101) == 0b10101010_10101010); + try expect(not(u32, 0xAAAA_3333) == 0x5555_CCCC); + try expect(not(u32, 0x5555_CCCC) == 0xAAAA_3333); + try expect(not(u35, 0x4_1111_FFFF) == 0x3_EEEE_0000); + try expect(not(u35, 0x3_EEEE_0000) == 0x4_1111_FFFF); + try expect(not(u48, 0x4567_89AB_CDEF) == 0xBA98_7654_3210); + try expect(not(u48, 0xBA98_7654_3210) == 0x4567_89AB_CDEF); + try expect(not(u64, 0x0123_4567_89AB_CDEF) == 0xFEDC_BA98_7654_3210); + try expect(not(u64, 0xFEDC_BA98_7654_3210) == 0x0123_4567_89AB_CDEF); + + try expect(not(i0, 0) == 0); + try expect(not(i1, 0) == -1); + try expect(not(i1, -1) == 0); + try expect(not(i5, -2) == 1); + try expect(not(i5, 3) == -4); + try expect(not(i32, 0) == -1); + try expect(not(i32, -2147483648) == 2147483647); + try expect(not(i64, -1) == 0); + try expect(not(i64, 0) == -1); + try expect(comptime x: { break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101; }); @@ -405,34 +435,40 @@ test "binary not" { try expect(comptime x: { break :x ~@as(u0, 0) == 0; }); - try testBinaryNot(0b1010101010101010); } -fn testBinaryNot(x: u16) !void { - try expect(~x == 0b0101010101010101); -} - -test "binary not 128-bit" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO +test "binary not big int <= 128 bits" { if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(not(u65, 1) == 0x1_FFFFFFFF_FFFFFFFE); + try expect(not(u65, 0x1_FFFFFFFF_FFFFFFFE) == 1); + + try expect(not(u96, 0x01234567_89ABCDEF_00000001) == 0xFEDCBA98_76543210_FFFFFFFE); + try expect(not(u96, 0xFEDCBA98_76543210_FFFFFFFE) == 0x01234567_89ABCDEF_00000001); + + try expect(not(u128, 0xAAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA) == 0x55555555_55555555_55555555_55555555); + try expect(not(u128, 0x55555555_55555555_55555555_55555555) == 0xAAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA); + + try expect(not(i65, -1) == 0); + try expect(not(i65, 0) == -1); + try expect(not(i65, -18446744073709551616) == 18446744073709551615); + try expect(not(i65, 18446744073709551615) == -18446744073709551616); + + try expect(not(i128, -1) == 0); + try expect(not(i128, 0) == -1); + try expect(not(i128, -200) == 199); + try expect(not(i128, 199) == -200); + try expect(comptime x: { break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; }); try expect(comptime x: { break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); }); - - try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa); - try testBinaryNot128(i128, @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)))); -} - -fn testBinaryNot128(comptime Type: type, x: Type) !void { - try expect(~x == @as(Type, 0x55555555_55555555_55555555_55555555)); } test "division" { From 02b3d5b58adc58f85f213b580e60f4a07ae9b140 Mon Sep 17 00:00:00 2001 From: Michael Bradshaw Date: Fri, 28 Jun 2024 08:10:55 -0700 Subject: [PATCH 016/152] Rename isASCII to isAscii --- lib/compiler/aro/aro/Parser.zig | 2 +- lib/std/ascii.zig | 9 ++++++--- lib/std/net.zig | 2 +- lib/std/zig/tokenizer.zig | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index a1f0631d84de..e21bd0bf097c 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -8011,7 +8011,7 @@ fn charLiteral(p: *Parser) Error!Result { const slice = char_kind.contentSlice(p.tokSlice(p.tok_i)); var is_multichar = false; - if (slice.len == 1 and std.ascii.isASCII(slice[0])) { + if (slice.len == 1 and std.ascii.isAscii(slice[0])) { // fast path: single unescaped ASCII char val = slice[0]; } else { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 70bc2c505023..d5b028500f18 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -130,7 +130,7 @@ pub fn isLower(c: u8) bool { /// Returns whether the character is printable and has some graphical representation, /// including the space character. pub fn isPrint(c: u8) bool { - return isASCII(c) and !isControl(c); + return isAscii(c) and !isControl(c); } /// Returns whether this character is included in `whitespace`. @@ -151,7 +151,7 @@ test whitespace { for (whitespace) |char| try std.testing.expect(isWhitespace(char)); var i: u8 = 0; - while (isASCII(i)) : (i += 1) { + while (isAscii(i)) : (i += 1) { if (isWhitespace(i)) try std.testing.expect(std.mem.indexOfScalar(u8, &whitespace, i) != null); } } @@ -173,10 +173,13 @@ pub fn isHex(c: u8) bool { } /// Returns whether the character is a 7-bit ASCII character. 
-pub fn isASCII(c: u8) bool { +pub fn isAscii(c: u8) bool { return c < 128; } +/// /// Deprecated: use `isAscii` +pub const isASCII = isAscii; + /// Uppercases the character and returns it as-is if already uppercase or not a letter. pub fn toUpper(c: u8) u8 { if (isLower(c)) { diff --git a/lib/std/net.zig b/lib/std/net.zig index 25030fe7aa89..79ca71d0e28a 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1363,7 +1363,7 @@ pub fn isValidHostName(hostname: []const u8) bool { if (hostname.len >= 254) return false; if (!std.unicode.utf8ValidateSlice(hostname)) return false; for (hostname) |byte| { - if (!std.ascii.isASCII(byte) or byte == '.' or byte == '-' or std.ascii.isAlphanumeric(byte)) { + if (!std.ascii.isAscii(byte) or byte == '.' or byte == '-' or std.ascii.isAlphanumeric(byte)) { continue; } return false; diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 6f9a232c4889..6897980fddd6 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -1270,7 +1270,7 @@ pub const Tokenizer = struct { fn getInvalidCharacterLength(self: *Tokenizer) u3 { const c0 = self.buffer[self.index]; - if (std.ascii.isASCII(c0)) { + if (std.ascii.isAscii(c0)) { if (c0 == '\r') { if (self.index + 1 < self.buffer.len and self.buffer[self.index + 1] == '\n') { // Carriage returns are *only* allowed just before a linefeed as part of a CRLF pair, otherwise From 5e3bad3556435663fa9220390248a5ed4f75be47 Mon Sep 17 00:00:00 2001 From: wooster0 Date: Sun, 23 Jun 2024 21:37:11 +0900 Subject: [PATCH 017/152] Make 0e.0 and 0xp0 not crash This fixes those sequences of characters crashing. --- lib/std/zig/AstGen.zig | 1 + lib/std/zig/number_literal.zig | 14 +++++++++ .../invalid_number_literals.zig | 29 +++++++++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 test/cases/compile_errors/invalid_number_literals.zig diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index febd6b60bbd3..18012b802c77 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -8637,6 +8637,7 @@ fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer return astgen.failOff(token, @intCast(i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] }); }, + .period_after_exponent => |i| return astgen.failOff(token, @intCast(i), "unexpected period after exponent", .{}), } } diff --git a/lib/std/zig/number_literal.zig b/lib/std/zig/number_literal.zig index aba588a3ea8b..a4dc33eb91c3 100644 --- a/lib/std/zig/number_literal.zig +++ b/lib/std/zig/number_literal.zig @@ -56,6 +56,8 @@ pub const Error = union(enum) { invalid_character: usize, /// [+-] not immediately after [pPeE] invalid_exponent_sign: usize, + /// Period comes directly after exponent. + period_after_exponent: usize, }; /// Parse Zig number literal accepted by fmt.parseInt, fmt.parseFloat and big_int.setString. @@ -108,6 +110,9 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { continue; }, 'p', 'P' => if (base == 16) { + if (i == 2) { + return .{ .failure = .{ .digit_after_base = {} } }; + } float = true; if (exponent) return .{ .failure = .{ .duplicate_exponent = i } }; if (underscore) return .{ .failure = .{ .exponent_after_underscore = i } }; @@ -116,6 +121,15 @@ pub fn parseNumberLiteral(bytes: []const u8) Result { continue; }, '.' 
+                if (exponent) {
+                    const digit_index = i - ".e".len;
+                    if (digit_index < bytes.len) {
+                        switch (bytes[digit_index]) {
+                            '0'...'9' => return .{ .failure = .{ .period_after_exponent = i } },
+                            else => {},
+                        }
+                    }
+                }
                 float = true;
                 if (base != 10 and base != 16) return .{ .failure = .{ .invalid_float_base = 2 } };
                 if (period) return .{ .failure = .duplicate_period };
diff --git a/test/cases/compile_errors/invalid_number_literals.zig b/test/cases/compile_errors/invalid_number_literals.zig
new file mode 100644
index 000000000000..3d5f8b9dc442
--- /dev/null
+++ b/test/cases/compile_errors/invalid_number_literals.zig
@@ -0,0 +1,29 @@
+comptime {
+    _ = 0e.0;
+}
+comptime {
+    _ = 0E.0;
+}
+comptime {
+    _ = 12e.0;
+}
+comptime {
+    _ = 12E.0;
+}
+comptime {
+    _ = 0xp0;
+}
+comptime {
+    _ = 0xP0;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:11: error: unexpected period after exponent
+// :5:11: error: unexpected period after exponent
+// :8:12: error: unexpected period after exponent
+// :11:12: error: unexpected period after exponent
+// :14:9: error: expected a digit after base prefix
+// :17:9: error: expected a digit after base prefix

From a31fe8aa3ed4868fb10000c1e3eeaa094488fc94 Mon Sep 17 00:00:00 2001
From: Pat Tullmann
Date: Wed, 26 Jun 2024 12:51:19 -0700
Subject: [PATCH 018/152] glibc headers: arc4random* functions added in glibc
 2.36

Here's the glibc v2.36 announcement noting the addition of arc4random,
arc4random_buf and arc4random_uniform:
https://sourceware.org/git/?p=glibc.git;a=blob;f=NEWS;h=8420a65cd06874ee09518366b8fba746a557212a;hb=6f4e0fcfa2d2b0915816a3a3a1d48b4763a7dee2

Tested with the testcase from the bug. I get a compile-time error when
building against older glibc (instead of a linker error), and no errors
(as before) when compiling against v2.36 or later. And the glibc_compat
regression tests pass.

Fix #20426
---
 lib/libc/include/generic-glibc/stdlib.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/libc/include/generic-glibc/stdlib.h b/lib/libc/include/generic-glibc/stdlib.h
index ad86197336aa..5a5252ae0eb4 100644
--- a/lib/libc/include/generic-glibc/stdlib.h
+++ b/lib/libc/include/generic-glibc/stdlib.h
@@ -653,6 +653,11 @@ extern int lcong48_r (unsigned short int __param[7],
 		      struct drand48_data *__buffer) __THROW __nonnull ((1, 2));
 
+/*
+ * arc4random* symbols introduced in glibc 2.36:
+ * https://sourceware.org/git/?p=glibc.git;a=blob;f=NEWS;h=8420a65cd06874ee09518366b8fba746a557212a;hb=6f4e0fcfa2d2b0915816a3a3a1d48b4763a7dee2
+ */
+# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 36) || __GLIBC__ > 2
 /* Return a random integer between zero and 2**32-1 (inclusive).  */
 extern __uint32_t arc4random (void)
      __THROW __wur;
@@ -665,6 +670,7 @@ extern void arc4random_buf (void *__buf, size_t __size)
    limit (exclusive).  */
 extern __uint32_t arc4random_uniform (__uint32_t __upper_bound)
      __THROW __wur;
+# endif /* glibc v2.36 and later */
 
 # endif /* Use misc.  */
 #endif /* Use misc or X/Open.  */

From cb182432b08d13a343d1db5f4ee06c9c90430779 Mon Sep 17 00:00:00 2001
From: cryptocode
Date: Fri, 28 Jun 2024 17:56:35 +0200
Subject: [PATCH 019/152] [std.c] Add eventfd and dup3 functions to FreeBSD

The eventfd system call and dup3 library call have been available since
FreeBSD 13 and 10 respectively, and are thus available in all FreeBSD
releases not deemed EOL.

The lack of these was discovered when porting a terminal emulator to
FreeBSD. It would be nice to have them included in Zig's stdlib.
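As illustration only (not part of this patch), a minimal sketch of how the
new declarations would be reached through std.c on a FreeBSD target; the
error names and the descriptor number 10 are placeholders:

    const std = @import("std");

    pub fn main() !void {
        // eventfd(2): create an event counter descriptor; returns -1 on failure.
        const efd = std.c.eventfd(0, 0);
        if (efd == -1) return error.EventFdFailed;
        defer _ = std.c.close(efd);

        // dup3(2): like dup2(2), but applies flags to the copy atomically.
        const duped = std.c.dup3(efd, 10, 0);
        if (duped == -1) return error.Dup3Failed;
        defer _ = std.c.close(duped);
    }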
--- lib/std/c/freebsd.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig index a60f5de525ee..ef4758030745 100644 --- a/lib/std/c/freebsd.zig +++ b/lib/std/c/freebsd.zig @@ -1870,3 +1870,5 @@ pub const MFD = struct { pub extern "c" fn memfd_create(name: [*:0]const u8, flags: c_uint) c_int; pub extern "c" fn copy_file_range(fd_in: fd_t, off_in: ?*off_t, fd_out: fd_t, off_out: ?*off_t, len: usize, flags: u32) usize; +pub extern "c" fn eventfd(initval: c_uint, flags: c_uint) c_int; +pub extern "c" fn dup3(old: c_int, new: c_int, flags: c_uint) c_int; From e1d4cf67caedfe20d2e0af9d3b518af1f0777191 Mon Sep 17 00:00:00 2001 From: Nikita Date: Tue, 2 Jul 2024 20:01:41 -0400 Subject: [PATCH 020/152] Add two new io_uring flags to linux.zig --- lib/std/os/linux.zig | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 1dbcd0e261ec..3deac7a2f6ef 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -4777,6 +4777,17 @@ pub const IORING_SETUP_SINGLE_ISSUER = 1 << 12; /// try to do it just before it is needed. pub const IORING_SETUP_DEFER_TASKRUN = 1 << 13; +/// Application provides ring memory +pub const IORING_SETUP_NO_MMAP = 1 << 14; + +/// Register the ring fd in itself for use with +/// IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather +/// than an fd. +pub const IORING_SETUP_REGISTERED_FD_ONLY = 1 << 15; + +/// Removes indirection through the SQ index array. +pub const IORING_SETUP_NO_SQARRAY = 1 << 16; + /// IO submission data structure (Submission Queue Entry) pub const io_uring_sqe = @import("linux/io_uring_sqe.zig").io_uring_sqe; From 21cad3e09f173154c4c0dc61dd05fe8dd0628dc3 Mon Sep 17 00:00:00 2001 From: Michael Bradshaw Date: Fri, 28 Jun 2024 08:21:20 -0700 Subject: [PATCH 021/152] Rename MAX_NAME_BYTES to max_name_bytes --- lib/compiler/aro/aro/Driver.zig | 4 ++-- lib/std/fs.zig | 7 +++++-- lib/std/fs/Dir.zig | 2 +- lib/std/fs/test.zig | 4 ++-- lib/std/zig/WindowsSdk.zig | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index af2677626e99..6876395b8ac5 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -730,8 +730,8 @@ fn processSource( defer obj.deinit(); // If it's used, name_buf will either hold a filename or `/tmp/<12 random bytes with base-64 encoding>.` - // both of which should fit into MAX_NAME_BYTES for all systems - var name_buf: [std.fs.MAX_NAME_BYTES]u8 = undefined; + // both of which should fit into max_name_bytes for all systems + var name_buf: [std.fs.max_name_bytes]u8 = undefined; const out_file_name = if (d.only_compile) blk: { const fmt_template = "{s}{s}"; diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 139660815a6b..e56d68e40728 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -72,7 +72,7 @@ pub const max_path_bytes = switch (native_os) { /// On Windows, `[]u8` file name components are encoded as [WTF-8](https://simonsapin.github.io/wtf-8/). /// On WASI, file name components are encoded as valid UTF-8. /// On other platforms, `[]u8` components are an opaque sequence of bytes with no particular encoding. -pub const MAX_NAME_BYTES = switch (native_os) { +pub const max_name_bytes = switch (native_os) { .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .solaris, .illumos => posix.NAME_MAX, // Haiku's NAME_MAX includes the null terminator, so subtract one. 
.haiku => posix.NAME_MAX - 1, @@ -81,7 +81,7 @@ pub const MAX_NAME_BYTES = switch (native_os) { // pair in the WTF-16LE, and we (over)account 3 bytes for it that way. .windows => windows.NAME_MAX * 3, // For WASI, the MAX_NAME will depend on the host OS, so it needs to be - // as large as the largest MAX_NAME_BYTES (Windows) in order to work on any host OS. + // as large as the largest max_name_bytes (Windows) in order to work on any host OS. // TODO determine if this is a reasonable approach .wasi => windows.NAME_MAX * 3, else => if (@hasDecl(root, "os") and @hasDecl(root.os, "NAME_MAX")) @@ -90,6 +90,9 @@ pub const MAX_NAME_BYTES = switch (native_os) { @compileError("NAME_MAX not implemented for " ++ @tagName(native_os)), }; +/// Deprecated: use `max_name_bytes` +pub const MAX_NAME_BYTES = max_name_bytes; + pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*; /// Base64 encoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem. diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index 9b8406d6b290..597c158d631b 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -414,7 +414,7 @@ pub const Iterator = switch (native_os) { index: usize, end_index: usize, first_iter: bool, - name_data: [fs.MAX_NAME_BYTES]u8, + name_data: [fs.max_name_bytes]u8, const Self = @This(); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 593beea05d69..f596c52322c3 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -1269,11 +1269,11 @@ test "max file name component lengths" { } else if (native_os == .wasi) { // On WASI, the maxed filename depends on the host OS, so in order for this test to // work on any host, we need to use a length that will work for all platforms - // (i.e. the minimum MAX_NAME_BYTES of all supported platforms). + // (i.e. the minimum max_name_bytes of all supported platforms). 
        const maxed_wasi_filename = [_]u8{'1'} ** 255;
         try testFilenameLimits(tmp.dir, &maxed_wasi_filename);
     } else {
-        const maxed_ascii_filename = [_]u8{'1'} ** std.fs.MAX_NAME_BYTES;
+        const maxed_ascii_filename = [_]u8{'1'} ** std.fs.max_name_bytes;
         try testFilenameLimits(tmp.dir, &maxed_ascii_filename);
     }
 }
diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig
index 35e812a7cf45..99ad52bf1dc4 100644
--- a/lib/std/zig/WindowsSdk.zig
+++ b/lib/std/zig/WindowsSdk.zig
@@ -750,7 +750,7 @@ const MsvcLibDir = struct {
         var instances_dir = try findInstancesDir(allocator);
         defer instances_dir.close();
 
-        var state_subpath_buf: [std.fs.MAX_NAME_BYTES + 32]u8 = undefined;
+        var state_subpath_buf: [std.fs.max_name_bytes + 32]u8 = undefined;
         var latest_version_lib_dir = std.ArrayListUnmanaged(u8){};
         errdefer latest_version_lib_dir.deinit(allocator);
 

From fe66a12a23eda90b89b06dc28957f3baac01ee89 Mon Sep 17 00:00:00 2001
From: Michael Bradshaw
Date: Fri, 28 Jun 2024 08:27:00 -0700
Subject: [PATCH 022/152] Rename nonSIMDCall* to nonSimdCall*

---
 lib/std/valgrind.zig | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/lib/std/valgrind.zig b/lib/std/valgrind.zig
index 43935fe37d50..8590302e9aa3 100644
--- a/lib/std/valgrind.zig
+++ b/lib/std/valgrind.zig
@@ -124,22 +124,34 @@ pub fn innerThreads(qzz: [*]u8) void {
     doClientRequestStmt(.InnerThreads, qzz, 0, 0, 0, 0);
 }
 
-pub fn nonSIMDCall0(func: fn (usize) usize) usize {
+pub fn nonSimdCall0(func: fn (usize) usize) usize {
     return doClientRequestExpr(0, .ClientCall0, @intFromPtr(func), 0, 0, 0, 0);
 }
 
-pub fn nonSIMDCall1(func: fn (usize, usize) usize, a1: usize) usize {
+pub fn nonSimdCall1(func: fn (usize, usize) usize, a1: usize) usize {
    return doClientRequestExpr(0, .ClientCall1, @intFromPtr(func), a1, 0, 0, 0);
 }
 
-pub fn nonSIMDCall2(func: fn (usize, usize, usize) usize, a1: usize, a2: usize) usize {
+pub fn nonSimdCall2(func: fn (usize, usize, usize) usize, a1: usize, a2: usize) usize {
     return doClientRequestExpr(0, .ClientCall2, @intFromPtr(func), a1, a2, 0, 0);
 }
 
-pub fn nonSIMDCall3(func: fn (usize, usize, usize, usize) usize, a1: usize, a2: usize, a3: usize) usize {
+pub fn nonSimdCall3(func: fn (usize, usize, usize, usize) usize, a1: usize, a2: usize, a3: usize) usize {
     return doClientRequestExpr(0, .ClientCall3, @intFromPtr(func), a1, a2, a3, 0);
 }
 
+/// Deprecated: use `nonSimdCall0`
+pub const nonSIMDCall0 = nonSimdCall0;
+
+/// Deprecated: use `nonSimdCall1`
+pub const nonSIMDCall1 = nonSimdCall1;
+
+/// Deprecated: use `nonSimdCall2`
+pub const nonSIMDCall2 = nonSimdCall2;
+
+/// Deprecated: use `nonSimdCall3`
+pub const nonSIMDCall3 = nonSimdCall3;
+
 /// Counts the number of errors that have been recorded by a tool. Nb:
 /// the tool must record the errors with VG_(maybe_record_error)() or
 /// VG_(unique_error)() for them to be counted.

From 035c1b65229083e01bc5ec69f2ad529ba0c56062 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Igor=20Anic=CC=81?=
Date: Thu, 11 Apr 2024 16:32:07 +0200
Subject: [PATCH 023/152] std.tar: add strip components error to diagnostics

This was the only kind of error raised in pipeToFileSystem that was not
added to Diagnostics.

Shell tar silently ignores paths which are stripped out when used with
the `--strip-components` switch. This enables the same behavior: errors
are collected in diagnostics, but the caller is free to ignore that kind
of diagnostic error.
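To make the resulting call-site pattern concrete, a sketch (hypothetical
caller code; gpa, dir and reader stand in for the caller's own values):

    var diagnostics: std.tar.Diagnostics = .{ .allocator = gpa };
    defer diagnostics.deinit();

    try std.tar.pipeToFileSystem(dir, reader, .{
        .strip_components = 3,
        .diagnostics = &diagnostics,
    });

    // Entries whose whole path was stripped away are reported here instead
    // of aborting the pipe; the caller chooses what to do with them.
    for (diagnostics.errors.items) |e| switch (e) {
        .components_outside_stripped_prefix => |info| std.log.warn("stripped out: {s}", .{info.file_name}),
        else => {},
    };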
This enables the use case where the caller knows the structure of the tar
file and wants to extract only some deeply nested folders, ignoring upper
files/folders.

Fixes: #17620 by giving the caller options:
- provide no diagnostics and get errors
- provide diagnostics and analyze errors
- provide diagnostics and ignore errors
---
 lib/std/tar.zig       | 41 ++++++++++++++++++++++++++++++++++++---
 src/Package/Fetch.zig |  1 +
 2 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index 8ba5a19012c5..04571fb3c88f 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -46,6 +46,9 @@ pub const Diagnostics = struct {
             file_name: []const u8,
             file_type: Header.Kind,
         },
+        components_outside_stripped_prefix: struct {
+            file_name: []const u8,
+        },
     };
 
     fn findRoot(d: *Diagnostics, path: []const u8) !void {
@@ -97,6 +100,9 @@ pub const Diagnostics = struct {
                 .unsupported_file_type => |info| {
                     d.allocator.free(info.file_name);
                 },
+                .components_outside_stripped_prefix => |info| {
+                    d.allocator.free(info.file_name);
+                },
             }
         }
         d.errors.deinit(d.allocator);
@@ -623,18 +629,24 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: PipeOptions)
     while (try iter.next()) |file| {
         const file_name = stripComponents(file.name, options.strip_components);
+        if (file_name.len == 0 and file.kind != .directory) {
+            const d = options.diagnostics orelse return error.TarComponentsOutsideStrippedPrefix;
+            try d.errors.append(d.allocator, .{ .components_outside_stripped_prefix = .{
+                .file_name = try d.allocator.dupe(u8, file.name),
+            } });
+            continue;
+        }
         if (options.diagnostics) |d| {
             try d.findRoot(file_name);
         }
 
         switch (file.kind) {
             .directory => {
-                if (file_name.len != 0 and !options.exclude_empty_directories) {
+                if (file_name.len > 0 and !options.exclude_empty_directories) {
                     try dir.makePath(file_name);
                 }
             },
             .file => {
-                if (file_name.len == 0) return error.BadFileName;
                 if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| {
                     defer fs_file.close();
                     try file.writeAll(fs_file);
@@ -647,7 +659,6 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: PipeOptions)
                 }
             },
             .sym_link => {
-                if (file_name.len == 0) return error.BadFileName;
                 const link_name = file.link_name;
                 createDirAndSymlink(dir, link_name, file_name) catch |err| {
                     const d = options.diagnostics orelse return error.UnableToCreateSymLink;
@@ -1096,6 +1107,30 @@ test "findRoot without explicit root dir" {
     try testing.expectEqualStrings("root", diagnostics.root_dir);
 }
 
+test "pipeToFileSystem strip_components" {
+    const data = @embedFile("tar/testdata/example.tar");
+    var fbs = std.io.fixedBufferStream(data);
+    const reader = fbs.reader();
+
+    var tmp = testing.tmpDir(.{ .no_follow = true });
+    defer tmp.cleanup();
+    var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
+    defer diagnostics.deinit();
+
+    pipeToFileSystem(tmp.dir, reader, .{
+        .strip_components = 3,
+        .diagnostics = &diagnostics,
+    }) catch |err| {
+        // Skip on platforms which don't support symlinks
+        if (err == error.UnableToCreateSymLink) return error.SkipZigTest;
+        return err;
+    };
+
+    try testing.expectEqual(2, diagnostics.errors.items.len);
+    try testing.expectEqualStrings("example/b/symlink", diagnostics.errors.items[0].components_outside_stripped_prefix.file_name);
+    try testing.expectEqualStrings("example/a/file", diagnostics.errors.items[1].components_outside_stripped_prefix.file_name);
+}
+
 fn normalizePath(bytes: []u8) []u8 {
     const canonical_sep = std.fs.path.sep_posix;
     if (std.fs.path.sep == canonical_sep) return
bytes; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 9f136536476b..6c5a930285d9 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1189,6 +1189,7 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackRes .unable_to_create_file => |i| res.unableToCreateFile(stripRoot(i.file_name, res.root_dir), i.code), .unable_to_create_sym_link => |i| res.unableToCreateSymLink(stripRoot(i.file_name, res.root_dir), i.link_name, i.code), .unsupported_file_type => |i| res.unsupportedFileType(stripRoot(i.file_name, res.root_dir), @intFromEnum(i.file_type)), + .components_outside_stripped_prefix => {}, // impossible with strip_components = 0 } } } From e0350859bb88d70b95a6b99932a9a5452f8b6b14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Anic=CC=81?= Date: Thu, 11 Apr 2024 17:47:36 +0200 Subject: [PATCH 024/152] use unreachable keyword for unreachable code path --- src/Package/Fetch.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 6c5a930285d9..f6e5c2e2223a 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1189,7 +1189,7 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackRes .unable_to_create_file => |i| res.unableToCreateFile(stripRoot(i.file_name, res.root_dir), i.code), .unable_to_create_sym_link => |i| res.unableToCreateSymLink(stripRoot(i.file_name, res.root_dir), i.link_name, i.code), .unsupported_file_type => |i| res.unsupportedFileType(stripRoot(i.file_name, res.root_dir), @intFromEnum(i.file_type)), - .components_outside_stripped_prefix => {}, // impossible with strip_components = 0 + .components_outside_stripped_prefix => unreachable, // unreachable with strip_components = 0 } } } From 8f7b50e2c4ad0c08015bce02a3ba8a7a9a058e11 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 26 Jun 2024 23:37:11 +0200 Subject: [PATCH 025/152] json: respect duplicate_field_behavior in std.json.Value.jsonParse --- lib/std/json/dynamic.zig | 14 +++++++++++++- lib/std/json/dynamic_test.zig | 28 ++++++++++++++++++++++++++++ lib/std/json/test.zig | 4 +++- 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/lib/std/json/dynamic.zig b/lib/std/json/dynamic.zig index a1849b0fed3e..7bacbd60d199 100644 --- a/lib/std/json/dynamic.zig +++ b/lib/std/json/dynamic.zig @@ -147,7 +147,19 @@ fn handleCompleteValue(stack: *Array, allocator: Allocator, source: anytype, val // stack: [..., .object] var object = &stack.items[stack.items.len - 1].object; - try object.put(key, value); + + const gop = try object.getOrPut(key); + if (gop.found_existing) { + switch (options.duplicate_field_behavior) { + .use_first => {}, + .@"error" => return error.DuplicateField, + .use_last => { + gop.value_ptr.* = value; + }, + } + } else { + gop.value_ptr.* = value; + } // This is an invalid state to leave the stack in, // so we have to process the next token before we return. 
diff --git a/lib/std/json/dynamic_test.zig b/lib/std/json/dynamic_test.zig index 4ac468f86603..9e181dea9c39 100644 --- a/lib/std/json/dynamic_test.zig +++ b/lib/std/json/dynamic_test.zig @@ -181,6 +181,34 @@ test "escaped characters" { try testing.expectEqualSlices(u8, obj.get("surrogatepair").?.string, "😂"); } +test "Value with duplicate fields" { + var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); + defer arena_allocator.deinit(); + + const doc = + \\{ + \\ "abc": 0, + \\ "abc": 1 + \\} + ; + + try testing.expectError(error.DuplicateField, parseFromSliceLeaky(std.json.Value, arena_allocator.allocator(), doc, .{ + .duplicate_field_behavior = .@"error", + })); + + const first = try parseFromSliceLeaky(std.json.Value, arena_allocator.allocator(), doc, .{ + .duplicate_field_behavior = .use_first, + }); + try testing.expectEqual(@as(usize, 1), first.object.count()); + try testing.expectEqual(@as(i64, 0), first.object.get("abc").?.integer); + + const last = try parseFromSliceLeaky(std.json.Value, arena_allocator.allocator(), doc, .{ + .duplicate_field_behavior = .use_last, + }); + try testing.expectEqual(@as(usize, 1), last.object.count()); + try testing.expectEqual(@as(i64, 1), last.object.get("abc").?.integer); +} + test "Value.jsonStringify" { var vals = [_]Value{ .{ .integer = 1 }, diff --git a/lib/std/json/test.zig b/lib/std/json/test.zig index 9530ab37a6bf..136e8e34d18b 100644 --- a/lib/std/json/test.zig +++ b/lib/std/json/test.zig @@ -28,7 +28,9 @@ fn testLowLevelScanner(s: []const u8) !void { } } fn testHighLevelDynamicParser(s: []const u8) !void { - var parsed = try parseFromSlice(Value, testing.allocator, s, .{}); + var parsed = try parseFromSlice(Value, testing.allocator, s, .{ + .duplicate_field_behavior = .use_first, + }); defer parsed.deinit(); } From 4870e002f213d7002ac1941c6a204aff79137d54 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Tue, 25 Jun 2024 12:45:37 +0200 Subject: [PATCH 026/152] Compilation: pass libc include directories when compiling assembly_with_cpp --- src/Compilation.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Compilation.zig b/src/Compilation.zig index b30f65ad1146..343b22d2b1d3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5363,6 +5363,11 @@ pub fn addCCArgs( const c_headers_dir = try std.fs.path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, "include" }); try argv.append("-isystem"); try argv.append(c_headers_dir); + + for (comp.libc_include_dir_list) |include_dir| { + try argv.append("-isystem"); + try argv.append(include_dir); + } } // The Clang assembler does not accept the list of CPU features like the From efb00c20eefff7bf1ef2d2d7bb9ef16374ebfd27 Mon Sep 17 00:00:00 2001 From: antlilja Date: Wed, 3 Jul 2024 18:06:53 +0200 Subject: [PATCH 027/152] LLVM Builder: Pass correct argument to ensureUnusedMetadataCapacity The trail_len was being multiplied by the size of the type before --- src/codegen/llvm/Builder.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index f0855eee2797..558b0662ad57 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -12007,7 +12007,7 @@ pub fn debugExpression( self: *Builder, elements: []const u32, ) Allocator.Error!Metadata { - try self.ensureUnusedMetadataCapacity(1, Metadata.Expression, elements.len * @sizeOf(u32)); + try self.ensureUnusedMetadataCapacity(1, Metadata.Expression, elements.len); return 
self.debugExpressionAssumeCapacity(elements); } @@ -12015,7 +12015,7 @@ pub fn debugTuple( self: *Builder, elements: []const Metadata, ) Allocator.Error!Metadata { - try self.ensureUnusedMetadataCapacity(1, Metadata.Tuple, elements.len * @sizeOf(Metadata)); + try self.ensureUnusedMetadataCapacity(1, Metadata.Tuple, elements.len); return self.debugTupleAssumeCapacity(elements); } From 1cfc3647853123efc0fda991180e337d8a72eac5 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 07:19:08 +0200 Subject: [PATCH 028/152] tsan: build dynamic library on Apple platforms --- src/Compilation.zig | 3 +++ src/libtsan.zig | 37 +++++++++++++++++++++++++++++++------ 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 343b22d2b1d3..412798e09a3d 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -188,6 +188,9 @@ libunwind_static_lib: ?CRTFile = null, /// Populated when we build the TSAN static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). tsan_static_lib: ?CRTFile = null, +/// Populated when we build the TSAN dynamic library. A Job to build this is placed in the queue +/// and resolved before calling linker.flush(). +tsan_dynamic_lib: ?CRTFile = null, /// Populated when we build the libc static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libc_static_lib: ?CRTFile = null, diff --git a/src/libtsan.zig b/src/libtsan.zig index 1aa32e6ff07a..4164200b47ed 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -25,10 +25,20 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const root_name = "tsan"; - const output_mode = .Lib; - const link_mode = .static; const target = comp.getTarget(); + const root_name = switch (target.os.tag) { + // On Apple platforms, we use the same name as LLVM and Apple so that we correctly + // mark the images as instrumented when traversing them when TSAN dylib is + // initialized. 
+ .macos => "clang_rt.tsan_osx_dynamic", + .ios => switch (target.abi) { + .simulator => "clang_rt.tsan_iossim_dynamic", + else => "clang_rt.tsan_ios_dynamic", + }, + else => "tsan", + }; + const link_mode: std.builtin.LinkMode = if (target.isDarwin()) .dynamic else .static; + const output_mode = .Lib; const basename = try std.zig.binNameAlloc(arena, .{ .root_name = root_name, .target = target, @@ -43,6 +53,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo const optimize_mode = comp.compilerRtOptMode(); const strip = comp.compilerRtStrip(); + const link_libcpp = target.isDarwin(); const config = Compilation.Config.resolve(.{ .output_mode = output_mode, @@ -54,6 +65,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo .root_optimize_mode = optimize_mode, .root_strip = strip, .link_libc = true, + .link_libcpp = link_libcpp, }) catch |err| { comp.setMiscFailure( .libtsan, @@ -272,6 +284,12 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo }); } + const skip_linker_dependencies = !target.isDarwin(); + const linker_allow_shlib_undefined = target.isDarwin(); + const install_name = if (target.isDarwin()) + try std.fmt.allocPrintZ(arena, "@rpath/{s}", .{basename}) + else + null; const sub_compilation = Compilation.create(comp.gpa, arena, .{ .local_cache_directory = comp.global_cache_directory, .global_cache_directory = comp.global_cache_directory, @@ -294,7 +312,9 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo .verbose_cimport = comp.verbose_cimport, .verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, .clang_passthrough_mode = comp.clang_passthrough_mode, - .skip_linker_dependencies = true, + .skip_linker_dependencies = skip_linker_dependencies, + .linker_allow_shlib_undefined = linker_allow_shlib_undefined, + .install_name = install_name, }) catch |err| { comp.setMiscFailure( .libtsan, @@ -317,8 +337,13 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo }, }; - assert(comp.tsan_static_lib == null); - comp.tsan_static_lib = try sub_compilation.toCrtFile(); + assert(comp.tsan_static_lib == null and comp.tsan_dynamic_lib == null); + + if (target.isDarwin()) { + comp.tsan_dynamic_lib = try sub_compilation.toCrtFile(); + } else { + comp.tsan_static_lib = try sub_compilation.toCrtFile(); + } } const tsan_sources = [_][]const u8{ From c9d19ebb7a3d6996ec8b299d1698adf6737d73e5 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 07:21:01 +0200 Subject: [PATCH 029/152] macho: link dynamic TSAN lib --- src/link/MachO.zig | 21 +++++++++++++++------ src/link/MachO/load_commands.zig | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index dd185fcaec81..52b57a2c58e1 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -150,6 +150,7 @@ no_implicit_dylibs: bool = false, /// Whether the linker should parse and always force load objects containing ObjC in archives. // TODO: in Zig we currently take -ObjC as always on force_load_objc: bool = true, +rpaths: std.ArrayListUnmanaged([]const u8) = .{}, /// Hot-code swapping state. 
hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{}, @@ -192,7 +193,7 @@ pub fn createEmpty( null else try std.fmt.allocPrint(arena, "{s}.o", .{emit.sub_path}); - const allow_shlib_undefined = options.allow_shlib_undefined orelse comp.config.any_sanitize_thread; + const allow_shlib_undefined = options.allow_shlib_undefined orelse false; const self = try arena.create(MachO); self.* = .{ @@ -358,6 +359,8 @@ pub fn deinit(self: *MachO) void { } self.thunks.deinit(gpa); self.unwind_records.deinit(gpa); + + self.rpaths.deinit(gpa); } pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { @@ -395,6 +398,9 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path); if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path); + try self.rpaths.ensureUnusedCapacity(gpa, self.base.rpath_list.len); + self.rpaths.appendSliceAssumeCapacity(self.base.rpath_list); + var positionals = std.ArrayList(Compilation.LinkObject).init(gpa); defer positionals.deinit(); @@ -413,7 +419,10 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) // TSAN if (comp.config.any_sanitize_thread) { - try positionals.append(.{ .path = comp.tsan_static_lib.?.full_object_path }); + const path = comp.tsan_dynamic_lib.?.full_object_path; + try positionals.append(.{ .path = path }); + const basename = std.fs.path.dirname(path) orelse "."; + try self.rpaths.append(gpa, basename); } for (positionals.items) |obj| { @@ -771,7 +780,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { try argv.append(syslibroot); } - for (self.base.rpath_list) |rpath| { + for (self.rpaths.items) |rpath| { try argv.append("-rpath"); try argv.append(rpath); } @@ -831,7 +840,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { } if (comp.config.any_sanitize_thread) { - try argv.append(comp.tsan_static_lib.?.full_object_path); + try argv.append(comp.tsan_dynamic_lib.?.full_object_path); } for (self.lib_dirs) |lib_dir| { @@ -3015,8 +3024,8 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } { ncmds += 1; } - try load_commands.writeRpathLCs(self.base.rpath_list, writer); - ncmds += self.base.rpath_list.len; + try load_commands.writeRpathLCs(self.rpaths.items, writer); + ncmds += self.rpaths.items.len; try writer.writeStruct(macho.source_version_command{ .version = 0 }); ncmds += 1; diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 394253db488e..8719d92ea868 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -61,7 +61,7 @@ pub fn calcLoadCommandsSize(macho_file: *MachO, assume_max_path_len: bool) !u32 } // LC_RPATH { - for (macho_file.base.rpath_list) |rpath| { + for (macho_file.rpaths.items) |rpath| { sizeofcmds += calcInstallNameLen( @sizeOf(macho.rpath_command), rpath, From 76c3b6b794ebc9ddb910e369bbf2d121a01e06d8 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 07:26:28 +0200 Subject: [PATCH 030/152] tsan: add workaround for TSAN Apple bug Addresses TSAN bug on Apple platforms by always setting the headerpad size to a non-zero value when building the TSAN dylib. 
---
 src/libtsan.zig | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/libtsan.zig b/src/libtsan.zig
index 4164200b47ed..b7d3a9dda267 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -290,6 +290,8 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
         try std.fmt.allocPrintZ(arena, "@rpath/{s}", .{basename})
     else
         null;
+    // This is temporary, conditional on resolving https://github.com/llvm/llvm-project/issues/97627 upstream.
+    const headerpad_size: ?u32 = if (target.isDarwin()) 32 else null;
     const sub_compilation = Compilation.create(comp.gpa, arena, .{
         .local_cache_directory = comp.global_cache_directory,
         .global_cache_directory = comp.global_cache_directory,
@@ -315,6 +317,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
         .skip_linker_dependencies = skip_linker_dependencies,
         .linker_allow_shlib_undefined = linker_allow_shlib_undefined,
         .install_name = install_name,
+        .headerpad_size = headerpad_size,
     }) catch |err| {
         comp.setMiscFailure(
             .libtsan,

From b67caf72e31eefe08fe2dc8b17b4bca16e0e3cc6 Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Wed, 3 Jul 2024 21:41:11 -0700
Subject: [PATCH 031/152] Add behavior test: including the sentinel when
 dereferencing a string literal

This test would have failed in the past, but this has been fixed sometime
in the last year.

Closes #15944
---
 test/behavior/string_literals.zig | 11 +++++++++
 1 file changed, 11 insertions(+)

diff --git a/test/behavior/string_literals.zig b/test/behavior/string_literals.zig
index 898de2167ca5..a45403af9700 100644
--- a/test/behavior/string_literals.zig
+++ b/test/behavior/string_literals.zig
@@ -101,3 +101,14 @@ test "Peer type resolution with string literals and unknown length u8 pointers"
     try std.testing.expect(@TypeOf("", "a", @as([*:0]const u8, "")) == [*:0]const u8);
     try std.testing.expect(@TypeOf(@as([*:0]const u8, "baz"), "foo", "bar") == [*:0]const u8);
 }
+
+test "including the sentinel when dereferencing a string literal" {
+    var var_str = "abc";
+    const var_derefed = var_str[0 .. var_str.len + 1].*;
+
+    const const_str = "abc";
+    const const_derefed = const_str[0 .. const_str.len + 1].*;
+
+    try std.testing.expectEqualSlices(u8, &var_derefed, &const_derefed);
+    try std.testing.expectEqual(0, const_derefed[3]);
+}

From d9f1a952b8b0e19aafcf568b35cc220adbb4a7b5 Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Tue, 2 Jul 2024 06:15:29 -0600
Subject: [PATCH 032/152] build: fix WriteFile and addCSourceFiles not adding
 LazyPath deps

Adds a missing call to addLazyPathDependenciesOnly in
std.Build.Module.addCSourceFiles.

Also fixes an issue in std.Build.Step.WriteFile where it wasn't updating
all the GeneratedFile instances for every directory. To fix the second
issue, I removed all the GeneratedFile instances; now all files and
directories reference the step's main GeneratedFile via sub paths.
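For reference, the previously-broken pattern, distilled from the new
standalone test added below (a sketch; b is the usual *std.Build, and the
main.c contents are placeholders):

    const write_files = b.addWriteFiles();
    const generated_main_c = write_files.add("main.c", "int main(void) { return 0; }");

    const exe = b.addExecutable(.{
        .name = "app",
        .target = b.graph.host,
        .optimize = .Debug,
    });
    // The root is a LazyPath into the WriteFile step's output; without the
    // added addLazyPathDependenciesOnly call, nothing ordered the compile
    // step after the WriteFile step that produces main.c.
    exe.addCSourceFiles(.{
        .root = generated_main_c.dirname(),
        .files = &.{"main.c"},
    });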
--- lib/std/Build/Module.zig | 1 + lib/std/Build/Step/WriteFile.zig | 56 ++++++++++-------------- test/src/Cases.zig | 6 ++- test/src/CompareOutput.zig | 10 +++-- test/src/RunTranslatedC.zig | 6 ++- test/src/StackTrace.zig | 5 ++- test/src/TranslateC.zig | 6 ++- test/standalone/build.zig.zon | 3 ++ test/standalone/dep_lazypath/build.zig | 37 ++++++++++++++++ test/standalone/dep_lazypath/inc/foo.h | 1 + test/standalone/dep_lazypath/inctest.zig | 8 ++++ test/tests.zig | 4 +- 12 files changed, 96 insertions(+), 47 deletions(-) create mode 100644 test/standalone/dep_lazypath/build.zig create mode 100644 test/standalone/dep_lazypath/inc/foo.h create mode 100644 test/standalone/dep_lazypath/inctest.zig diff --git a/lib/std/Build/Module.zig b/lib/std/Build/Module.zig index a5163b13af64..d40e86e46a7b 100644 --- a/lib/std/Build/Module.zig +++ b/lib/std/Build/Module.zig @@ -484,6 +484,7 @@ pub fn addCSourceFiles(m: *Module, options: AddCSourceFilesOptions) void { .flags = b.dupeStrings(options.flags), }; m.link_objects.append(allocator, .{ .c_source_files = c_source_files }) catch @panic("OOM"); + addLazyPathDependenciesOnly(m, c_source_files.root); } pub fn addCSourceFile(m: *Module, source: CSourceFile) void { diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 0639573b8fea..013c58890a2d 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -17,8 +17,8 @@ const WriteFile = @This(); step: Step, // The elements here are pointers because we need stable pointers for the GeneratedFile field. -files: std.ArrayListUnmanaged(*File), -directories: std.ArrayListUnmanaged(*Directory), +files: std.ArrayListUnmanaged(File), +directories: std.ArrayListUnmanaged(Directory), output_source_files: std.ArrayListUnmanaged(OutputSourceFile), generated_directory: std.Build.GeneratedFile, @@ -26,20 +26,14 @@ generated_directory: std.Build.GeneratedFile, pub const base_id: Step.Id = .write_file; pub const File = struct { - generated_file: std.Build.GeneratedFile, sub_path: []const u8, contents: Contents, - - pub fn getPath(file: *File) std.Build.LazyPath { - return .{ .generated = .{ .file = &file.generated_file } }; - } }; pub const Directory = struct { source: std.Build.LazyPath, sub_path: []const u8, options: Options, - generated_dir: std.Build.GeneratedFile, pub const Options = struct { /// File paths that end in any of these suffixes will be excluded from copying. @@ -56,10 +50,6 @@ pub const Directory = struct { }; } }; - - pub fn getPath(dir: *Directory) std.Build.LazyPath { - return .{ .generated = .{ .file = &dir.generated_dir } }; - } }; pub const OutputSourceFile = struct { @@ -92,15 +82,18 @@ pub fn create(owner: *std.Build) *WriteFile { pub fn add(write_file: *WriteFile, sub_path: []const u8, bytes: []const u8) std.Build.LazyPath { const b = write_file.step.owner; const gpa = b.allocator; - const file = gpa.create(File) catch @panic("OOM"); - file.* = .{ - .generated_file = .{ .step = &write_file.step }, + const file = File{ .sub_path = b.dupePath(sub_path), .contents = .{ .bytes = b.dupe(bytes) }, }; write_file.files.append(gpa, file) catch @panic("OOM"); write_file.maybeUpdateName(); - return file.getPath(); + return .{ + .generated = .{ + .file = &write_file.generated_directory, + .sub_path = file.sub_path, + }, + }; } /// Place the file into the generated directory within the local cache, @@ -113,9 +106,7 @@ pub fn add(write_file: *WriteFile, sub_path: []const u8, bytes: []const u8) std. 
pub fn addCopyFile(write_file: *WriteFile, source: std.Build.LazyPath, sub_path: []const u8) std.Build.LazyPath { const b = write_file.step.owner; const gpa = b.allocator; - const file = gpa.create(File) catch @panic("OOM"); - file.* = .{ - .generated_file = .{ .step = &write_file.step }, + const file = File{ .sub_path = b.dupePath(sub_path), .contents = .{ .copy = source }, }; @@ -123,7 +114,12 @@ pub fn addCopyFile(write_file: *WriteFile, source: std.Build.LazyPath, sub_path: write_file.maybeUpdateName(); source.addStepDependencies(&write_file.step); - return file.getPath(); + return .{ + .generated = .{ + .file = &write_file.generated_directory, + .sub_path = file.sub_path, + }, + }; } /// Copy files matching the specified exclude/include patterns to the specified subdirectory @@ -137,18 +133,21 @@ pub fn addCopyDirectory( ) std.Build.LazyPath { const b = write_file.step.owner; const gpa = b.allocator; - const dir = gpa.create(Directory) catch @panic("OOM"); - dir.* = .{ + const dir = Directory{ .source = source.dupe(b), .sub_path = b.dupePath(sub_path), .options = options.dupe(b), - .generated_dir = .{ .step = &write_file.step }, }; write_file.directories.append(gpa, dir) catch @panic("OOM"); write_file.maybeUpdateName(); source.addStepDependencies(&write_file.step); - return dir.getPath(); + return .{ + .generated = .{ + .file = &write_file.generated_directory, + .sub_path = dir.sub_path, + }, + }; } /// A path relative to the package root. @@ -278,11 +277,6 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { if (try step.cacheHit(&man)) { const digest = man.final(); - for (write_file.files.items) |file| { - file.generated_file.path = try b.cache_root.join(b.allocator, &.{ - "o", &digest, file.sub_path, - }); - } write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest }); return; } @@ -342,10 +336,6 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prev_status; }, } - - file.generated_file.path = try b.cache_root.join(b.allocator, &.{ - cache_path, file.sub_path, - }); } for (write_file.directories.items) |dir| { const full_src_dir_path = dir.source.getPath2(b, step); diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 9296f4668f87..b8a3260ad6c6 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -665,10 +665,12 @@ pub fn lowerToBuildSteps( const writefiles = b.addWriteFiles(); var file_sources = std.StringHashMap(std.Build.LazyPath).init(b.allocator); defer file_sources.deinit(); - for (update.files.items) |file| { + const first_file = update.files.items[0]; + const root_source_file = writefiles.add(first_file.path, first_file.src); + file_sources.put(first_file.path, root_source_file) catch @panic("OOM"); + for (update.files.items[1..]) |file| { file_sources.put(file.path, writefiles.add(file.path, file.src)) catch @panic("OOM"); } - const root_source_file = writefiles.files.items[0].getPath(); const artifact = if (case.is_test) b.addTest(.{ .root_source_file = root_source_file, diff --git a/test/src/CompareOutput.zig b/test/src/CompareOutput.zig index 0179057987fc..58c123870526 100644 --- a/test/src/CompareOutput.zig +++ b/test/src/CompareOutput.zig @@ -81,7 +81,9 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void { const b = self.b; const write_src = b.addWriteFiles(); - for (case.sources.items) |src_file| { + const first_src = case.sources.items[0]; + const first_file = write_src.add(first_src.filename, first_src.source); + for (case.sources.items[1..]) |src_file| { _ = 
write_src.add(src_file.filename, src_file.source); } @@ -99,7 +101,7 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void { .target = b.graph.host, .optimize = .Debug, }); - exe.addAssemblyFile(write_src.files.items[0].getPath()); + exe.addAssemblyFile(first_file); const run = b.addRunArtifact(exe); run.setName(annotated_case_name); @@ -119,7 +121,7 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void { const exe = b.addExecutable(.{ .name = "test", - .root_source_file = write_src.files.items[0].getPath(), + .root_source_file = first_file, .optimize = optimize, .target = b.graph.host, }); @@ -145,7 +147,7 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void { const exe = b.addExecutable(.{ .name = "test", - .root_source_file = write_src.files.items[0].getPath(), + .root_source_file = first_file, .target = b.graph.host, .optimize = .Debug, }); diff --git a/test/src/RunTranslatedC.zig b/test/src/RunTranslatedC.zig index e437a33ba4de..59f87c6a8298 100644 --- a/test/src/RunTranslatedC.zig +++ b/test/src/RunTranslatedC.zig @@ -72,11 +72,13 @@ pub fn addCase(self: *RunTranslatedCContext, case: *const TestCase) void { } else if (self.test_filters.len > 0) return; const write_src = b.addWriteFiles(); - for (case.sources.items) |src_file| { + const first_case = case.sources.items[0]; + const root_source_file = write_src.add(first_case.filename, first_case.source); + for (case.sources.items[1..]) |src_file| { _ = write_src.add(src_file.filename, src_file.source); } const translate_c = b.addTranslateC(.{ - .root_source_file = write_src.files.items[0].getPath(), + .root_source_file = root_source_file, .target = b.graph.host, .optimize = .Debug, }); diff --git a/test/src/StackTrace.zig b/test/src/StackTrace.zig index 1ffcb1f880b7..09af0005de4b 100644 --- a/test/src/StackTrace.zig +++ b/test/src/StackTrace.zig @@ -51,10 +51,11 @@ fn addExpect( if (mem.indexOf(u8, annotated_case_name, test_filter)) |_| break; } else if (self.test_filters.len > 0) return; - const write_src = b.addWriteFile("source.zig", source); + const write_files = b.addWriteFiles(); + const source_zig = write_files.add("source.zig", source); const exe = b.addExecutable(.{ .name = "test", - .root_source_file = write_src.files.items[0].getPath(), + .root_source_file = source_zig, .optimize = optimize_mode, .target = b.graph.host, .error_tracing = mode_config.error_tracing, diff --git a/test/src/TranslateC.zig b/test/src/TranslateC.zig index 7a75cd64b706..c4cadcef5b27 100644 --- a/test/src/TranslateC.zig +++ b/test/src/TranslateC.zig @@ -93,12 +93,14 @@ pub fn addCase(self: *TranslateCContext, case: *const TestCase) void { } else if (self.test_filters.len > 0) return; const write_src = b.addWriteFiles(); - for (case.sources.items) |src_file| { + const first_src = case.sources.items[0]; + const root_source_file = write_src.add(first_src.filename, first_src.source); + for (case.sources.items[1..]) |src_file| { _ = write_src.add(src_file.filename, src_file.source); } const translate_c = b.addTranslateC(.{ - .root_source_file = write_src.files.items[0].getPath(), + .root_source_file = root_source_file, .target = b.resolveTargetQuery(case.target), .optimize = .Debug, }); diff --git a/test/standalone/build.zig.zon b/test/standalone/build.zig.zon index 80e9ba046c76..eb355ad25959 100644 --- a/test/standalone/build.zig.zon +++ b/test/standalone/build.zig.zon @@ -83,6 +83,9 @@ .dep_shared_builtin = .{ .path = "dep_shared_builtin", }, + .dep_lazypath = .{ + .path = "dep_lazypath", + }, .dirname = .{ .path = "dirname", }, diff 
--git a/test/standalone/dep_lazypath/build.zig b/test/standalone/dep_lazypath/build.zig new file mode 100644 index 000000000000..f309487981c7 --- /dev/null +++ b/test/standalone/dep_lazypath/build.zig @@ -0,0 +1,37 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const test_step = b.step("test", "Test it"); + b.default_step = test_step; + + const optimize: std.builtin.OptimizeMode = .Debug; + + { + const write_files = b.addWriteFiles(); + const generated_main_c = write_files.add("main.c", ""); + const exe = b.addExecutable(.{ + .name = "test", + .target = b.graph.host, + .optimize = optimize, + }); + exe.addCSourceFiles(.{ + .root = generated_main_c.dirname(), + .files = &.{"main.c"}, + }); + b.step("csourcefiles", "").dependOn(&exe.step); + test_step.dependOn(&exe.step); + } + { + const write_files = b.addWriteFiles(); + const dir = write_files.addCopyDirectory(b.path("inc"), "", .{}); + const exe = b.addExecutable(.{ + .name = "test", + .root_source_file = b.path("inctest.zig"), + .target = b.graph.host, + .optimize = optimize, + }); + exe.addIncludePath(dir); + b.step("copydir", "").dependOn(&exe.step); + test_step.dependOn(&exe.step); + } +} diff --git a/test/standalone/dep_lazypath/inc/foo.h b/test/standalone/dep_lazypath/inc/foo.h new file mode 100644 index 000000000000..b174feb6bc0c --- /dev/null +++ b/test/standalone/dep_lazypath/inc/foo.h @@ -0,0 +1 @@ +#define foo_value 42 diff --git a/test/standalone/dep_lazypath/inctest.zig b/test/standalone/dep_lazypath/inctest.zig new file mode 100644 index 000000000000..398303663ff0 --- /dev/null +++ b/test/standalone/dep_lazypath/inctest.zig @@ -0,0 +1,8 @@ +const std = @import("std"); +const c = @cImport({ + @cInclude("foo.h"); +}); +comptime { + std.debug.assert(c.foo_value == 42); +} +pub fn main() void {} diff --git a/test/tests.zig b/test/tests.zig index bdbd70b51ddc..2202936d5965 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -783,7 +783,7 @@ pub fn addCliTests(b: *std.Build) *Step { if (builtin.os.tag == .linux and builtin.cpu.arch == .x86_64) { const tmp_path = b.makeTempPath(); - const writefile = b.addWriteFile("example.zig", + const example_zig = b.addWriteFiles().add("example.zig", \\// Type your code here, or load an example. \\export fn square(num: i32) i32 { \\ return num * num; @@ -804,7 +804,7 @@ pub fn addCliTests(b: *std.Build) *Step { "-fno-emit-bin", "-fno-emit-h", "-fstrip", "-OReleaseFast", }); - run.addFileArg(writefile.files.items[0].getPath()); + run.addFileArg(example_zig); const example_s = run.addPrefixedOutputFileArg("-femit-asm=", "example.s"); const checkfile = b.addCheckFile(example_s, .{ From 6756aaccf14e168132cabe8e0d7933f4fd51e9a9 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 22:01:19 +0200 Subject: [PATCH 033/152] macho: do not save rpaths globally in the driver --- src/link/MachO.zig | 32 ++++++++++---------- src/link/MachO/load_commands.zig | 50 +++++++++++++++++++------------- 2 files changed, 47 insertions(+), 35 deletions(-) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 52b57a2c58e1..76bea8176630 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -150,7 +150,6 @@ no_implicit_dylibs: bool = false, /// Whether the linker should parse and always force load objects containing ObjC in archives. // TODO: in Zig we currently take -ObjC as always on force_load_objc: bool = true, -rpaths: std.ArrayListUnmanaged([]const u8) = .{}, /// Hot-code swapping state. 
hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{}, @@ -359,8 +358,6 @@ pub fn deinit(self: *MachO) void { } self.thunks.deinit(gpa); self.unwind_records.deinit(gpa); - - self.rpaths.deinit(gpa); } pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { @@ -398,9 +395,6 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path); if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path); - try self.rpaths.ensureUnusedCapacity(gpa, self.base.rpath_list.len); - self.rpaths.appendSliceAssumeCapacity(self.base.rpath_list); - var positionals = std.ArrayList(Compilation.LinkObject).init(gpa); defer positionals.deinit(); @@ -419,10 +413,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) // TSAN if (comp.config.any_sanitize_thread) { - const path = comp.tsan_dynamic_lib.?.full_object_path; - try positionals.append(.{ .path = path }); - const basename = std.fs.path.dirname(path) orelse "."; - try self.rpaths.append(gpa, basename); + try positionals.append(.{ .path = comp.tsan_dynamic_lib.?.full_object_path }); } for (positionals.items) |obj| { @@ -780,7 +771,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { try argv.append(syslibroot); } - for (self.rpaths.items) |rpath| { + for (self.base.rpath_list) |rpath| { try argv.append("-rpath"); try argv.append(rpath); } @@ -840,7 +831,9 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { } if (comp.config.any_sanitize_thread) { - try argv.append(comp.tsan_dynamic_lib.?.full_object_path); + const path = comp.tsan_dynamic_lib.?.full_object_path; + try argv.append(path); + try argv.appendSlice(&.{ "-rpath", std.fs.path.dirname(path) orelse "." 
}); } for (self.lib_dirs) |lib_dir| { @@ -2968,7 +2961,8 @@ pub fn writeStrtab(self: *MachO, off: u32) !u32 { } fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } { - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; const needed_size = try load_commands.calcLoadCommandsSize(self, false); const buffer = try gpa.alloc(u8, needed_size); defer gpa.free(buffer); @@ -3024,8 +3018,16 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } { ncmds += 1; } - try load_commands.writeRpathLCs(self.rpaths.items, writer); - ncmds += self.rpaths.items.len; + for (self.base.rpath_list) |rpath| { + try load_commands.writeRpathLC(rpath, writer); + ncmds += 1; + } + if (comp.config.any_sanitize_thread) { + const path = comp.tsan_dynamic_lib.?.full_object_path; + const rpath = std.fs.path.dirname(path) orelse "."; + try load_commands.writeRpathLC(rpath, writer); + ncmds += 1; + } try writer.writeStruct(macho.source_version_command{ .version = 0 }); ncmds += 1; diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 8719d92ea868..003612fa2f80 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -18,6 +18,9 @@ fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool } pub fn calcLoadCommandsSize(macho_file: *MachO, assume_max_path_len: bool) !u32 { + const comp = macho_file.base.comp; + const gpa = comp.gpa; + var sizeofcmds: u64 = 0; // LC_SEGMENT_64 @@ -48,7 +51,6 @@ pub fn calcLoadCommandsSize(macho_file: *MachO, assume_max_path_len: bool) !u32 } // LC_ID_DYLIB if (macho_file.base.isDynLib()) { - const gpa = macho_file.base.comp.gpa; const emit = macho_file.base.emit; const install_name = macho_file.install_name orelse try emit.directory.join(gpa, &.{emit.sub_path}); @@ -61,7 +63,17 @@ pub fn calcLoadCommandsSize(macho_file: *MachO, assume_max_path_len: bool) !u32 } // LC_RPATH { - for (macho_file.rpaths.items) |rpath| { + for (macho_file.base.rpath_list) |rpath| { + sizeofcmds += calcInstallNameLen( + @sizeOf(macho.rpath_command), + rpath, + assume_max_path_len, + ); + } + + if (comp.config.any_sanitize_thread) { + const path = comp.tsan_dynamic_lib.?.full_object_path; + const rpath = std.fs.path.dirname(path) orelse "."; sizeofcmds += calcInstallNameLen( @sizeOf(macho.rpath_command), rpath, @@ -245,24 +257,22 @@ pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void { }, writer); } -pub fn writeRpathLCs(rpaths: []const []const u8, writer: anytype) !void { - for (rpaths) |rpath| { - const rpath_len = rpath.len + 1; - const cmdsize = @as(u32, @intCast(mem.alignForward( - u64, - @sizeOf(macho.rpath_command) + rpath_len, - @sizeOf(u64), - ))); - try writer.writeStruct(macho.rpath_command{ - .cmdsize = cmdsize, - .path = @sizeOf(macho.rpath_command), - }); - try writer.writeAll(rpath); - try writer.writeByte(0); - const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len; - if (padding > 0) { - try writer.writeByteNTimes(0, padding); - } +pub fn writeRpathLC(rpath: []const u8, writer: anytype) !void { + const rpath_len = rpath.len + 1; + const cmdsize = @as(u32, @intCast(mem.alignForward( + u64, + @sizeOf(macho.rpath_command) + rpath_len, + @sizeOf(u64), + ))); + try writer.writeStruct(macho.rpath_command{ + .cmdsize = cmdsize, + .path = @sizeOf(macho.rpath_command), + }); + try writer.writeAll(rpath); + try writer.writeByte(0); + const padding = cmdsize - @sizeOf(macho.rpath_command) - rpath_len; + if (padding > 0) { + try 
writer.writeByteNTimes(0, padding); } } From bc8cd135987c7dc7419d034ba31178331d606cfa Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 27 Jun 2024 23:21:44 +0100 Subject: [PATCH 034/152] compiler: rename AnalSubject to AnalUnit I meant to call it this originally, I just got mixed up -- sorry! --- src/Compilation.zig | 2 +- src/InternPool.zig | 22 +++++++------- src/Sema.zig | 14 ++++----- src/Zcu.zig | 70 ++++++++++++++++++++++----------------------- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 343b22d2b1d3..e0bbdd2e03c2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3494,7 +3494,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo .{@errorName(err)}, )); decl.analysis = .codegen_failure; - try module.retryable_failures.append(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + try module.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |pkg| { diff --git a/src/InternPool.zig b/src/InternPool.zig index cf56550c258c..c6b27acaf39d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -81,7 +81,7 @@ namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.In /// Given a `Depender`, points to an entry in `dep_entries` whose `depender` /// matches. The `next_dependee` field can be used to iterate all such entries /// and remove them from the corresponding lists. -first_dependency: std.AutoArrayHashMapUnmanaged(AnalSubject, DepEntry.Index) = .{}, +first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .{}, /// Stores dependency information. The hashmaps declared above are used to look /// up entries in this list as required. This is not stored in `extra` so that @@ -132,36 +132,36 @@ pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.I return @enumFromInt(gop.index); } -/// Analysis Subject. Represents a single entity which undergoes semantic analysis. +/// Analysis Unit. Represents a single entity which undergoes semantic analysis. /// This is either a `Decl` (in future `Cau`) or a runtime function. /// The LSB is used as a tag bit. /// This is the "source" of an incremental dependency edge. 
-pub const AnalSubject = packed struct(u32) { +pub const AnalUnit = packed struct(u32) { kind: enum(u1) { decl, func }, index: u31, pub const Unwrapped = union(enum) { decl: DeclIndex, func: InternPool.Index, }; - pub fn unwrap(as: AnalSubject) Unwrapped { + pub fn unwrap(as: AnalUnit) Unwrapped { return switch (as.kind) { .decl => .{ .decl = @enumFromInt(as.index) }, .func => .{ .func = @enumFromInt(as.index) }, }; } - pub fn wrap(raw: Unwrapped) AnalSubject { + pub fn wrap(raw: Unwrapped) AnalUnit { return switch (raw) { .decl => |decl| .{ .kind = .decl, .index = @intCast(@intFromEnum(decl)) }, .func => |func| .{ .kind = .func, .index = @intCast(@intFromEnum(func)) }, }; } - pub fn toOptional(as: AnalSubject) Optional { + pub fn toOptional(as: AnalUnit) Optional { return @enumFromInt(@as(u32, @bitCast(as))); } pub const Optional = enum(u32) { none = std.math.maxInt(u32), _, - pub fn unwrap(opt: Optional) ?AnalSubject { + pub fn unwrap(opt: Optional) ?AnalUnit { return switch (opt) { .none => null, _ => @bitCast(@intFromEnum(opt)), @@ -178,7 +178,7 @@ pub const Dependee = union(enum) { namespace_name: NamespaceNameKey, }; -pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalSubject) void { +pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalUnit) void { var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional(); while (opt_idx.unwrap()) |idx| { @@ -207,7 +207,7 @@ pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: pub const DependencyIterator = struct { ip: *const InternPool, next_entry: DepEntry.Index.Optional, - pub fn next(it: *DependencyIterator) ?AnalSubject { + pub fn next(it: *DependencyIterator) ?AnalUnit { const idx = it.next_entry.unwrap() orelse return null; const entry = it.ip.dep_entries.items[@intFromEnum(idx)]; it.next_entry = entry.next; @@ -236,7 +236,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI }; } -pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalSubject, dependee: Dependee) Allocator.Error!void { +pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, dependee: Dependee) Allocator.Error!void { const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: { // The entry already exists, so there is capacity to overwrite it later. break :dep idx.toOptional(); @@ -300,7 +300,7 @@ pub const DepEntry = extern struct { /// the first and only entry in one of `intern_pool.*_deps`, and does not /// appear in any list by `first_dependency`, but is not in /// `free_dep_entries` since `*_deps` stores a reference to it. - depender: AnalSubject.Optional, + depender: AnalUnit.Optional, /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. /// Used to iterate all dependers for a given dependee during an update. /// null if this is the end of the list. 
diff --git a/src/Sema.zig b/src/Sema.zig index db520f5789bf..df7ad3cd64ed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2735,12 +2735,12 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!zcu.comp.debug_incremental) return false; const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu); - const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index }); + const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index }); const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or zcu.potentially_outdated.swapRemove(decl_as_depender); if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); @@ -2834,7 +2834,7 @@ fn zirStructDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3068,7 +3068,7 @@ fn zirEnumDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3334,7 +3334,7 @@ fn zirUnionDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3422,7 +3422,7 @@ fn zirOpaqueDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) }, ); } @@ -38362,7 +38362,7 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { return; } - const depender = InternPool.AnalSubject.wrap( + const depender = InternPool.AnalUnit.wrap( if (sema.owner_func_index != .none) .{ .func = sema.owner_func_index } else diff --git a/src/Zcu.zig b/src/Zcu.zig index a490990cf3f9..f776879df36b 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -139,26 +139,26 @@ global_error_set: GlobalErrorSet = .{}, /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, -/// Value is the number of PO or outdated Decls which this AnalSubject depends on. -potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{}, -/// Value is the number of PO or outdated Decls which this AnalSubject depends on. -/// Once this value drops to 0, the AnalSubject is a candidate for re-analysis. -outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{}, -/// This contains all `AnalSubject`s in `outdated` whose PO dependency count is 0. -/// Such `AnalSubject`s are ready for immediate re-analysis. +/// Value is the number of PO or outdated Decls which this AnalUnit depends on. 
+potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, u32) = .{}, +/// Value is the number of PO or outdated Decls which this AnalUnit depends on. +/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis. +outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, u32) = .{}, +/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0. +/// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. -outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, void) = .{}, +outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, void) = .{}, /// This contains a set of Decls which may not be in `outdated`, but are the /// root Decls of files which have updated source and thus must be re-analyzed. /// If such a Decl is only in this set, the struct type index may be preserved /// (only the namespace might change). If such a Decl is also `outdated`, the /// struct type index must be recreated. outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// This contains a list of AnalSubject whose analysis or codegen failed, but the +/// This contains a list of AnalUnit whose analysis or codegen failed, but the /// failure was something like running out of disk space, and trying again may /// succeed. On the next update, we will flush this list, marking all members of /// it as outdated. -retryable_failures: std.ArrayListUnmanaged(InternPool.AnalSubject) = .{}, +retryable_failures: std.ArrayListUnmanaged(InternPool.AnalUnit) = .{}, stage1_flags: packed struct { have_winmain: bool = false, @@ -3137,9 +3137,9 @@ fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { } } -/// Given a AnalSubject which is newly outdated or PO, mark all AnalSubjects which may -/// in turn be PO, due to a dependency on the original AnalSubject's tyval or IES. -fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.AnalSubject) !void { +/// Given a AnalUnit which is newly outdated or PO, mark all AnalUnits which may +/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES. +fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.AnalUnit) !void { var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) { .decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced .func => |func_index| .{ .func_ies = func_index }, @@ -3161,12 +3161,12 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternP continue; } try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); - // This AnalSubject was not already PO, so we must recursively mark its dependers as also PO. + // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO. try zcu.markTransitiveDependersPotentiallyOutdated(po); } } -pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject { +pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalUnit { if (!zcu.comp.debug_incremental) return null; if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { @@ -3174,8 +3174,8 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject return null; } - // Our goal is to find an outdated AnalSubject which itself has no outdated or - // PO dependencies. 
Most of the time, such an AnalSubject will exist - we track + // Our goal is to find an outdated AnalUnit which itself has no outdated or + // PO dependencies. Most of the time, such an AnalUnit will exist - we track // them in the `outdated_ready` set for efficiency. However, this is not // necessarily the case, since the Decl dependency graph may contain loops // via mutually recursive definitions: @@ -3197,7 +3197,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject // `outdated`. This set will be small (number of files changed in this // update), so it's alright for us to just iterate here. for (zcu.outdated_file_root.keys()) |file_decl| { - const decl_depender = InternPool.AnalSubject.wrap(.{ .decl = file_decl }); + const decl_depender = InternPool.AnalUnit.wrap(.{ .decl = file_decl }); if (zcu.outdated.contains(decl_depender)) { // Since we didn't hit this in the first loop, this Decl must have // pending dependencies, so is ineligible. @@ -3213,7 +3213,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject return decl_depender; } - // There is no single AnalSubject which is ready for re-analysis. Instead, we + // There is no single AnalUnit which is ready for re-analysis. Instead, we // must assume that some Decl with PO dependencies is outdated - e.g. in the // above example we arbitrarily pick one of A or B. We should select a Decl, // since a Decl is definitely responsible for the loop in the dependency @@ -3221,7 +3221,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject // The choice of this Decl could have a big impact on how much total // analysis we perform, since if analysis concludes its tyval is unchanged, - // then other PO AnalSubject may be resolved as up-to-date. To hopefully avoid + // then other PO AnalUnit may be resolved as up-to-date. To hopefully avoid // doing too much work, let's find a Decl which the most things depend on - // the idea is that this will resolve a lot of loops (but this is only a // heuristic). @@ -3271,7 +3271,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject chosen_decl_dependers, }); - return InternPool.AnalSubject.wrap(.{ .decl = chosen_decl_idx.? }); + return InternPool.AnalUnit.wrap(.{ .decl = chosen_decl_idx.? }); } /// During an incremental update, before semantic analysis, call this to flush all values from @@ -3281,12 +3281,12 @@ pub fn flushRetryableFailures(zcu: *Zcu) !void { for (zcu.retryable_failures.items) |depender| { if (zcu.outdated.contains(depender)) continue; if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| { - // This AnalSubject was already PO, but we now consider it outdated. + // This AnalUnit was already PO, but we now consider it outdated. // Any transitive dependencies are already marked PO. try zcu.outdated.put(gpa, depender, kv.value); continue; } - // This AnalSubject was not marked PO, but is now outdated. Mark it as + // This AnalUnit was not marked PO, but is now outdated. Mark it as // such, then recursively mark transitive dependencies as PO. try zcu.outdated.put(gpa, depender, 0); try zcu.markTransitiveDependersPotentiallyOutdated(depender); @@ -3456,7 +3456,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // which tries to limit re-analysis to Decls whose previously listed // dependencies are all up-to-date. 
- const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index }); + const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index }); const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or mod.potentially_outdated.swapRemove(decl_as_depender); @@ -3522,7 +3522,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { else => |e| { decl.analysis = .sema_failure; try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); - try mod.retryable_failures.append(mod.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + try mod.retryable_failures.append(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, decl.navSrcLoc(mod).upgrade(mod), @@ -3581,7 +3581,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In // that's the case, we should remove this function from the binary. if (decl.val.ip_index != func_index) { try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - ip.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); + ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); ip.remove(func_index); @panic("TODO: remove orphaned function from binary"); } @@ -3607,7 +3607,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .complete => {}, } - const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index }); + const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); const was_outdated = zcu.outdated.swapRemove(func_as_depender) or zcu.potentially_outdated.swapRemove(func_as_depender); @@ -3728,7 +3728,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .{@errorName(err)}, )); func.analysis(ip).state = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -3773,7 +3773,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) assert(decl.has_tv); - const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index }); + const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); const is_outdated = mod.outdated.contains(func_as_depender) or mod.potentially_outdated.contains(func_as_depender); @@ -3857,7 +3857,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa if (zcu.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.AnalSubject.wrap(.{ .decl = decl_index }), + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), .{ .src_hash = tracked_inst }, ); } @@ -3906,7 +3906,7 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { if (type_outdated) { // Invalidate the existing type, reusing the decl and namespace. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = file.root_decl.unwrap().? })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = file.root_decl.unwrap().? 
})); zcu.intern_pool.remove(decl.val.toIntern()); decl.val = undefined; _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file); @@ -4097,7 +4097,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { break :ip_index .none; }; - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); decl.analysis = .in_progress; @@ -4323,7 +4323,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { // with a new Decl. // // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(decl.val.toIntern()); decl.analysis = .dependency_failure; return .{ @@ -5026,7 +5026,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); defer decl_prog_node.end(); - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -5627,7 +5627,7 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { .{@errorName(err)}, )); decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { From 7e552dc1e9a8388f71cc32083deb9dd848e79808 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jun 2024 01:36:25 +0100 Subject: [PATCH 035/152] Zcu: rework exports This commit reworks our representation of exported Decls and values in Zcu to be memory-optimized and trivially serialized. All exports are now stored in the `all_exports` array on `Zcu`. An `AnalUnit` which performs an export (either through an `export` annotation or by containing an analyzed `@export`) gains an entry into `single_exports` if it performs only one export, or `multi_exports` if it performs multiple. We no longer store a persistent mapping from a `Decl`/value to all exports of that entity; this state is not necessary for the majority of the pipeline. Instead, we construct it in `Zcu.processExports`, just before flush. This does not affect the algorithmic complexity of `processExports`, since this function already iterates all exports in the `Zcu`. The elimination of `decl_exports` and `value_exports` led to a few non-trivial backend changes. The LLVM backend has been wrangled into a more reasonable state in general regarding exports and externs. The C backend is currently disabled in this commit, because its support for `export` was quite broken, and that was exposed by this work -- I'm hoping @jacobly0 will be able to pick this up! 
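As a rough, self-contained sketch of the storage scheme -- simplified stand-in types (`AnalUnit` as a bare integer, `Export` reduced to a name) rather than the real `Zcu`/`Sema` data structures:

    const std = @import("std");

    // Simplified stand-ins: the real maps are keyed by `InternPool.AnalUnit`
    // and store full `Zcu.Export` values.
    const AnalUnit = u32;
    const Export = struct { name: []const u8 };

    const ExportTable = struct {
        all_exports: std.ArrayListUnmanaged(Export) = .{},
        free_exports: std.ArrayListUnmanaged(u32) = .{},
        single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
        multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Extra) = .{},

        const Extra = struct { index: u32, len: u32 };

        /// Store the exports gathered while analyzing `unit`. A single
        /// export reuses a free slot where possible; several exports are
        /// stored contiguously, so an (index, len) pair describes them all.
        fn flush(t: *ExportTable, gpa: std.mem.Allocator, unit: AnalUnit, exports: []const Export) !void {
            switch (exports.len) {
                0 => {},
                1 => {
                    const idx = t.free_exports.popOrNull() orelse idx: {
                        _ = try t.all_exports.addOne(gpa);
                        break :idx @as(u32, @intCast(t.all_exports.items.len - 1));
                    };
                    t.all_exports.items[idx] = exports[0];
                    try t.single_exports.put(gpa, unit, idx);
                },
                else => {
                    const base: u32 = @intCast(t.all_exports.items.len);
                    try t.all_exports.appendSlice(gpa, exports);
                    try t.multi_exports.put(gpa, unit, .{
                        .index = base,
                        .len = @intCast(exports.len),
                    });
                },
            }
        }
    };

    test "single and multi exports" {
        const gpa = std.testing.allocator;
        var t: ExportTable = .{};
        defer {
            t.all_exports.deinit(gpa);
            t.free_exports.deinit(gpa);
            t.single_exports.deinit(gpa);
            t.multi_exports.deinit(gpa);
        }
        try t.flush(gpa, 1, &.{.{ .name = "foo" }});
        try t.flush(gpa, 2, &.{ .{ .name = "bar" }, .{ .name = "baz" } });
        const info = t.multi_exports.get(2).?;
        try std.testing.expectEqualStrings("bar", t.all_exports.items[info.index].name);
        try std.testing.expectEqual(@as(u32, 2), info.len);
    }

Deleting a unit's exports then amounts to removing its map entry and pushing the freed indices onto `free_exports`; since everything lives in flat arrays and integer-keyed maps, the whole structure serializes trivially.
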
--- src/Sema.zig | 114 ++++++++----- src/Zcu.zig | 303 ++++++++++++++++++----------------- src/codegen/c.zig | 2 + src/codegen/llvm.zig | 263 +++++++++++++----------------- src/link.zig | 12 +- src/link/C.zig | 29 +++- src/link/Coff.zig | 35 ++-- src/link/Elf.zig | 12 +- src/link/Elf/ZigObject.zig | 30 ++-- src/link/MachO.zig | 14 +- src/link/MachO/ZigObject.zig | 30 ++-- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 66 +++++--- src/link/SpirV.zig | 5 +- src/link/Wasm.zig | 12 +- src/link/Wasm/ZigObject.zig | 17 +- 16 files changed, 498 insertions(+), 450 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index df7ad3cd64ed..fafde99f47bc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -117,6 +117,10 @@ maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAll /// Backed by gpa. comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{}, +/// A list of exports performed by this analysis. After this `Sema` terminates, +/// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`. +exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, + const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. runtime_index: Value.RuntimeIndex, @@ -186,6 +190,7 @@ const build_options = @import("build_options"); const Compilation = @import("Compilation.zig"); const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; +const AnalUnit = InternPool.AnalUnit; const ComptimeAllocIndex = InternPool.ComptimeAllocIndex; pub const default_branch_quota = 1000; @@ -875,6 +880,7 @@ pub fn deinit(sema: *Sema) void { sema.base_allocs.deinit(gpa); sema.maybe_comptime_allocs.deinit(gpa); sema.comptime_allocs.deinit(gpa); + sema.exports.deinit(gpa); sema.* = undefined; } @@ -2735,12 +2741,12 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!zcu.comp.debug_incremental) return false; const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu); - const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index }); + const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index }); const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or zcu.potentially_outdated.swapRemove(decl_as_depender); if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); @@ -2834,7 +2840,7 @@ fn zirStructDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( sema.gpa, - InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3068,7 +3074,7 @@ fn zirEnumDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3334,7 +3340,7 @@ fn zirUnionDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, 
block.getFileScope(mod), inst) }, ); } @@ -3422,7 +3428,7 @@ fn zirOpaqueDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.AnalUnit.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) }, ); } @@ -6423,10 +6429,9 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.analyzeExport(block, src, options, decl_index); } - try addExport(mod, .{ + try sema.exports.append(mod.gpa, .{ .opts = options, .src = src, - .owner_decl = sema.owner_decl_index, .exported = .{ .value = operand.toIntern() }, .status = .in_progress, }); @@ -6469,46 +6474,14 @@ pub fn analyzeExport( try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - try addExport(mod, .{ + try sema.exports.append(gpa, .{ .opts = options, .src = src, - .owner_decl = sema.owner_decl_index, .exported = .{ .decl_index = exported_decl_index }, .status = .in_progress, }); } -fn addExport(mod: *Module, export_init: Module.Export) error{OutOfMemory}!void { - const gpa = mod.gpa; - - try mod.decl_exports.ensureUnusedCapacity(gpa, 1); - try mod.value_exports.ensureUnusedCapacity(gpa, 1); - try mod.export_owners.ensureUnusedCapacity(gpa, 1); - - const new_export = try gpa.create(Module.Export); - errdefer gpa.destroy(new_export); - - new_export.* = export_init; - - const eo_gop = mod.export_owners.getOrPutAssumeCapacity(export_init.owner_decl); - if (!eo_gop.found_existing) eo_gop.value_ptr.* = .{}; - try eo_gop.value_ptr.append(gpa, new_export); - errdefer _ = eo_gop.value_ptr.pop(); - - switch (export_init.exported) { - .decl_index => |decl_index| { - const de_gop = mod.decl_exports.getOrPutAssumeCapacity(decl_index); - if (!de_gop.found_existing) de_gop.value_ptr.* = .{}; - try de_gop.value_ptr.append(gpa, new_export); - }, - .value => |value| { - const ve_gop = mod.value_exports.getOrPutAssumeCapacity(value); - if (!ve_gop.found_existing) ve_gop.value_ptr.* = .{}; - try ve_gop.value_ptr.append(gpa, new_export); - }, - } -} - fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; @@ -8411,6 +8384,9 @@ fn instantiateGenericCall( }); sema.appendRefsAssumeCapacity(runtime_args.items); + // `child_sema` is owned by us, so just take its exports. 
+ try sema.exports.appendSlice(sema.gpa, child_sema.exports.items); + if (ensure_result_used) { try sema.ensureResultUsed(block, sema.typeOf(result), call_src); } @@ -35263,6 +35239,8 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } + + try sema.flushExports(); } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { @@ -36225,6 +36203,8 @@ fn semaStructFields( struct_type.clearTypesWip(ip); if (!any_inits) struct_type.setHaveFieldInits(ip); + + try sema.flushExports(); } // This logic must be kept in sync with `semaStructFields` @@ -36365,6 +36345,8 @@ fn semaStructFieldInits( struct_type.field_inits.get(ip)[field_i] = default_val.toIntern(); } } + + try sema.flushExports(); } fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { @@ -36738,6 +36720,8 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl)); union_type.tagTypePtr(ip).* = enum_ty; } + + try sema.flushExports(); } fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value { @@ -38362,7 +38346,7 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { return; } - const depender = InternPool.AnalUnit.wrap( + const depender = AnalUnit.wrap( if (sema.owner_func_index != .none) .{ .func = sema.owner_func_index } else @@ -38494,6 +38478,52 @@ fn analyzeUnreachable(sema: *Sema, block: *Block, src: LazySrcLoc, safety_check: } } +/// This should be called exactly once, at the end of a `Sema`'s lifetime. +/// It takes the exports stored in `sema.export` and flushes them to the `Zcu` +/// to be processed by the linker after the update. +pub fn flushExports(sema: *Sema) !void { + if (sema.exports.items.len == 0) return; + + const zcu = sema.mod; + const gpa = zcu.gpa; + + const unit: AnalUnit = if (sema.owner_func_index != .none) + AnalUnit.wrap(.{ .func = sema.owner_func_index }) + else + AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + + // There may be existing exports. For instance, a struct may export + // things during both field type resolution and field default resolution. + // + // So, pick up and delete any existing exports. This strategy performs + // redundant work, but that's okay, because this case is exceedingly rare. + if (zcu.single_exports.get(unit)) |export_idx| { + try sema.exports.append(gpa, zcu.all_exports.items[export_idx]); + } else if (zcu.multi_exports.get(unit)) |info| { + try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]); + } + zcu.deleteUnitExports(unit); + + // `sema.exports` is completed; store the data into the `Zcu`. 
+ if (sema.exports.items.len == 1) { + try zcu.single_exports.ensureUnusedCapacity(gpa, 1); + const export_idx = zcu.free_exports.popOrNull() orelse idx: { + _ = try zcu.all_exports.addOne(gpa); + break :idx zcu.all_exports.items.len - 1; + }; + zcu.all_exports.items[export_idx] = sema.exports.items[0]; + zcu.single_exports.putAssumeCapacityNoClobber(unit, @intCast(export_idx)); + } else { + try zcu.multi_exports.ensureUnusedCapacity(gpa, 1); + const exports_base = zcu.all_exports.items.len; + try zcu.all_exports.appendSlice(gpa, sema.exports.items); + zcu.multi_exports.putAssumeCapacityNoClobber(unit, .{ + .index = @intCast(exports_base), + .len = @intCast(sema.exports.items.len), + }); + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Zcu.zig b/src/Zcu.zig index f776879df36b..3a329f0b03f8 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -35,6 +35,7 @@ const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; +const AnalUnit = InternPool.AnalUnit; const BuiltinFn = std.zig.BuiltinFn; const LlvmObject = @import("codegen/llvm.zig").Object; @@ -71,18 +72,22 @@ codegen_prog_node: std.Progress.Node = undefined, global_zir_cache: Compilation.Directory, /// Used by AstGen worker to load and store ZIR cache. local_zir_cache: Compilation.Directory, -/// It's rare for a decl to be exported, so we save memory by having a sparse -/// map of Decl indexes to details about them being exported. -/// The Export memory is owned by the `export_owners` table; the slice itself -/// is owned by this table. The slice is guaranteed to not be empty. -decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{}, -/// Same as `decl_exports` but for exported constant values. -value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(*Export)) = .{}, -/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl -/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that -/// is performing the export of another Decl. -/// This table owns the Export memory. -export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{}, +/// This is where all `Export` values are stored. Not all values here are necessarily valid exports; +/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted. +all_exports: ArrayListUnmanaged(Export) = .{}, +/// This is a list of free indices in `all_exports`. These indices may be reused by exports from +/// future semantic analysis. +free_exports: ArrayListUnmanaged(u32) = .{}, +/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of +/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit` +/// whose analysis triggered the export. +single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports. +/// The exports are `all_exports.items[index..][0..len]`. +multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { + index: u32, + len: u32, +}) = .{}, /// The set of all the Zig source files in the Module. 
We keep track of this in order /// to iterate over it and check which source files have been modified on the file system when /// an update is requested, as well as to cache `@import` results. @@ -126,9 +131,8 @@ compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, extern struct { failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{}, /// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator. failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{}, -/// Using a map here for consistency with the other fields here. -/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. -failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, +/// Key is index into `all_exports`. +failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{}, /// If a decl failed due to a cimport error, the corresponding Clang errors /// are stored here. cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, std.zig.ErrorBundle) = .{}, @@ -140,14 +144,14 @@ global_error_set: GlobalErrorSet = .{}, error_limit: ErrorInt, /// Value is the number of PO or outdated Decls which this AnalUnit depends on. -potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, u32) = .{}, +potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// Value is the number of PO or outdated Decls which this AnalUnit depends on. /// Once this value drops to 0, the AnalUnit is a candidate for re-analysis. -outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, u32) = .{}, +outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0. /// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. -outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, void) = .{}, +outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, /// This contains a set of Decls which may not be in `outdated`, but are the /// root Decls of files which have updated source and thus must be re-analyzed. /// If such a Decl is only in this set, the struct type index may be preserved @@ -158,7 +162,7 @@ outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, /// failure was something like running out of disk space, and trying again may /// succeed. On the next update, we will flush this list, marking all members of /// it as outdated. -retryable_failures: std.ArrayListUnmanaged(InternPool.AnalUnit) = .{}, +retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{}, stage1_flags: packed struct { have_winmain: bool = false, @@ -267,8 +271,6 @@ pub const Exported = union(enum) { pub const Export = struct { opts: Options, src: LazySrcLoc, - /// The Decl that performs the export. Note that this is *not* the Decl being exported. 
- owner_decl: Decl.Index, exported: Exported, status: enum { in_progress, @@ -2507,20 +2509,10 @@ pub fn deinit(zcu: *Zcu) void { zcu.compile_log_decls.deinit(gpa); - for (zcu.decl_exports.values()) |*export_list| { - export_list.deinit(gpa); - } - zcu.decl_exports.deinit(gpa); - - for (zcu.value_exports.values()) |*export_list| { - export_list.deinit(gpa); - } - zcu.value_exports.deinit(gpa); - - for (zcu.export_owners.values()) |*value| { - freeExportList(gpa, value); - } - zcu.export_owners.deinit(gpa); + zcu.all_exports.deinit(gpa); + zcu.free_exports.deinit(gpa); + zcu.single_exports.deinit(gpa); + zcu.multi_exports.deinit(gpa); zcu.global_error_set.deinit(gpa); @@ -2590,11 +2582,6 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { return decl_index == namespace.decl_index; } -fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { - for (export_list.items) |exp| gpa.destroy(exp); - export_list.deinit(gpa); -} - // TODO https://github.com/ziglang/zig/issues/8643 const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; const HackDataLayout = extern struct { @@ -3139,7 +3126,7 @@ fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { /// Given a AnalUnit which is newly outdated or PO, mark all AnalUnits which may /// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES. -fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.AnalUnit) !void { +fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void { var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) { .decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced .func => |func_index| .{ .func_ies = func_index }, @@ -3166,7 +3153,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternP } } -pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalUnit { +pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.debug_incremental) return null; if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { @@ -3197,7 +3184,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalUnit { // `outdated`. This set will be small (number of files changed in this // update), so it's alright for us to just iterate here. for (zcu.outdated_file_root.keys()) |file_decl| { - const decl_depender = InternPool.AnalUnit.wrap(.{ .decl = file_decl }); + const decl_depender = AnalUnit.wrap(.{ .decl = file_decl }); if (zcu.outdated.contains(decl_depender)) { // Since we didn't hit this in the first loop, this Decl must have // pending dependencies, so is ineligible. @@ -3271,7 +3258,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalUnit { chosen_decl_dependers, }); - return InternPool.AnalUnit.wrap(.{ .decl = chosen_decl_idx.? }); + return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? }); } /// During an incremental update, before semantic analysis, call this to flush all values from @@ -3456,7 +3443,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // which tries to limit re-analysis to Decls whose previously listed // dependencies are all up-to-date. 
- const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index }); + const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index }); const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or mod.potentially_outdated.swapRemove(decl_as_depender); @@ -3485,7 +3472,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. if (build_options.only_c) unreachable; - try mod.deleteDeclExports(decl_index); + mod.deleteUnitExports(AnalUnit.wrap(.{ .decl = decl_index })); } const sema_result: SemaDeclResult = blk: { @@ -3522,7 +3509,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { else => |e| { decl.analysis = .sema_failure; try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); - try mod.retryable_failures.append(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, decl.navSrcLoc(mod).upgrade(mod), @@ -3581,7 +3568,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In // that's the case, we should remove this function from the binary. if (decl.val.ip_index != func_index) { try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); ip.remove(func_index); @panic("TODO: remove orphaned function from binary"); } @@ -3607,12 +3594,14 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .complete => {}, } - const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func_as_depender = AnalUnit.wrap(.{ .func = func_index }); const was_outdated = zcu.outdated.swapRemove(func_as_depender) or zcu.potentially_outdated.swapRemove(func_as_depender); if (was_outdated) { + if (build_options.only_c) unreachable; _ = zcu.outdated_ready.swapRemove(func_as_depender); + zcu.deleteUnitExports(AnalUnit.wrap(.{ .func = func_index })); } switch (func.analysis(ip).state) { @@ -3728,16 +3717,13 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In .{@errorName(err)}, )); func.analysis(ip).state = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { if (build_options.only_c) unreachable; llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - func.analysis(ip).state = .codegen_failure; - }, }; } } @@ -3773,7 +3759,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) assert(decl.has_tv); - const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func_as_depender = AnalUnit.wrap(.{ .func = func_index }); const is_outdated = mod.outdated.contains(func_as_depender) or mod.potentially_outdated.contains(func_as_depender); @@ -3857,7 +3843,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa if (zcu.comp.debug_incremental) { try ip.addDependency( gpa, - InternPool.AnalUnit.wrap(.{ .decl = 
decl_index }), + AnalUnit.wrap(.{ .decl = decl_index }), .{ .src_hash = tracked_inst }, ); } @@ -3906,7 +3892,7 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { if (type_outdated) { // Invalidate the existing type, reusing the decl and namespace. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = file.root_decl.unwrap().? })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = file.root_decl.unwrap().? })); zcu.intern_pool.remove(decl.val.toIntern()); decl.val = undefined; _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file); @@ -4097,7 +4083,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { break :ip_index .none; }; - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index })); decl.analysis = .in_progress; @@ -4293,6 +4279,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } + try sema.flushExports(); + return result; } @@ -4323,7 +4311,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { // with a new Decl. // // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(decl.val.toIntern()); decl.analysis = .dependency_failure; return .{ @@ -4949,63 +4937,44 @@ pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!vo } } -/// Delete all the Export objects that are caused by this Decl. Re-analysis of -/// this Decl will cause them to be re-created (or not). -fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { - var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value; - - for (export_owners.items) |exp| { - switch (exp.exported) { - .decl_index => |exported_decl_index| { - if (mod.decl_exports.getPtr(exported_decl_index)) |export_list| { - // Remove exports with owner_decl matching the regenerating decl. - const list = export_list.items; - var i: usize = 0; - var new_len = list.len; - while (i < new_len) { - if (list[i].owner_decl == decl_index) { - mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]); - new_len -= 1; - } else { - i += 1; - } - } - export_list.shrinkAndFree(mod.gpa, new_len); - if (new_len == 0) { - assert(mod.decl_exports.swapRemove(exported_decl_index)); - } - } - }, - .value => |value| { - if (mod.value_exports.getPtr(value)) |export_list| { - // Remove exports with owner_decl matching the regenerating decl. - const list = export_list.items; - var i: usize = 0; - var new_len = list.len; - while (i < new_len) { - if (list[i].owner_decl == decl_index) { - mem.copyBackwards(*Export, list[i..], list[i + 1 .. 
new_len]); - new_len -= 1; - } else { - i += 1; - } - } - export_list.shrinkAndFree(mod.gpa, new_len); - if (new_len == 0) { - assert(mod.value_exports.swapRemove(value)); - } - } - }, - } - if (mod.comp.bin_file) |lf| { - try lf.deleteDeclExport(decl_index, exp.opts.name); - } - if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { - failed_kv.value.destroy(mod.gpa); +/// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of +/// this `AnalUnit` will cause them to be re-created (or not). +pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { + const gpa = zcu.gpa; + + const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv| + .{ kv.value, 1 } + else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info| + .{ info.value.index, info.value.len } + else + return; + + const exports = zcu.all_exports.items[exports_base..][0..exports_len]; + + // In an only-c build, we're guaranteed to never use incremental compilation, so there are + // guaranteed not to be any exports in the output file that need deleting (since we only call + // `updateExports` on flush). + // This case is needed because in some rare edge cases, `Sema` wants to add and delete exports + // within a single update. + if (!build_options.only_c) { + for (exports, exports_base..) |exp, export_idx| { + if (zcu.comp.bin_file) |lf| { + lf.deleteExport(exp.exported, exp.opts.name); + } + if (zcu.failed_exports.fetchSwapRemove(@intCast(export_idx))) |failed_kv| { + failed_kv.value.destroy(gpa); + } } - mod.gpa.destroy(exp); } - export_owners.deinit(mod.gpa); + + zcu.free_exports.ensureUnusedCapacity(gpa, exports_len) catch { + // This space will be reused eventually, so we need not propagate this error. + // Just leak it for now, and let GC reclaim it later on. + return; + }; + for (exports_base..exports_base + exports_len) |export_idx| { + zcu.free_exports.appendAssumeCapacity(@intCast(export_idx)); + } } pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { @@ -5026,7 +4995,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); defer decl_prog_node.end(); - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -5262,6 +5231,8 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato }; } + try sema.flushExports(); + return .{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), @@ -5392,33 +5363,89 @@ fn lockAndClearFileCompileError(mod: *Module, file: *File) void { /// Called from `Compilation.update`, after everything is done, just before /// reporting compile errors. In this function we emit exported symbol collision /// errors and communicate exported symbols to the linker backend. -pub fn processExports(mod: *Module) !void { +pub fn processExports(zcu: *Zcu) !void { + const gpa = zcu.gpa; + + // First, construct a mapping of every exported value and Decl to the indices of all its different exports. 
+ var decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(u32)) = .{}; + var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(u32)) = .{}; + defer { + for (decl_exports.values()) |*exports| { + exports.deinit(gpa); + } + decl_exports.deinit(gpa); + for (value_exports.values()) |*exports| { + exports.deinit(gpa); + } + value_exports.deinit(gpa); + } + + // We note as a heuristic: + // * It is rare to export a value. + // * It is rare for one Decl to be exported multiple times. + // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization. + try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); + + for (zcu.single_exports.values()) |export_idx| { + const exp = zcu.all_exports.items[export_idx]; + const value_ptr, const found_existing = switch (exp.exported) { + .decl_index => |i| gop: { + const gop = try decl_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + .value => |i| gop: { + const gop = try value_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + }; + if (!found_existing) value_ptr.* = .{}; + try value_ptr.append(gpa, export_idx); + } + + for (zcu.multi_exports.values()) |info| { + for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| { + const value_ptr, const found_existing = switch (exp.exported) { + .decl_index => |i| gop: { + const gop = try decl_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + .value => |i| gop: { + const gop = try value_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + }; + if (!found_existing) value_ptr.* = .{}; + try value_ptr.append(gpa, @intCast(export_idx)); + } + } + // Map symbol names to `Export` for name collision detection. 
var symbol_exports: SymbolExports = .{}; - defer symbol_exports.deinit(mod.gpa); + defer symbol_exports.deinit(gpa); - for (mod.decl_exports.keys(), mod.decl_exports.values()) |exported_decl, exports_list| { + for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| { const exported: Exported = .{ .decl_index = exported_decl }; - try processExportsInner(mod, &symbol_exports, exported, exports_list.items); + try processExportsInner(zcu, &symbol_exports, exported, exports_list.items); } - for (mod.value_exports.keys(), mod.value_exports.values()) |exported_value, exports_list| { + for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| { const exported: Exported = .{ .value = exported_value }; - try processExportsInner(mod, &symbol_exports, exported, exports_list.items); + try processExportsInner(zcu, &symbol_exports, exported, exports_list.items); } } -const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export); +const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32); fn processExportsInner( zcu: *Zcu, symbol_exports: *SymbolExports, exported: Exported, - exports: []const *Export, + export_indices: []const u32, ) error{OutOfMemory}!void { const gpa = zcu.gpa; - for (exports) |new_export| { + for (export_indices) |export_idx| { + const new_export = &zcu.all_exports.items[export_idx]; const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); if (gop.found_existing) { new_export.status = .failed_retryable; @@ -5428,40 +5455,41 @@ fn processExportsInner( new_export.opts.name.fmt(&zcu.intern_pool), }); errdefer msg.destroy(gpa); - const other_export = gop.value_ptr.*; + const other_export = zcu.all_exports.items[gop.value_ptr.*]; const other_src_loc = other_export.getSrcLoc(zcu); try zcu.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{}); - zcu.failed_exports.putAssumeCapacityNoClobber(new_export, msg); + zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); new_export.status = .failed; } else { - gop.value_ptr.* = new_export; + gop.value_ptr.* = export_idx; } } if (zcu.comp.bin_file) |lf| { - try handleUpdateExports(zcu, exports, lf.updateExports(zcu, exported, exports)); + try handleUpdateExports(zcu, export_indices, lf.updateExports(zcu, exported, export_indices)); } else if (zcu.llvm_object) |llvm_object| { if (build_options.only_c) unreachable; - try handleUpdateExports(zcu, exports, llvm_object.updateExports(zcu, exported, exports)); + try handleUpdateExports(zcu, export_indices, llvm_object.updateExports(zcu, exported, export_indices)); } } fn handleUpdateExports( zcu: *Zcu, - exports: []const *Export, + export_indices: []const u32, result: link.File.UpdateExportsError!void, ) Allocator.Error!void { const gpa = zcu.gpa; result catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - const new_export = exports[0]; + const export_idx = export_indices[0]; + const new_export = &zcu.all_exports.items[export_idx]; new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(zcu); const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{ @errorName(err), }); - zcu.failed_exports.putAssumeCapacityNoClobber(new_export, msg); + zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); }, }; } @@ -5627,16 +5655,13 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { .{@errorName(err)}, )); decl.analysis = 
.codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { if (build_options.only_c) unreachable; llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - }, }; } } @@ -5684,14 +5709,6 @@ pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u } } -pub fn getDeclExports(mod: Module, decl_index: Decl.Index) []const *Export { - if (mod.decl_exports.get(decl_index)) |l| { - return l.items; - } else { - return &[0]*Export{}; - } -} - pub const Feature = enum { panic_fn, panic_unwrap_error, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 94f8faa44153..a8e58a1055c2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3081,6 +3081,8 @@ pub fn genDeclValue( } pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { + if (true) @panic("TODO jacobly"); + const tracy = trace(@src()); defer tracy.end(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 00cfd4404ae0..dd6606ece72c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -848,10 +848,6 @@ pub const Object = struct { /// Note that the values are not added until `emit`, when all errors in /// the compilation are known. error_name_table: Builder.Variable.Index, - /// This map is usually very close to empty. It tracks only the cases when a - /// second extern Decl could not be emitted with the correct name due to a - /// name collision. - extern_collisions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, void), /// Memoizes a null `?usize` value. null_opt_usize: Builder.Constant, @@ -1011,7 +1007,6 @@ pub const Object = struct { .named_enum_map = .{}, .type_map = .{}, .error_name_table = .none, - .extern_collisions = .{}, .null_opt_usize = .no_init, .struct_field_map = .{}, }; @@ -1029,7 +1024,6 @@ pub const Object = struct { self.anon_decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); - self.extern_collisions.deinit(gpa); self.builder.deinit(); self.struct_field_map.deinit(gpa); self.* = undefined; @@ -1121,61 +1115,6 @@ pub const Object = struct { try object.builder.finishModuleAsm(); } - fn resolveExportExternCollisions(object: *Object) !void { - const mod = object.module; - - // This map has externs with incorrect symbol names. - for (object.extern_collisions.keys()) |decl_index| { - const global = object.decl_map.get(decl_index) orelse continue; - // Same logic as below but for externs instead of exports. 
- const decl_name = object.builder.strtabStringIfExists(mod.declPtr(decl_index).name.toSlice(&mod.intern_pool)) orelse continue; - const other_global = object.builder.getGlobal(decl_name) orelse continue; - if (other_global.toConst().getBase(&object.builder) == - global.toConst().getBase(&object.builder)) continue; - - try global.replace(other_global, &object.builder); - } - object.extern_collisions.clearRetainingCapacity(); - - for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { - const global = object.decl_map.get(decl_index) orelse continue; - try resolveGlobalCollisions(object, global, export_list.items); - } - - for (mod.value_exports.keys(), mod.value_exports.values()) |val, export_list| { - const global = object.anon_decl_map.get(val) orelse continue; - try resolveGlobalCollisions(object, global, export_list.items); - } - } - - fn resolveGlobalCollisions( - object: *Object, - global: Builder.Global.Index, - export_list: []const *Module.Export, - ) !void { - const mod = object.module; - const global_base = global.toConst().getBase(&object.builder); - for (export_list) |exp| { - // Detect if the LLVM global has already been created as an extern. In such - // case, we need to replace all uses of it with this exported global. - const exp_name = object.builder.strtabStringIfExists(exp.opts.name.toSlice(&mod.intern_pool)) orelse continue; - - const other_global = object.builder.getGlobal(exp_name) orelse continue; - if (other_global.toConst().getBase(&object.builder) == global_base) continue; - - try global.takeName(other_global, &object.builder); - try other_global.replace(global, &object.builder); - // Problem: now we need to replace in the decl_map that - // the extern decl index points to this new global. However we don't - // know the decl index. - // Even if we did, a future incremental update to the extern would then - // treat the LLVM global as an extern rather than an export, so it would - // need a way to check that. - // This is a TODO that needs to be solved when making - // the LLVM backend support incremental compilation. 
- } - } - pub const EmitOptions = struct { pre_ir_path: ?[]const u8, pre_bc_path: ?[]const u8, @@ -1193,7 +1132,6 @@ pub const Object = struct { pub fn emit(self: *Object, options: EmitOptions) !void { { - try self.resolveExportExternCollisions(); try self.genErrorNameTable(); try self.genCmpLtErrorsLenFunction(); try self.genModuleLevelAssembly(); @@ -1698,8 +1636,7 @@ pub const Object = struct { const file = try o.getDebugFile(namespace.file_scope); const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = decl.val.getExternFunc(zcu) == null and - !zcu.decl_exports.contains(decl_index); + const is_internal_linkage = decl.val.getExternFunc(zcu) == null; const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu)); const subprogram = try o.builder.debugSubprogram( @@ -1760,8 +1697,6 @@ pub const Object = struct { }; try fg.wip.finish(); - - try o.updateExports(zcu, .{ .decl_index = decl_index }, zcu.getDeclExports(decl_index)); } pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void { @@ -1781,66 +1716,25 @@ pub const Object = struct { }, else => |e| return e, }; - try self.updateExports(module, .{ .decl_index = decl_index }, module.getDeclExports(decl_index)); } pub fn updateExports( self: *Object, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const decl_index = switch (exported) { .decl_index => |i| i, - .value => |val| return updateExportedValue(self, mod, val, exports), + .value => |val| return updateExportedValue(self, mod, val, export_indices), }; - const gpa = mod.gpa; const ip = &mod.intern_pool; - // If the module does not already have the function, we ignore this function call - // because we call `updateExports` at the end of `updateFunc` and `updateDecl`. 
- const global_index = self.decl_map.get(decl_index) orelse return; + const global_index = self.decl_map.get(decl_index).?; const decl = mod.declPtr(decl_index); const comp = mod.comp; - if (decl.isExtern(mod)) { - const decl_name = decl_name: { - if (mod.getTarget().isWasm() and decl.val.typeOf(mod).zigTypeTag(mod) == .Fn) { - if (decl.getOwnedExternFunc(mod).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) { - break :decl_name try self.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); - } - } - } - break :decl_name try self.builder.strtabString(decl.name.toSlice(ip)); - }; - if (self.builder.getGlobal(decl_name)) |other_global| { - if (other_global != global_index) { - try self.extern_collisions.put(gpa, decl_index, {}); - } - } - - try global_index.rename(decl_name, &self.builder); - global_index.setLinkage(.external, &self.builder); - global_index.setUnnamedAddr(.default, &self.builder); - if (comp.config.dll_export_fns) - global_index.setDllStorageClass(.default, &self.builder); - - if (decl.val.getVariable(mod)) |decl_var| { - global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( - if (decl_var.is_threadlocal) .generaldynamic else .default, - &self.builder, - ); - if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &self.builder); - } - } else if (exports.len != 0) { - const main_exp_name = try self.builder.strtabString(exports[0].opts.name.toSlice(ip)); - try global_index.rename(main_exp_name, &self.builder); - - if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal) - global_index.ptrConst(&self.builder).kind - .variable.setThreadLocal(.generaldynamic, &self.builder); - - return updateExportedGlobal(self, mod, global_index, exports); + if (export_indices.len != 0) { + return updateExportedGlobal(self, mod, global_index, export_indices); } else { const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(mod)).toSlice(ip)); try global_index.rename(fqn, &self.builder); @@ -1848,17 +1742,6 @@ pub const Object = struct { if (comp.config.dll_export_fns) global_index.setDllStorageClass(.default, &self.builder); global_index.setUnnamedAddr(.unnamed_addr, &self.builder); - if (decl.val.getVariable(mod)) |decl_var| { - const decl_namespace = mod.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.file_scope.mod.single_threaded; - global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) - .generaldynamic - else - .default, - &self.builder, - ); - } } } @@ -1866,11 +1749,11 @@ pub const Object = struct { o: *Object, mod: *Module, exported_value: InternPool.Index, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const gpa = mod.gpa; const ip = &mod.intern_pool; - const main_exp_name = try o.builder.strtabString(exports[0].opts.name.toSlice(ip)); + const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip)); const global_index = i: { const gop = try o.anon_decl_map.getOrPut(gpa, exported_value); if (gop.found_existing) { @@ -1894,32 +1777,57 @@ pub const Object = struct { try variable_index.setInitializer(init_val, &o.builder); break :i global_index; }; - return updateExportedGlobal(o, mod, global_index, exports); + return updateExportedGlobal(o, mod, global_index, export_indices); } fn updateExportedGlobal( o: *Object, mod: *Module, global_index: Builder.Global.Index, - exports: []const *Module.Export, 
+    export_indices: []const u32,
 ) link.File.UpdateExportsError!void {
     const comp = mod.comp;
     const ip = &mod.intern_pool;
+    const first_export = mod.all_exports.items[export_indices[0]];
+
+    // We will rename this global to have a name matching `first_export`.
+    // Successive exports become aliases.
+    // If the first export name already exists, then there is a corresponding
+    // extern global - we replace it with this global.
+    const first_exp_name = try o.builder.strtabString(first_export.opts.name.toSlice(ip));
+    if (o.builder.getGlobal(first_exp_name)) |other_global| replace: {
+        if (other_global.toConst().getBase(&o.builder) == global_index.toConst().getBase(&o.builder)) {
+            break :replace; // this global already has the name we want
+        }
+        try global_index.takeName(other_global, &o.builder);
+        try other_global.replace(global_index, &o.builder);
+        // Problem: we now need to update the decl_map so that the extern
+        // decl index points to this new global. However, we don't know the
+        // decl index.
+        // Even if we did, a future incremental update to the extern would then
+        // treat the LLVM global as an extern rather than an export, so it would
+        // need a way to check that.
+        // This is a TODO that needs to be solved when making
+        // the LLVM backend support incremental compilation.
+    } else {
+        try global_index.rename(first_exp_name, &o.builder);
+    }
+
     global_index.setUnnamedAddr(.default, &o.builder);
     if (comp.config.dll_export_fns)
         global_index.setDllStorageClass(.dllexport, &o.builder);
-    global_index.setLinkage(switch (exports[0].opts.linkage) {
+    global_index.setLinkage(switch (first_export.opts.linkage) {
         .internal => unreachable,
         .strong => .external,
         .weak => .weak_odr,
         .link_once => .linkonce_odr,
     }, &o.builder);
-    global_index.setVisibility(switch (exports[0].opts.visibility) {
+    global_index.setVisibility(switch (first_export.opts.visibility) {
         .default => .default,
         .hidden => .hidden,
         .protected => .protected,
     }, &o.builder);
-    if (exports[0].opts.section.toSlice(ip)) |section|
+    if (first_export.opts.section.toSlice(ip)) |section|
         switch (global_index.ptrConst(&o.builder).kind) {
             .variable => |impl_index| impl_index.setSection(
                 try o.builder.string(section),
@@ -1936,7 +1844,8 @@ pub const Object = struct {
     // The planned solution to this is https://github.com/ziglang/zig/issues/13265
     // Until then we iterate over existing aliases and make them point
     // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
-    for (exports[1..]) |exp| {
+    for (export_indices[1..]) |export_idx| {
+        const exp = mod.all_exports.items[export_idx];
         const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
         if (o.builder.getGlobal(exp_name)) |global| {
             switch (global.ptrConst(&o.builder).kind) {
                 .alias => |alias| {
                     alias.setAliasee(global_index.toConst(), &o.builder);
                     continue;
                 },
-                .variable, .function => {},
+                .variable, .function => {
+                    // This existing global is an `extern` corresponding to this export.
+                    // Redirect its uses to the global being exported; its name is
+                    // freed up here so that the alias created below can take it.
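The comment block above is the heart of this function: the first export claims the symbol name (displacing a pre-existing `extern` placeholder if there is one) and every further export becomes an alias of the same definition. A toy model of just that naming policy, with an ordinary hash map standing in for the LLVM `Builder` symbol table:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        const Global = union(enum) { extern_decl, definition, alias: []const u8 };
        var symtab = std.StringArrayHashMap(Global).init(gpa);
        defer symtab.deinit();

        // An `extern fn foo` was referenced earlier, so "foo" already names
        // a placeholder.
        try symtab.put("foo", .extern_decl);

        const export_names = [_][]const u8{ "foo", "foo_alias" };

        // First export: the real definition takes over the name.
        try symtab.put(export_names[0], .definition);
        // Remaining exports: aliases pointing back at the first name.
        for (export_names[1..]) |name| {
            try symtab.put(name, .{ .alias = export_names[0] });
        }

        for (symtab.keys(), symtab.values()) |name, global| {
            std.debug.print("{s}: {s}\n", .{ name, @tagName(global) });
        }
    }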
+ try global.rename(.empty, &o.builder); + try global.replace(global_index, &o.builder); + }, .replaced => unreachable, } } @@ -4762,36 +4677,77 @@ pub const DeclGen = struct { else => try o.lowerValue(init_val), }, &o.builder); + if (decl.val.getVariable(zcu)) |decl_var| { + const decl_namespace = zcu.namespacePtr(decl.src_namespace); + const single_threaded = decl_namespace.file_scope.mod.single_threaded; + variable_index.setThreadLocal( + if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, + &o.builder, + ); + } + const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = !o.module.decl_exports.contains(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const owner_mod = namespace.file_scope.mod; - if (owner_mod.strip) return; + if (!owner_mod.strip) { + const debug_file = try o.getDebugFile(namespace.file_scope); + + const debug_global_var = try o.builder.debugGlobalVar( + try o.builder.metadataString(decl.name.toSlice(ip)), // Name + try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name + debug_file, // File + debug_file, // Scope + line_number, + try o.lowerDebugType(decl.typeOf(zcu)), + variable_index, + .{ .local = !decl.isExtern(zcu) }, + ); - const debug_file = try o.getDebugFile(namespace.file_scope); + const debug_expression = try o.builder.debugExpression(&.{}); - const debug_global_var = try o.builder.debugGlobalVar( - try o.builder.metadataString(decl.name.toSlice(ip)), // Name - try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name - debug_file, // File - debug_file, // Scope - line_number, - try o.lowerDebugType(decl.typeOf(zcu)), - variable_index, - .{ .local = is_internal_linkage }, - ); + const debug_global_var_expression = try o.builder.debugGlobalVarExpression( + debug_global_var, + debug_expression, + ); - const debug_expression = try o.builder.debugExpression(&.{}); + variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder); + try o.debug_globals.append(o.gpa, debug_global_var_expression); + } + } - const debug_global_var_expression = try o.builder.debugGlobalVarExpression( - debug_global_var, - debug_expression, - ); + if (decl.isExtern(zcu)) { + const global_index = o.decl_map.get(decl_index).?; - variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder); - try o.debug_globals.append(o.gpa, debug_global_var_expression); + const decl_name = decl_name: { + if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) { + if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); + } + } + } + break :decl_name try o.builder.strtabString(decl.name.toSlice(ip)); + }; + + if (o.builder.getGlobal(decl_name)) |other_global| { + if (other_global != global_index) { + // Another global already has this name; just use it in place of this global. 
+ try global_index.replace(other_global, &o.builder); + return; + } + } + + try global_index.rename(decl_name, &o.builder); + global_index.setLinkage(.external, &o.builder); + global_index.setUnnamedAddr(.default, &o.builder); + if (zcu.comp.config.dll_export_fns) + global_index.setDllStorageClass(.default, &o.builder); + + if (decl.val.getVariable(zcu)) |decl_var| { + if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); + } } } }; @@ -5193,7 +5149,6 @@ pub const FuncGen = struct { const fqn = try decl.fullyQualifiedName(zcu); - const is_internal_linkage = !zcu.decl_exports.contains(decl_index); const fn_ty = try zcu.funcType(.{ .param_types = &.{}, .return_type = .void_type, @@ -5211,7 +5166,7 @@ pub const FuncGen = struct { .sp_flags = .{ .Optimized = owner_mod.optimize_mode != .Debug, .Definition = true, - .LocalToUnit = is_internal_linkage, + .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later! }, }, o.debug_compile_unit, diff --git a/src/link.zig b/src/link.zig index 75a9723f1c04..36a5cb8187e5 100644 --- a/src/link.zig +++ b/src/link.zig @@ -606,12 +606,12 @@ pub const File = struct { base: *File, module: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) UpdateExportsError!void { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, exports); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, export_indices); }, } } @@ -671,11 +671,11 @@ pub const File = struct { } } - pub fn deleteDeclExport( + pub fn deleteExport( base: *File, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, - ) !void { + ) void { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .plan9, @@ -685,7 +685,7 @@ pub const File = struct { => {}, inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteDeclExport(decl_index, name); + return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteExport(exported, name); }, } } diff --git a/src/link/C.zig b/src/link/C.zig index e6830eac8cb5..3a8d06b5ee31 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -183,6 +183,8 @@ pub fn updateFunc( air: Air, liveness: Liveness, ) !void { + if (true) @panic("TODO jacobly"); + const gpa = self.base.comp.gpa; const func = zcu.funcInfo(func_index); @@ -250,6 +252,8 @@ pub fn updateFunc( } fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { + if (true) @panic("TODO jacobly"); + const gpa = self.base.comp.gpa; const anon_decl = self.anon_decls.keys()[i]; @@ -306,6 +310,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { } pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { + if (true) @panic("TODO jacobly"); + const tracy = trace(@src()); defer tracy.end(); @@ -390,6 +396,8 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { } pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { + if (true) @panic("TODO jacobly"); + _ = arena; // Has the same lifetime as the call to Compilation.update. 
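The `link.zig` hunk above renames `deleteDeclExport` to `deleteExport` but keeps the usual `link.File` dispatch: switch on the backend tag, then downcast from the embedded `base` field via `@fieldParentPtr`. A self-contained sketch of that pattern (the two backends here are invented stand-ins):

    const std = @import("std");

    const File = struct {
        tag: Tag,

        const Tag = enum {
            elf,
            coff,

            fn Type(comptime tag: Tag) type {
                return switch (tag) {
                    .elf => Elf,
                    .coff => Coff,
                };
            }
        };

        fn deleteExport(base: *File, name: []const u8) void {
            switch (base.tag) {
                // `tag` is comptime-known in each inline arm, so the
                // downcast target type can be computed from it.
                inline else => |tag| {
                    return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteExport(name);
                },
            }
        }
    };

    const Elf = struct {
        base: File,

        fn deleteExport(self: *Elf, name: []const u8) void {
            _ = self;
            std.debug.print("elf: deleting export '{s}'\n", .{name});
        }
    };

    const Coff = struct {
        base: File,

        fn deleteExport(self: *Coff, name: []const u8) void {
            _ = self;
            std.debug.print("coff: deleting export '{s}'\n", .{name});
        }
    };

    pub fn main() void {
        var elf: Elf = .{ .base = .{ .tag = .elf } };
        elf.base.deleteExport("_start");
    }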
const tracy = trace(@src());
@@ -451,9 +459,16 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
 {
     var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
     defer export_names.deinit(gpa);
-    try export_names.ensureTotalCapacity(gpa, @intCast(zcu.decl_exports.entries.len));
-    for (zcu.decl_exports.values()) |exports| for (exports.items) |@"export"|
-        try export_names.put(gpa, @"export".opts.name, {});
+    try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count()));
+    for (zcu.single_exports.values()) |export_idx| {
+        export_names.putAssumeCapacity(zcu.all_exports.items[export_idx].opts.name, {});
+    }
+    for (zcu.multi_exports.values()) |info| {
+        try export_names.ensureUnusedCapacity(gpa, info.len);
+        for (zcu.all_exports.items[info.index..][0..info.len]) |exp| {
+            export_names.putAssumeCapacity(exp.opts.name, {});
+        }
+    }

     for (self.anon_decls.values()) |*decl_block| {
         try self.flushDeclBlock(zcu, zcu.root_mod, &f, decl_block, export_names, .none);
@@ -781,10 +796,10 @@ pub fn updateExports(
     self: *C,
     zcu: *Zcu,
     exported: Zcu.Exported,
-    exports: []const *Zcu.Export,
+    export_indices: []const u32,
 ) !void {
-    _ = exports;
-    _ = exported;
-    _ = zcu;
     _ = self;
+    _ = zcu;
+    _ = exported;
+    _ = export_indices;
 }
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 4524441f3b6e..0244d085b8fa 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1162,9 +1162,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:

     try self.updateDeclCode(decl_index, code, .FUNCTION);

-    // Since we updated the vaddr and the size, each corresponding export
-    // symbol also needs to be updated.
-    return self.updateExports(mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index));
+    // Exports will be updated by `Zcu.processExports` after the update.
 }

 pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 {
@@ -1286,9 +1284,7 @@ pub fn updateDecl(

     try self.updateDeclCode(decl_index, code, .NULL);

-    // Since we updated the vaddr and the size, each corresponding export
-    // symbol also needs to be updated.
-    return self.updateExports(mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index));
+    // Exports will be updated by `Zcu.processExports` after the update.
 }

 fn updateLazySymbolAtom(
@@ -1509,7 +1505,7 @@ pub fn updateExports(
     self: *Coff,
     mod: *Module,
     exported: Module.Exported,
-    exports: []const *Module.Export,
+    export_indices: []const u32,
 ) link.File.UpdateExportsError!void {
     if (build_options.skip_non_native and builtin.object_format != .coff) {
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
@@ -1522,7 +1518,8 @@ pub fn updateExports(
     if (comp.config.use_llvm) {
         // Even in the case of LLVM, we need to notice certain exported symbols in order to
         // detect the default subsystem.
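The `flushModule` hunk above rebuilds the exported-name set from the split tables introduced by this patch: `single_exports` maps a decl to one export index, while `multi_exports` records an `(index, len)` run into `all_exports`. A runnable sketch of that layout, with plain integers standing in for decl indices and interned names:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        // all_exports, reduced to just the export names.
        const all_names = [_][]const u8{ "a", "b", "b2", "b3" };
        const all_exports: []const []const u8 = &all_names;

        var single_exports = std.AutoArrayHashMap(u32, u32).init(gpa);
        defer single_exports.deinit();
        var multi_exports = std.AutoArrayHashMap(u32, struct { index: u32, len: u32 }).init(gpa);
        defer multi_exports.deinit();

        try single_exports.put(1, 0); // decl 1 exports "a"
        try multi_exports.put(2, .{ .index = 1, .len = 3 }); // decl 2 exports "b", "b2", "b3"

        var export_names = std.StringArrayHashMap(void).init(gpa);
        defer export_names.deinit();

        try export_names.ensureTotalCapacity(single_exports.count());
        for (single_exports.values()) |export_idx| {
            export_names.putAssumeCapacity(all_exports[export_idx], {});
        }
        for (multi_exports.values()) |info| {
            try export_names.ensureUnusedCapacity(info.len);
            for (all_exports[info.index..][0..info.len]) |name| {
                export_names.putAssumeCapacity(name, {});
            }
        }

        std.debug.print("{d} distinct export names\n", .{export_names.count()});
    }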
- for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; const exported_decl_index = switch (exp.exported) { .decl_index => |i| i, .value => continue, @@ -1552,7 +1549,7 @@ pub fn updateExports( } } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); const gpa = comp.gpa; @@ -1562,7 +1559,7 @@ pub fn updateExports( break :blk self.decls.getPtr(decl_index).?; }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { - const first_exp = exports[0]; + const first_exp = mod.all_exports.items[export_indices[0]]; const res = try self.lowerAnonDecl(value, .none, first_exp.getSrcLoc(mod)); switch (res) { .ok => {}, @@ -1570,7 +1567,7 @@ pub fn updateExports( // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em); + mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } @@ -1580,12 +1577,13 @@ pub fn updateExports( const atom_index = metadata.atom; const atom = self.getAtom(atom_index); - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)}); if (exp.opts.section.toSlice(&mod.intern_pool)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: ExportOptions.section", @@ -1596,7 +1594,7 @@ pub fn updateExports( } if (exp.opts.linkage == .link_once) { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: GlobalLinkage.link_once", @@ -1641,13 +1639,16 @@ pub fn updateExports( } } -pub fn deleteDeclExport( +pub fn deleteExport( self: *Coff, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { if (self.llvm_object) |_| return; - const metadata = self.decls.getPtr(decl_index) orelse return; + const metadata = switch (exported) { + .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, + .value => |value| self.anon_decls.getPtr(value) orelse return, + }; const mod = self.base.comp.module.?; const name_slice = name.toSlice(&mod.intern_pool); const sym_index = metadata.getExportPtr(self, name_slice) orelse return; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 5a14a544e85d..df8e6c0dd8d5 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3011,13 +3011,13 @@ pub fn updateExports( self: *Elf, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); - return self.zigObjectPtr().?.updateExports(self, mod, exported, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, 
export_indices); + return self.zigObjectPtr().?.updateExports(self, mod, exported, export_indices); } pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void { @@ -3025,13 +3025,13 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.Dec return self.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); } -pub fn deleteDeclExport( +pub fn deleteExport( self: *Elf, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.deleteDeclExport(self, decl_index, name); + return self.zigObjectPtr().?.deleteExport(self, exported, name); } fn addLinkerDefinedSymbols(self: *Elf) !void { diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index c2c5e879cb14..14040767b134 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1115,9 +1115,7 @@ pub fn updateFunc( ); } - // Since we updated the vaddr and the size, each corresponding export - // symbol also needs to be updated. - return self.updateExports(elf_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } pub fn updateDecl( @@ -1194,9 +1192,7 @@ pub fn updateDecl( ); } - // Since we updated the vaddr and the size, each corresponding export - // symbol also needs to be updated. - return self.updateExports(elf_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } fn updateLazySymbol( @@ -1386,7 +1382,7 @@ pub fn updateExports( elf_file: *Elf, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1398,7 +1394,7 @@ pub fn updateExports( break :blk self.decls.getPtr(decl_index).?; }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { - const first_exp = exports[0]; + const first_exp = mod.all_exports.items[export_indices[0]]; const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.getSrcLoc(mod)); switch (res) { .ok => {}, @@ -1406,7 +1402,7 @@ pub fn updateExports( // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? 
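As the Coff and Elf hunks around here show, `deleteExport` now takes a `Zcu.Exported` rather than a decl index, and each backend picks the metadata table matching the union's active tag. A small sketch of that dispatch (all types are simplified stand-ins):

    const std = @import("std");

    const Exported = union(enum) {
        decl_index: u32,
        value: u64,
    };

    const Linker = struct {
        decls: std.AutoHashMap(u32, Metadata),
        anon_decls: std.AutoHashMap(u64, Metadata),

        const Metadata = struct { sym: u32 };

        fn deleteExport(self: *Linker, exported: Exported, name: []const u8) void {
            const metadata = switch (exported) {
                .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return,
                .value => |value| self.anon_decls.getPtr(value) orelse return,
            };
            std.debug.print("deleting export '{s}' (sym {d})\n", .{ name, metadata.sym });
        }
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var linker: Linker = .{
            .decls = std.AutoHashMap(u32, Linker.Metadata).init(gpa),
            .anon_decls = std.AutoHashMap(u64, Linker.Metadata).init(gpa),
        };
        defer linker.decls.deinit();
        defer linker.anon_decls.deinit();

        try linker.decls.put(7, .{ .sym = 42 });
        linker.deleteExport(.{ .decl_index = 7 }, "foo");
        linker.deleteExport(.{ .value = 123 }, "bar"); // unknown: silently ignored
    }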
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em); + mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } @@ -1418,11 +1414,12 @@ pub fn updateExports( const esym = self.local_esyms.items(.elf_sym)[esym_index]; const esym_shndx = self.local_esyms.items(.shndx)[esym_index]; - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: ExportOptions.section", @@ -1437,7 +1434,7 @@ pub fn updateExports( .weak => elf.STB_WEAK, .link_once => { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", @@ -1487,13 +1484,16 @@ pub fn updateDeclLineNumber( } } -pub fn deleteDeclExport( +pub fn deleteExport( self: *ZigObject, elf_file: *Elf, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { - const metadata = self.decls.getPtr(decl_index) orelse return; + const metadata = switch (exported) { + .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, + .value => |value| self.anon_decls.getPtr(value) orelse return, + }; const mod = elf_file.base.comp.module.?; const exp_name = name.toSlice(&mod.intern_pool); const esym_index = metadata.@"export"(self, exp_name) orelse return; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index dd185fcaec81..3187ba528bc5 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3196,22 +3196,22 @@ pub fn updateExports( self: *MachO, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); - return self.getZigObject().?.updateExports(self, mod, exported, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); + return self.getZigObject().?.updateExports(self, mod, exported, export_indices); } -pub fn deleteDeclExport( +pub fn deleteExport( self: *MachO, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, -) Allocator.Error!void { +) void { if (self.llvm_object) |_| return; - return self.getZigObject().?.deleteDeclExport(self, decl_index, name); + return self.getZigObject().?.deleteExport(self, exported, name); } pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index e2202d11fc5b..1fce9e37dd7e 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -713,9 +713,7 @@ pub fn updateFunc( ); } - // Since we updated the vaddr and the size, each corresponding export - // symbol also needs 
to be updated. - return self.updateExports(macho_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } pub fn updateDecl( @@ -790,9 +788,7 @@ pub fn updateDecl( ); } - // Since we updated the vaddr and the size, each corresponding export symbol also - // needs to be updated. - try self.updateExports(macho_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } fn updateDeclCode( @@ -1187,7 +1183,7 @@ pub fn updateExports( macho_file: *MachO, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1199,7 +1195,7 @@ pub fn updateExports( break :blk self.decls.getPtr(decl_index).?; }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { - const first_exp = exports[0]; + const first_exp = mod.all_exports.items[export_indices[0]]; const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.getSrcLoc(mod)); switch (res) { .ok => {}, @@ -1207,7 +1203,7 @@ pub fn updateExports( // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em); + mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } @@ -1218,11 +1214,12 @@ pub fn updateExports( const nlist_idx = macho_file.getSymbol(sym_index).nlist_idx; const nlist = self.symtab.items(.nlist)[nlist_idx]; - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice("__text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: ExportOptions.section", @@ -1232,7 +1229,7 @@ pub fn updateExports( } } if (exp.opts.linkage == .link_once) { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, exp.getSrcLoc(mod), "Unimplemented: GlobalLinkage.link_once", @@ -1364,15 +1361,18 @@ pub fn updateDeclLineNumber(self: *ZigObject, mod: *Module, decl_index: InternPo } } -pub fn deleteDeclExport( +pub fn deleteExport( self: *ZigObject, macho_file: *MachO, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { const mod = macho_file.base.comp.module.?; - const metadata = self.decls.getPtr(decl_index) orelse return; + const metadata = switch (exported) { + .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, + .value => |value| self.anon_decls.getPtr(value) orelse return, + }; const nlist_index = metadata.@"export"(self, name.toSlice(&mod.intern_pool)) orelse return; log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)}); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 3d059acbb502..aa9ea1b5cdfe 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -96,12 +96,12 @@ pub fn updateExports( self: *NvPtx, module: *Module, exported: Module.Exported, - exports: []const 
*Module.Export, + export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .nvptx) @panic("Attempted to compile for object format that was disabled by build configuration"); - return self.llvm_object.updateExports(module, exported, exports); + return self.llvm_object.updateExports(module, exported, export_indices); } pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index b15ec8781519..60775ac66274 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -60,6 +60,9 @@ fn_decl_table: std.AutoArrayHashMapUnmanaged( ) = .{}, /// the code is modified when relocated, so that is why it is mutable data_decl_table: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u8) = .{}, +/// When `updateExports` is called, we store the export indices here, to be used +/// during flush. +decl_exports: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u32) = .{}, /// Table of unnamed constants associated with a parent `Decl`. /// We store them here so that we can free the constants whenever the `Decl` @@ -770,8 +773,8 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (mod.decl_exports.get(decl_index)) |exports| { - try self.addDeclExports(mod, decl_index, exports.items); + if (self.decl_exports.get(decl_index)) |export_indices| { + try self.addDeclExports(mod, decl_index, export_indices); } } } @@ -836,8 +839,8 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (mod.decl_exports.get(decl_index)) |exports| { - try self.addDeclExports(mod, decl_index, exports.items); + if (self.decl_exports.get(decl_index)) |export_indices| { + try self.addDeclExports(mod, decl_index, export_indices); } } // write the unnamed constants after the other data decls @@ -1007,20 +1010,21 @@ fn addDeclExports( self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const gpa = self.base.comp.gpa; const metadata = self.decls.getPtr(decl_index).?; const atom = self.getAtom(metadata.index); - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; const exp_name = exp.opts.name.toSlice(&mod.intern_pool); // plan9 does not support custom sections if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &mod.intern_pool) and !section_name.eqlSlice(".data", &mod.intern_pool)) { - try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, mod.declPtr(decl_index).navSrcLoc(mod).upgrade(mod), "plan9 does not support extra sections", @@ -1152,15 +1156,23 @@ pub fn updateExports( self: *Plan9, module: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { + const gpa = self.base.comp.gpa; switch (exported) { .value => @panic("TODO: plan9 updateExports handling values"), - .decl_index => |decl_index| _ = try self.seeDecl(decl_index), + .decl_index => |decl_index| { + _ = try self.seeDecl(decl_index); + if (self.decl_exports.fetchSwapRemove(decl_index)) |kv| { + 
gpa.free(kv.value); + } + try self.decl_exports.ensureUnusedCapacity(gpa, 1); + const duped_indices = try gpa.dupe(u32, export_indices); + self.decl_exports.putAssumeCapacityNoClobber(decl_index, duped_indices); + }, } - // we do all the things in flush + // all proper work is done in flush _ = module; - _ = exports; } pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index { @@ -1290,6 +1302,10 @@ pub fn deinit(self: *Plan9) void { gpa.free(self.syms.items[sym_index].name); } self.data_decl_table.deinit(gpa); + for (self.decl_exports.values()) |export_indices| { + gpa.free(export_indices); + } + self.decl_exports.deinit(gpa); self.syms.deinit(gpa); self.got_index_free_list.deinit(gpa); self.syms_index_free_list.deinit(gpa); @@ -1395,10 +1411,13 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const atom = self.getAtom(decl_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| { - try self.writeSym(writer, self.syms.items[exp_i]); - }; + if (self.decl_exports.get(decl_index)) |export_indices| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; + if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + try self.writeSym(writer, self.syms.items[exp_i]); + } + } } } } @@ -1442,13 +1461,16 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const atom = self.getAtom(decl_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| { - const s = self.syms.items[exp_i]; - if (mem.eql(u8, s.name, "_start")) - self.entry_val = s.value; - try self.writeSym(writer, s); - }; + if (self.decl_exports.get(decl_index)) |export_indices| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; + if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + const s = self.syms.items[exp_i]; + if (mem.eql(u8, s.name, "_start")) + self.entry_val = s.value; + try self.writeSym(writer, s); + } + } } } } diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 099d58bfa0a3..d1a8ff96c6e5 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -152,7 +152,7 @@ pub fn updateExports( self: *SpirV, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const decl_index = switch (exported) { .decl_index => |i| i, @@ -177,7 +177,8 @@ pub fn updateExports( if ((!is_vulkan and execution_model == .Kernel) or (is_vulkan and (execution_model == .Fragment or execution_model == .Vertex))) { - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; try self.object.spv.declareEntryPoint( spv_decl_index, exp.opts.name.toSlice(&mod.intern_pool), diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 6476784a01e7..164ddbc118da 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1542,26 +1542,26 @@ pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: lin return wasm.zigObjectPtr().?.getAnonDeclVAddr(wasm, decl_val, reloc_info); } -pub fn deleteDeclExport( +pub fn deleteExport( wasm: *Wasm, - decl_index: 
InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { if (wasm.llvm_object) |_| return; - return wasm.zigObjectPtr().?.deleteDeclExport(wasm, decl_index, name); + return wasm.zigObjectPtr().?.deleteExport(wasm, exported, name); } pub fn updateExports( wasm: *Wasm, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); - return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, exports); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); + return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, export_indices); } pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void { diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index 1accf81c0227..a3b8eb445994 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -833,13 +833,17 @@ pub fn getAnonDeclVAddr( return target_symbol_index; } -pub fn deleteDeclExport( +pub fn deleteExport( zig_object: *ZigObject, wasm_file: *Wasm, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { const mod = wasm_file.base.comp.module.?; + const decl_index = switch (exported) { + .decl_index => |decl_index| decl_index, + .value => @panic("TODO: implement Wasm linker code for exporting a constant value"), + }; const decl_info = zig_object.decls_map.getPtr(decl_index) orelse return; if (decl_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| { const sym = zig_object.symbol(sym_index); @@ -856,7 +860,7 @@ pub fn updateExports( wasm_file: *Wasm, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const decl_index = switch (exported) { .decl_index => |i| i, @@ -873,9 +877,10 @@ pub fn updateExports( const gpa = mod.gpa; log.debug("Updating exports for decl '{}'", .{decl.name.fmt(&mod.intern_pool)}); - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, decl.navSrcLoc(mod).upgrade(mod), "Unimplemented: ExportOptions.section '{s}'", @@ -908,7 +913,7 @@ pub fn updateExports( }, .strong => {}, // symbols are strong by default .link_once => { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, decl.navSrcLoc(mod).upgrade(mod), "Unimplemented: LinkOnce", From 5f03c025058ddda09bfb3eac283bb88d30ad38cc Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jun 2024 04:16:47 +0100 Subject: [PATCH 036/152] Zcu: key compile errors on `AnalUnit` where appropriate This change seeks to more appropriately model the way semantic analysis works by drawing a more clear line between errors emitted by analyzing a `Decl` (in future a `Cau`) and errors emitted by analyzing a runtime function. This does change a few compile errors surrounding compile logs by adding more "also here" notes. 
The new notes are more technically correct, but perhaps not so helpful. They're not doing enough harm for me to put extensive thought into this for now. --- src/Compilation.zig | 128 ++++++++++-------- src/Sema.zig | 27 ++-- src/Zcu.zig | 52 +++---- src/codegen/llvm.zig | 4 +- src/codegen/spirv.zig | 2 +- src/link/Coff.zig | 7 +- src/link/Elf/ZigObject.zig | 7 +- src/link/MachO/ZigObject.zig | 7 +- src/link/Plan9.zig | 7 +- src/link/Wasm/ZigObject.zig | 7 +- ..._tagged_enum_doesnt_crash_the_compiler.zig | 1 + test/cases/compile_errors/compile_log.zig | 1 + 12 files changed, 130 insertions(+), 120 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index e0bbdd2e03c2..4c693ffb28e3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2831,11 +2831,11 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (comp.module) |module| { - total += module.failed_exports.count(); - total += module.failed_embed_files.count(); + if (comp.module) |zcu| { + total += zcu.failed_exports.count(); + total += zcu.failed_embed_files.count(); - for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { + for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |_| { total += 1; } else { @@ -2851,23 +2851,27 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // When a parse error is introduced, we keep all the semantic analysis for // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. - for (module.failed_decls.keys()) |key| { - if (module.declFileScope(key).okToReportErrors()) { + for (zcu.failed_analysis.keys()) |key| { + const decl_index = switch (key.unwrap()) { + .decl => |d| d, + .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + }; + if (zcu.declFileScope(decl_index).okToReportErrors()) { total += 1; - if (module.cimport_errors.get(key)) |errors| { + if (zcu.cimport_errors.get(key)) |errors| { total += errors.errorMessageCount(); } } } - if (module.emit_h) |emit_h| { + if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - if (module.declFileScope(key).okToReportErrors()) { + if (zcu.declFileScope(key).okToReportErrors()) { total += 1; } } } - if (module.global_error_set.entries.len - 1 > module.error_limit) { + if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) { total += 1; } } @@ -2882,8 +2886,8 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // Compile log errors only count if there are no other errors. if (total == 0) { - if (comp.module) |module| { - total += @intFromBool(module.compile_log_decls.count() != 0); + if (comp.module) |zcu| { + total += @intFromBool(zcu.compile_log_sources.count() != 0); } } @@ -2934,10 +2938,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { .msg = try bundle.addString("memory allocation failure"), }); } - if (comp.module) |module| { - for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { + if (comp.module) |zcu| { + for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { - try addModuleErrorMsg(module, &bundle, msg.*); + try addModuleErrorMsg(zcu, &bundle, msg.*); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. 
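To illustrate the keying change this commit makes: the error tables are now keyed on an analysis unit, which is either a `Decl` or a runtime function, and error reporting unwraps the unit back to its owner `Decl`, as in the loop above. A simplified sketch (the real `InternPool.AnalUnit` is a packed wrap/unwrap pair, and `funcOwnerDecl` is an invented stand-in for `zcu.funcInfo(...).owner_decl`):

    const std = @import("std");

    const AnalUnit = union(enum) {
        decl: u32,
        func: u32,
    };

    fn funcOwnerDecl(func: u32) u32 {
        return func + 100; // invented mapping, purely for the demo
    }

    pub fn main() void {
        const units = [_]AnalUnit{ .{ .decl = 1 }, .{ .func = 2 } };
        for (units) |unit| {
            const decl_index = switch (unit) {
                .decl => |d| d,
                .func => |f| funcOwnerDecl(f),
            };
            std.debug.print("unit .{s} -> owner decl {d}\n", .{ @tagName(unit), decl_index });
        }
    }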
@@ -2945,54 +2949,59 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try addZirErrorMessages(&bundle, file); } } - for (module.failed_embed_files.values()) |error_msg| { - try addModuleErrorMsg(module, &bundle, error_msg.*); + for (zcu.failed_embed_files.values()) |error_msg| { + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } - for (module.failed_decls.keys(), module.failed_decls.values()) |decl_index, error_msg| { + for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { + const decl_index = switch (anal_unit.unwrap()) { + .decl => |d| d, + .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + }; + // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(module, &bundle, error_msg.*); - if (module.cimport_errors.get(decl_index)) |errors| { - for (errors.getMessages()) |err_msg_index| { - const err_msg = errors.getErrorMessage(err_msg_index); - try bundle.addRootErrorMessage(.{ - .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)), - .src_loc = if (err_msg.src_loc != .none) blk: { - const src_loc = errors.getSourceLocation(err_msg.src_loc); - break :blk try bundle.addSourceLocation(.{ - .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)), - .span_start = src_loc.span_start, - .span_main = src_loc.span_main, - .span_end = src_loc.span_end, - .line = src_loc.line, - .column = src_loc.column, - .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0, - }); - } else .none, - }); - } + if (!zcu.declFileScope(decl_index).okToReportErrors()) continue; + + try addModuleErrorMsg(zcu, &bundle, error_msg.*); + if (zcu.cimport_errors.get(anal_unit)) |errors| { + for (errors.getMessages()) |err_msg_index| { + const err_msg = errors.getErrorMessage(err_msg_index); + try bundle.addRootErrorMessage(.{ + .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)), + .src_loc = if (err_msg.src_loc != .none) blk: { + const src_loc = errors.getSourceLocation(err_msg.src_loc); + break :blk try bundle.addSourceLocation(.{ + .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)), + .span_start = src_loc.span_start, + .span_main = src_loc.span_main, + .span_end = src_loc.span_end, + .line = src_loc.line, + .column = src_loc.column, + .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0, + }); + } else .none, + }); } } } - if (module.emit_h) |emit_h| { + if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| { // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. 
- if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(module, &bundle, error_msg.*); + if (zcu.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } } } - for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(module, &bundle, value.*); + for (zcu.failed_exports.values()) |value| { + try addModuleErrorMsg(zcu, &bundle, value.*); } - const actual_error_count = module.global_error_set.entries.len - 1; - if (actual_error_count > module.error_limit) { + const actual_error_count = zcu.global_error_set.entries.len - 1; + if (actual_error_count > zcu.error_limit) { try bundle.addRootErrorMessage(.{ - .msg = try bundle.printString("module used more errors than possible: used {d}, max {d}", .{ - actual_error_count, module.error_limit, + .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{ + actual_error_count, zcu.error_limit, }), .notes_len = 1, }); @@ -3041,14 +3050,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } if (comp.module) |zcu| { - if (bundle.root_list.items.len == 0 and zcu.compile_log_decls.count() != 0) { - const values = zcu.compile_log_decls.values(); + if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { + const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. const src_loc = values[0].src().upgrade(zcu); const err_msg: Module.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", - .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_decls.count() - 1), + .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1), }; defer gpa.free(err_msg.notes); @@ -3486,13 +3495,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo const decl = module.declPtr(decl_index); const lf = comp.bin_file.?; lf.updateDeclLineNumber(module, decl_index) catch |err| { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( - gpa, - decl.navSrcLoc(module).upgrade(module), - "unable to update line number: {s}", - .{@errorName(err)}, - )); + try module.failed_analysis.ensureUnusedCapacity(gpa, 1); + module.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + try Module.ErrorMsg.create( + gpa, + decl.navSrcLoc(module).upgrade(module), + "unable to update line number: {s}", + .{@errorName(err)}, + ), + ); decl.analysis = .codegen_failure; try module.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; diff --git a/src/Sema.zig b/src/Sema.zig index fafde99f47bc..4337ce892648 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2486,7 +2486,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error crash_report.compilerPanic("unexpected compile error occurred", null, null); } - try mod.failed_decls.ensureUnusedCapacity(gpa, 1); + try mod.failed_analysis.ensureUnusedCapacity(gpa, 1); try mod.failed_files.ensureUnusedCapacity(gpa, 1); if (block) |start_block| { @@ -2504,7 +2504,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error const max_references = refs: { if (mod.comp.reference_trace) |num| break :refs num; // Do not add multiple traces without explicit request. 
- if (mod.failed_decls.count() > 0) break :ref; + if (mod.failed_analysis.count() > 0) break :ref; break :refs default_reference_trace_len; }; @@ -2544,7 +2544,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error if (sema.func_index != .none) { ip.funcAnalysis(sema.func_index).state = .sema_failure; } - const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index); + const gop = mod.failed_analysis.getOrPutAssumeCapacity(sema.ownerUnit()); if (gop.found_existing) { // If there are multiple errors for the same Decl, prefer the first one added. sema.err = null; @@ -5823,11 +5823,7 @@ fn zirCompileLog( } try writer.print("\n", .{}); - const decl_index = if (sema.func_index != .none) - mod.funcOwnerDeclIndex(sema.func_index) - else - sema.owner_decl_index; - const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index); + const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.ownerUnit()); if (!gop.found_existing) gop.value_ptr.* = .{ .base_node_inst = block.src_base_inst, .node_offset = src_node, @@ -5980,7 +5976,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!comp.config.link_libc) try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try mod.cimport_errors.getOrPut(gpa, sema.owner_decl_index); + const gop = try mod.cimport_errors.getOrPut(gpa, sema.ownerUnit()); if (!gop.found_existing) { gop.value_ptr.* = c_import_res.errors; c_import_res.errors = std.zig.ErrorBundle.empty; @@ -38487,10 +38483,7 @@ pub fn flushExports(sema: *Sema) !void { const zcu = sema.mod; const gpa = zcu.gpa; - const unit: AnalUnit = if (sema.owner_func_index != .none) - AnalUnit.wrap(.{ .func = sema.owner_func_index }) - else - AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + const unit = sema.ownerUnit(); // There may be existing exports. For instance, a struct may export // things during both field type resolution and field default resolution. @@ -38524,6 +38517,14 @@ pub fn flushExports(sema: *Sema) !void { } } +pub fn ownerUnit(sema: Sema) AnalUnit { + if (sema.owner_func_index != .none) { + return AnalUnit.wrap(.{ .func = sema.owner_func_index }); + } else { + return AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Zcu.zig b/src/Zcu.zig index 3a329f0b03f8..d29d2e427983 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -108,15 +108,11 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{}, /// is not yet implemented. intern_pool: InternPool = .{}, -/// We optimize memory usage for a compilation with no compile errors by storing the -/// error messages and mapping outside of `Decl`. -/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. -/// Note that a Decl can succeed but the Fn it represents can fail. In this case, -/// a Decl can have a failed_decls entry but have analysis status of success. -failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, -/// Keep track of one `@compileLog` callsite per owner Decl. +/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. +failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{}, +/// Keep track of one `@compileLog` callsite per `AnalUnit`. 
/// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`.
+compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
     base_node_inst: InternPool.TrackedInst.Index,
     node_offset: i32,
     pub fn src(self: @This()) LazySrcLoc {
@@ -133,9 +129,9 @@ failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{},
 failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{},
 /// Key is index into `all_exports`.
 failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{},
-/// If a decl failed due to a cimport error, the corresponding Clang errors
+/// If analysis failed due to a cimport error, the corresponding Clang errors
 /// are stored here.
-cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, std.zig.ErrorBundle) = .{},
+cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{},

 /// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
 global_error_set: GlobalErrorSet = .{},
@@ -180,6 +176,7 @@ emit_h: ?*GlobalEmitH,

 test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},

+/// TODO: the key here will be a `Cau.Index`.
 global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{},

 reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
@@ -371,9 +368,9 @@ pub const Decl = struct {
         /// successfully complete semantic analysis.
         dependency_failure,
         /// Semantic analysis failure.
-        /// There will be a corresponding ErrorMsg in Zcu.failed_decls.
+        /// There will be a corresponding ErrorMsg in Zcu.failed_analysis.
         sema_failure,
-        /// There will be a corresponding ErrorMsg in Zcu.failed_decls.
+        /// There will be a corresponding ErrorMsg in Zcu.failed_analysis.
         codegen_failure,
         /// Semantic analysis and constant value codegen of this Decl has
         /// succeeded. However, the Decl may be outdated due to an in-progress
@@ -1001,11 +998,6 @@ pub const EmbedFile = struct {
 /// This struct holds data necessary to construct API-facing `AllErrors.Message`.
 /// Its memory is managed with the general purpose allocator so that they
 /// can be created and destroyed in response to incremental updates.
-/// In some cases, the File could have been inferred from where the ErrorMsg
-/// is stored. For example, if it is stored in Module.failed_decls, then the File
-/// would be determined by the Decl Scope. However, the data structure contains the field
-/// anyway so that `ErrorMsg` can be reused for error notes, which may be in a different
-/// file than the parent error message. It also simplifies processing of error messages.
pub const ErrorMsg = struct { src_loc: SrcLoc, msg: []const u8, @@ -2454,8 +2446,6 @@ pub fn deinit(zcu: *Zcu) void { for (zcu.import_table.keys()) |key| { gpa.free(key); } - var failed_decls = zcu.failed_decls; - zcu.failed_decls = .{}; for (zcu.import_table.values()) |value| { value.destroy(zcu); } @@ -2473,10 +2463,10 @@ pub fn deinit(zcu: *Zcu) void { zcu.local_zir_cache.handle.close(); zcu.global_zir_cache.handle.close(); - for (failed_decls.values()) |value| { + for (zcu.failed_analysis.values()) |value| { value.destroy(gpa); } - failed_decls.deinit(gpa); + zcu.failed_analysis.deinit(gpa); if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.values()) |value| { @@ -2507,7 +2497,7 @@ pub fn deinit(zcu: *Zcu) void { } zcu.cimport_errors.deinit(gpa); - zcu.compile_log_decls.deinit(gpa); + zcu.compile_log_sources.deinit(gpa); zcu.all_exports.deinit(gpa); zcu.free_exports.deinit(gpa); @@ -3508,9 +3498,9 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { error.GenericPoison => unreachable, else => |e| { decl.analysis = .sema_failure; - try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1); try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( + mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( mod.gpa, decl.navSrcLoc(mod).upgrade(mod), "unable to analyze: {s}", @@ -3683,9 +3673,9 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In verify.verify() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber( - decl_index, + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber( + AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), @@ -3709,8 +3699,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In func.analysis(ip).state = .codegen_failure; }, else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), "unable to codegen: {s}", @@ -5647,8 +5637,8 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { }, else => { const gpa = zcu.gpa; - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), "unable to codegen: {s}", diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dd6606ece72c..6fe7adf33c0f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1689,7 +1689,7 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try zcu.failed_decls.put(zcu.gpa, decl_index, dg.err_msg.?); + try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = 
decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, @@ -1710,7 +1710,7 @@ pub const Object = struct { dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); + try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index ee163c31543b..54b7b381cffd 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -218,7 +218,7 @@ pub const Object = struct { decl_gen.genDecl() catch |err| switch (err) { error.CodegenFail => { - try mod.failed_decls.put(mod.gpa, decl_index, decl_gen.error_msg.?); + try mod.failed_analysis.put(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?); }, else => |other| { // There might be an error that happened *after* self.error_msg diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 0244d085b8fa..94b9ca520ec6 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1155,7 +1155,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1183,7 +1183,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd .ok => |atom_index| atom_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1277,7 +1277,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -2751,6 +2751,7 @@ const TableSection = @import("table_section.zig").TableSection; const StringTable = @import("StringTable.zig"); const Type = @import("../type.zig").Type; const Value = @import("../Value.zig"); +const AnalUnit = InternPool.AnalUnit; pub const base_tag: link.File.Tag = .coff; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 14040767b134..74e2039f37a7 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1096,7 +1096,7 @@ pub fn updateFunc( .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1170,7 +1170,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1307,7 +1307,7 @@ pub fn lowerUnnamedConst( .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1656,4 +1656,5 @@ const Symbol = @import("Symbol.zig"); const StringTable = 
@import("../StringTable.zig"); const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 1fce9e37dd7e..ee5ab83b0af4 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -694,7 +694,7 @@ pub fn updateFunc( .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -762,7 +762,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1105,7 +1105,7 @@ pub fn lowerUnnamedConst( .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1596,4 +1596,5 @@ const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 60775ac66274..d44da5c973dd 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -17,6 +17,7 @@ const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const Type = @import("../type.zig").Type; const Value = @import("../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const std = @import("std"); const builtin = @import("builtin"); @@ -449,7 +450,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: .ok => try code_buffer.toOwnedSlice(), .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -513,7 +514,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -550,7 +551,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index a3b8eb445994..341d3a2fc839 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -280,7 +280,7 @@ pub fn updateDecl( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -320,7 +320,7 @@ pub fn updateFunc( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; 
- try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -501,7 +501,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d }, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return error.CodegenFail; }, } @@ -1255,4 +1255,5 @@ const Symbol = @import("Symbol.zig"); const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); const Wasm = @import("../Wasm.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig index f7de8129b7a5..6ba1329a2e9c 100644 --- a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig +++ b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig @@ -16,6 +16,7 @@ pub export fn entry() void { // target=native // // :6:5: error: found compile log statement +// :6:5: note: also here // // Compile Log Output: // @as(tmp.Bar, .{ .X = 123 }) diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig index 6a14b78b173a..ac89cfd1b326 100644 --- a/test/cases/compile_errors/compile_log.zig +++ b/test/cases/compile_errors/compile_log.zig @@ -18,6 +18,7 @@ export fn baz() void { // // :6:5: error: found compile log statement // :12:5: note: also here +// :6:5: note: also here // // Compile Log Output: // @as(*const [5:0]u8, "begin") From 089bbd6588d82ccda0646e756006cf5787eadef2 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jun 2024 20:00:11 +0100 Subject: [PATCH 037/152] Zcu: rework reference traces Previously, `reference_table` mapped from a `Decl` being referenced to the `Decl` that performed the reference. This is convenient for constructing error messages, but problematic for incremental compilation. This is because on an incremental update, we want to efficiently remove all references triggered by an `AnalUnit` which is being re-analyzed. For this reason, `reference_table` now maps the other way: from the `AnalUnit` *performing* the reference, to the `AnalUnit` whose analysis was triggered. As a general rule, any call to any of the following functions should be preceded by a call to `Sema.addReferenceEntry`: * `Zcu.ensureDeclAnalyzed` * `Sema.ensureDeclAnalyzed` * `Zcu.ensureFuncBodyAnalyzed` * `Zcu.ensureFuncBodyAnalysisQueued` This is not just important for error messages, but also more fundamentally for incremental compilation. When an incremental update occurs, we must determine whether any `AnalUnit` has become unreferenced: in this case, we should ignore its associated error messages, and perhaps even remove it from the binary. For this reason, we no longer store only one reference to every `AnalUnit`, but every reference. At the end of an update, `Zcu.resolveReferences` will construct the reverse mapping, and as such identify which `AnalUnit`s are still referenced. The current implementation doesn't quite do what we need for incremental compilation here, but the framework is in place. Note that `Zcu.resolveReferences` does constitute a non-trivial amount of work on every incremental update. 
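To make the new data structures concrete, here is a rough, self-contained sketch of the idea (stand-in types, not the actual compiler code; `AnalUnit` is modeled as a plain integer and `sentinel` stands in for the real list terminator): each unit's outgoing references form an intrusively linked list inside one shared array, and the reverse mapping is rebuilt by walking every list.

    // Simplified model of the forward reference table and the reverse
    // traversal performed by `resolveReferences`.
    const std = @import("std");

    const AnalUnit = u32; // stand-in for the real InternPool type
    const sentinel = std.math.maxInt(u32);

    const Reference = struct {
        referenced: AnalUnit,
        next: u32, // index into `all_references`; `sentinel` ends the list
    };

    fn resolveReferences(
        gpa: std.mem.Allocator,
        reference_table: *const std.AutoArrayHashMapUnmanaged(AnalUnit, u32),
        all_references: []const Reference,
    ) !std.AutoHashMapUnmanaged(AnalUnit, AnalUnit) {
        var result: std.AutoHashMapUnmanaged(AnalUnit, AnalUnit) = .{};
        errdefer result.deinit(gpa);
        // Walk each unit's linked list of outgoing references, recording
        // the first referencer seen for every referenced unit.
        for (reference_table.keys(), reference_table.values()) |referencer, first| {
            var idx = first;
            while (idx != sentinel) {
                const ref = all_references[idx];
                const gop = try result.getOrPut(gpa, ref.referenced);
                if (!gop.found_existing) gop.value_ptr.* = referencer;
                idx = ref.next;
            }
        }
        return result;
    }

(The sketch elides the freelist used to recycle slots in `all_references` when a unit's references are deleted.)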
However, for incremental compilation, this work -- which will effectively be a graph traversal over all `AnalUnit` references -- seems strictly necessary. At the moment, this work is only done if the `Zcu` has any errors, when collecting them into the final `ErrorBundle`. An unsolved problem here is how to represent inline function calls in the reference trace. If `foo` performs an inline call to `bar` which references `qux`, then ideally, `bar` would be shown on the reference trace between `foo` and `qux`, but this is not currently the case. The solution here is probably for `Zcu.Reference` to store information about the source locations of active inline calls between the referencer and its reference. --- src/Compilation.zig | 96 +++++++++++++++---------- src/Sema.zig | 167 +++++++++++++++++++------------------------- src/Zcu.zig | 149 ++++++++++++++++++++++++++------------- 3 files changed, 231 insertions(+), 181 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 4c693ffb28e3..3c97cd314504 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -31,6 +31,7 @@ const clangMain = @import("main.zig").clangMain; const Zcu = @import("Zcu.zig"); /// Deprecated; use `Zcu`. const Module = Zcu; +const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); const Cache = std.Build.Cache; const c_codegen = @import("codegen/c.zig"); @@ -2939,9 +2940,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); } if (comp.module) |zcu| { + var all_references = try zcu.resolveReferences(); + defer all_references.deinit(gpa); + for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { - try addModuleErrorMsg(zcu, &bundle, msg.*); + try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -2950,7 +2954,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } for (zcu.failed_embed_files.values()) |error_msg| { - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { const decl_index = switch (anal_unit.unwrap()) { .decl => |d| d, .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, }; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. if (!zcu.declFileScope(decl_index).okToReportErrors()) continue; - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); if (zcu.cimport_errors.get(anal_unit)) |errors| { for (errors.getMessages()) |err_msg_index| { const err_msg = errors.getErrorMessage(err_msg_index); @@ -2989,12 +2993,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds.
if (zcu.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); } } } for (zcu.failed_exports.values()) |value| { - try addModuleErrorMsg(zcu, &bundle, value.*); + try addModuleErrorMsg(zcu, &bundle, value.*, &all_references); } const actual_error_count = zcu.global_error_set.entries.len - 1; @@ -3051,6 +3055,9 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (comp.module) |zcu| { if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { + var all_references = try zcu.resolveReferences(); + defer all_references.deinit(gpa); + const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. const src_loc = values[0].src().upgrade(zcu); @@ -3068,7 +3075,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }; } - try addModuleErrorMsg(zcu, &bundle, err_msg); + try addModuleErrorMsg(zcu, &bundle, err_msg, &all_references); } } @@ -3124,7 +3131,12 @@ pub const ErrorNoteHashContext = struct { } }; -pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { +pub fn addModuleErrorMsg( + mod: *Module, + eb: *ErrorBundle.Wip, + module_err_msg: Module.ErrorMsg, + all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference), +) !void { const gpa = eb.gpa; const ip = &mod.intern_pool; const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { @@ -3145,39 +3157,49 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{}; defer ref_traces.deinit(gpa); - const remaining_references: ?u32 = remaining: { - if (mod.comp.reference_trace) |_| { - if (module_err_msg.hidden_references > 0) break :remaining module_err_msg.hidden_references; - } else { - if (module_err_msg.reference_trace.len > 0) break :remaining 0; + if (module_err_msg.reference_trace_root.unwrap()) |rt_root| { + var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{}; + defer seen.deinit(gpa); + + const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len; + + var referenced_by = rt_root; + while (all_references.get(referenced_by)) |ref| { + const gop = try seen.getOrPut(gpa, ref.referencer); + if (gop.found_existing) break; + if (ref_traces.items.len < max_references) { + const src = ref.src.upgrade(mod); + const source = try src.file_scope.getSource(gpa); + const span = try src.span(gpa); + const loc = std.zig.findLineColumn(source.bytes, span.main); + const rt_file_path = try src.file_scope.fullPath(gpa); + const name = switch (ref.referencer.unwrap()) { + .decl => |d| mod.declPtr(d).name, + .func => |f| mod.funcOwnerDeclPtr(f).name, + }; + try ref_traces.append(gpa, .{ + .decl_name = try eb.addString(name.toSlice(ip)), + .src_loc = try eb.addSourceLocation(.{ + .src_path = try eb.addString(rt_file_path), + .span_start = span.start, + .span_main = span.main, + .span_end = span.end, + .line = @intCast(loc.line), + .column = @intCast(loc.column), + .source_line = 0, + }), + }); + } + referenced_by = ref.referencer; } - break :remaining null; - }; - try ref_traces.ensureTotalCapacityPrecise(gpa, module_err_msg.reference_trace.len + - @intFromBool(remaining_references != null)); - for (module_err_msg.reference_trace) |module_reference| { - const source = try 
module_reference.src_loc.file_scope.getSource(gpa); - const span = try module_reference.src_loc.span(gpa); - const loc = std.zig.findLineColumn(source.bytes, span.main); - const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); - defer gpa.free(rt_file_path); - ref_traces.appendAssumeCapacity(.{ - .decl_name = try eb.addString(module_reference.decl.toSlice(ip)), - .src_loc = try eb.addSourceLocation(.{ - .src_path = try eb.addString(rt_file_path), - .span_start = span.start, - .span_main = span.main, - .span_end = span.end, - .line = @intCast(loc.line), - .column = @intCast(loc.column), - .source_line = 0, - }), - }); + if (seen.count() > ref_traces.items.len) { + try ref_traces.append(gpa, .{ + .decl_name = @intCast(seen.count() - ref_traces.items.len), + .src_loc = .none, + }); + } } - if (remaining_references) |remaining| ref_traces.appendAssumeCapacity( - .{ .decl_name = remaining, .src_loc = .none }, - ); const src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(file_path), diff --git a/src/Sema.zig b/src/Sema.zig index 4337ce892648..105fedbec733 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -121,6 +121,11 @@ comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{}, /// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`. exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, +/// All references registered so far by this `Sema`. This is a temporary duplicate +/// of data stored in `Zcu.all_references`. It exists to avoid adding references to +/// a given `AnalUnit` multiple times. +references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, + const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. runtime_index: Value.RuntimeIndex, @@ -2472,87 +2477,57 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error @setCold(true); const gpa = sema.gpa; const mod = sema.mod; + const ip = &mod.intern_pool; - ref: { - errdefer err_msg.destroy(gpa); + if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) { + var all_references = mod.resolveReferences() catch @panic("out of memory"); + var wip_errors: std.zig.ErrorBundle.Wip = undefined; + wip_errors.init(gpa) catch @panic("out of memory"); + Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch unreachable; + std.debug.print("compile error during Sema:\n", .{}); + var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; + error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); + crash_report.compilerPanic("unexpected compile error occurred", null, null); + } - if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) { - var wip_errors: std.zig.ErrorBundle.Wip = undefined; - wip_errors.init(gpa) catch unreachable; - Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable; - std.debug.print("compile error during Sema:\n", .{}); - var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; - error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); - crash_report.compilerPanic("unexpected compile error occurred", null, null); + if (block) |start_block| { + var block_it = start_block; + while (block_it.inlining) |inlining| { + try sema.errNote( + inlining.call_src, + err_msg, + "called from here", + .{}, + ); + block_it = inlining.call_block; } + } - try mod.failed_analysis.ensureUnusedCapacity(gpa, 1); - try mod.failed_files.ensureUnusedCapacity(gpa, 1); - - if (block) |start_block| { - var block_it = start_block; - while 
(block_it.inlining) |inlining| { - try sema.errNote( - inlining.call_src, - err_msg, - "called from here", - .{}, - ); - block_it = inlining.call_block; - } - - const max_references = refs: { - if (mod.comp.reference_trace) |num| break :refs num; - // Do not add multiple traces without explicit request. - if (mod.failed_analysis.count() > 0) break :ref; - break :refs default_reference_trace_len; - }; + const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() == 0; + if (use_ref_trace) { + err_msg.reference_trace_root = sema.ownerUnit().toOptional(); + } - var referenced_by = if (sema.owner_func_index != .none) - mod.funcOwnerDeclIndex(sema.owner_func_index) - else - sema.owner_decl_index; - var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa); - defer reference_stack.deinit(); - - // Avoid infinite loops. - var seen = std.AutoHashMap(InternPool.DeclIndex, void).init(gpa); - defer seen.deinit(); - - while (mod.reference_table.get(referenced_by)) |ref| { - const gop = try seen.getOrPut(ref.referencer); - if (gop.found_existing) break; - if (reference_stack.items.len < max_references) { - const decl = mod.declPtr(ref.referencer); - try reference_stack.append(.{ - .decl = decl.name, - .src_loc = ref.src.upgrade(mod), - }); - } - referenced_by = ref.referencer; - } - err_msg.reference_trace = try reference_stack.toOwnedSlice(); - err_msg.hidden_references = @intCast(seen.count() -| max_references); - } + const gop = try mod.failed_analysis.getOrPut(gpa, sema.ownerUnit()); + if (gop.found_existing) { + // If there are multiple errors for the same Decl, prefer the first one added. + sema.err = null; + err_msg.destroy(gpa); + } else { + sema.err = err_msg; + gop.value_ptr.* = err_msg; } - const ip = &mod.intern_pool; + if (sema.owner_func_index != .none) { ip.funcAnalysis(sema.owner_func_index).state = .sema_failure; } else { sema.owner_decl.analysis = .sema_failure; } + if (sema.func_index != .none) { ip.funcAnalysis(sema.func_index).state = .sema_failure; } - const gop = mod.failed_analysis.getOrPutAssumeCapacity(sema.ownerUnit()); - if (gop.found_existing) { - // If there are multiple errors for the same Decl, prefer the first one added. 
- sema.err = null; - err_msg.destroy(gpa); - } else { - sema.err = err_msg; - gop.value_ptr.* = err_msg; - } + return error.AnalysisFail; } @@ -4235,6 +4210,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com if (mod.intern_pool.isFuncBody(val)) { const ty = Type.fromInterned(mod.intern_pool.typeOf(val)); if (try sema.fnHasRuntimeBits(ty)) { + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val })); try mod.ensureFuncBodyAnalysisQueued(val); } } @@ -6395,6 +6371,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } else try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); { + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); try sema.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); if (exported_decl.val.getFunction(mod)) |function| { @@ -6446,6 +6423,7 @@ pub fn analyzeExport( if (options.linkage == .internal) return; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = exported_decl_index })); try sema.ensureDeclAnalyzed(exported_decl_index); const exported_decl = mod.declPtr(exported_decl_index); const export_ty = exported_decl.typeOf(mod); @@ -6468,7 +6446,7 @@ pub fn analyzeExport( return sema.fail(block, src, "export target cannot be extern", .{}); } - try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); + try sema.maybeQueueFuncBodyAnalysis(src, exported_decl_index); try sema.exports.append(gpa, .{ .opts = options, @@ -6699,8 +6677,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .no_embedded_nulls, ); const decl_index = try sema.lookupIdentifier(block, src, decl_name); - try sema.addReferencedBy(src, decl_index); - return sema.analyzeDeclRef(decl_index); + return sema.analyzeDeclRef(src, decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -7903,6 +7880,7 @@ fn analyzeCall( if (try sema.resolveValue(func)) |func_val| { if (mod.intern_pool.isFuncBody(func_val.toIntern())) { + try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() })); try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern()); } } @@ -8339,8 +8317,6 @@ fn instantiateGenericCall( const callee = mod.funcInfo(callee_index); callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota); - try sema.addReferencedBy(call_src, callee.owner_decl); - // Make a runtime call to the new function, making sure to omit the comptime args. const func_ty = Type.fromInterned(callee.ty); const func_ty_info = mod.typeToFunc(func_ty).?; @@ -8366,6 +8342,7 @@ fn instantiateGenericCall( ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; } + try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index })); try mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len); @@ -17479,7 +17456,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat .@"comptime" => |index| return Air.internedToRef(index), .runtime => |index| index, .decl_val => |decl_index| return sema.analyzeDeclVal(block, src, decl_index), - .decl_ref => |decl_index| return sema.analyzeDeclRef(decl_index), + .decl_ref => |decl_index| return sema.analyzeDeclRef(src, decl_index), }; // The comptime case is handled already above. Runtime case below. 
@@ -27673,7 +27650,6 @@ fn fieldCallBind( const decl_idx = (try sema.namespaceLookup(block, src, namespace, field_name)) orelse break :found_decl null; - try sema.addReferencedBy(src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); if (mod.typeToFunc(decl_type)) |func_type| f: { @@ -27829,8 +27805,7 @@ fn namespaceLookupRef( decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, opt_namespace, decl_name)) orelse return null; - try sema.addReferencedBy(src, decl); - return try sema.analyzeDeclRef(decl); + return try sema.analyzeDeclRef(src, decl); } fn namespaceLookupVal( @@ -28968,7 +28943,7 @@ fn coerceExtra( if (inst_ty.zigTypeTag(zcu) == .Fn) { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_decl = fn_val.pointerDecl(zcu).?; - const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); + const inst_as_ptr = try sema.analyzeDeclRef(inst_src, fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -30521,7 +30496,7 @@ fn coerceVarArgParam( .Fn => fn_ptr: { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_decl = fn_val.pointerDecl(mod).?; - break :fn_ptr try sema.analyzeDeclRef(fn_decl); + break :fn_ptr try sema.analyzeDeclRef(inst_src, fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Float => float: { @@ -31748,11 +31723,10 @@ fn analyzeDeclVal( src: LazySrcLoc, decl_index: InternPool.DeclIndex, ) CompileError!Air.Inst.Ref { - try sema.addReferencedBy(src, decl_index); if (sema.decl_val_table.get(decl_index)) |result| { return result; } - const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); + const decl_ref = try sema.analyzeDeclRefInner(src, decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (result.toInterned() != null) { if (!block.is_typeof) { @@ -31762,18 +31736,18 @@ fn analyzeDeclVal( return result; } -fn addReferencedBy( +fn addReferenceEntry( sema: *Sema, src: LazySrcLoc, - decl_index: InternPool.DeclIndex, + referenced_unit: AnalUnit, ) !void { if (sema.mod.comp.reference_trace == 0) return; - try sema.mod.reference_table.put(sema.gpa, decl_index, .{ - // TODO: this can make the reference trace suboptimal. This will be fixed - // once the reference table is reworked for incremental compilation. - .referencer = sema.owner_decl_index, - .src = src, - }); + const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); + if (gop.found_existing) return; + // TODO: we need to figure out how to model inline calls here. + // They aren't references in the analysis sense, but ought to show up in the reference trace! + // Would representing inline calls in the reference table cause excessive memory usage? 
+ try sema.mod.addUnitReference(sema.ownerUnit(), referenced_unit, src); } pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void { @@ -31823,16 +31797,17 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { } }))); } -fn analyzeDeclRef(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { - return sema.analyzeDeclRefInner(decl_index, true); +fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { + return sema.analyzeDeclRefInner(src, decl_index, true); } /// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but /// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. -fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { +fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { const mod = sema.mod; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); try sema.ensureDeclAnalyzed(decl_index); const decl_val = try mod.declPtr(decl_index).valueOrFail(); @@ -31853,7 +31828,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn }, }); if (analyze_fn_body) { - try sema.maybeQueueFuncBodyAnalysis(decl_index); + try sema.maybeQueueFuncBodyAnalysis(src, decl_index); } return Air.internedToRef((try mod.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), @@ -31862,12 +31837,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn } }))); } -fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: InternPool.DeclIndex) !void { +fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void { const mod = sema.mod; const decl = mod.declPtr(decl_index); const decl_val = try decl.valueOrFail(); if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return; if (!try sema.fnHasRuntimeBits(decl_val.typeOf(mod))) return; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = decl_val.toIntern() })); try mod.ensureFuncBodyAnalysisQueued(decl_val.toIntern()); } @@ -31882,8 +31858,8 @@ fn analyzeRef( if (try sema.resolveValue(operand)) |val| { switch (mod.intern_pool.indexToKey(val.toIntern())) { - .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), - .func => |func| return sema.analyzeDeclRef(func.owner_decl), + .extern_func => |extern_func| return sema.analyzeDeclRef(src, extern_func.decl), + .func => |func| return sema.analyzeDeclRef(src, func.owner_decl), else => return anonDeclRef(sema, val.toIntern()), } } @@ -35834,6 +35810,7 @@ fn resolveInferredErrorSet( } // In this case we are dealing with the actual InferredErrorSet object that // corresponds to the function, not one created to track an inline/comptime call. + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index })); try sema.ensureFuncBodyAnalyzed(func_index); } diff --git a/src/Zcu.zig b/src/Zcu.zig index d29d2e427983..4d7508da20c6 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -179,10 +179,15 @@ test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, /// TODO: the key here will be a `Cau.Index`. 
global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{}, -reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { - referencer: Decl.Index, - src: LazySrcLoc, -}) = .{}, +/// Key is the `AnalUnit` *performing* the reference. This representation allows +/// incremental updates to quickly delete references caused by a specific `AnalUnit`. +/// Value is index into `all_references` of the first reference triggered by the unit. +/// The `next` field on the `Reference` forms a linked list of all references +/// triggered by the key `AnalUnit`. +reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +all_references: std.ArrayListUnmanaged(Reference) = .{}, +/// Freelist of indices in `all_references`. +free_references: std.ArrayListUnmanaged(u32) = .{}, panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, /// The panic function body. @@ -290,44 +295,14 @@ pub const Export = struct { } }; -const ValueArena = struct { - state: std.heap.ArenaAllocator.State, - state_acquired: ?*std.heap.ArenaAllocator.State = null, - - /// If this ValueArena replaced an existing one during re-analysis, this is the previous instance - prev: ?*ValueArena = null, - - /// Returns an allocator backed by either promoting `state`, or by the existing ArenaAllocator - /// that has already promoted `state`. `out_arena_allocator` provides storage for the initial promotion, - /// and must live until the matching call to release(). - pub fn acquire(self: *ValueArena, child_allocator: Allocator, out_arena_allocator: *std.heap.ArenaAllocator) Allocator { - if (self.state_acquired) |state_acquired| { - return @as(*std.heap.ArenaAllocator, @fieldParentPtr("state", state_acquired)).allocator(); - } - - out_arena_allocator.* = self.state.promote(child_allocator); - self.state_acquired = &out_arena_allocator.state; - return out_arena_allocator.allocator(); - } - - /// Releases the allocator acquired by `acquire. `arena_allocator` must match the one passed to `acquire`. - pub fn release(self: *ValueArena, arena_allocator: *std.heap.ArenaAllocator) void { - if (@as(*std.heap.ArenaAllocator, @fieldParentPtr("state", self.state_acquired.?)) == arena_allocator) { - self.state = self.state_acquired.?.*; - self.state_acquired = null; - } - } - - pub fn deinit(self: ValueArena, child_allocator: Allocator) void { - assert(self.state_acquired == null); - - const prev = self.prev; - self.state.promote(child_allocator).deinit(); - - if (prev) |p| { - p.deinit(child_allocator); - } - } +pub const Reference = struct { + /// The `AnalUnit` whose semantic analysis was triggered by this reference. + referenced: AnalUnit, + /// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`. + /// `std.math.maxInt(u32)` is the sentinel. + next: u32, + /// The source location of the reference. + src: LazySrcLoc, }; pub const Decl = struct { @@ -758,7 +733,7 @@ pub const File = struct { /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen. multi_pkg: bool = false, /// List of references to this file, used for multi-package errors. - references: std.ArrayListUnmanaged(Reference) = .{}, + references: std.ArrayListUnmanaged(File.Reference) = .{}, /// The hash of the path to this file, used to store `InternPool.TrackedInst`. path_digest: Cache.BinDigest, @@ -925,7 +900,7 @@ pub const File = struct { } /// Add a reference to this file during AstGen.
- pub fn addReference(file: *File, mod: Module, ref: Reference) !void { + pub fn addReference(file: *File, mod: Module, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the // front of the references array (see below), this loop is actually O(1) on valid code. if (ref == .root) { @@ -1002,8 +977,7 @@ pub const ErrorMsg = struct { src_loc: SrcLoc, msg: []const u8, notes: []ErrorMsg = &.{}, - reference_trace: []Trace = &.{}, - hidden_references: u32 = 0, + reference_trace_root: AnalUnit.Optional = .none, pub const Trace = struct { decl: InternPool.NullTerminatedString, @@ -1048,7 +1022,6 @@ pub const ErrorMsg = struct { } gpa.free(err_msg.notes); gpa.free(err_msg.msg); - gpa.free(err_msg.reference_trace); err_msg.* = undefined; } }; @@ -2520,6 +2493,8 @@ pub fn deinit(zcu: *Zcu) void { zcu.global_assembly.deinit(gpa); zcu.reference_table.deinit(gpa); + zcu.all_references.deinit(gpa); + zcu.free_references.deinit(gpa); { var it = zcu.intern_pool.allocated_namespaces.iterator(0); @@ -3462,7 +3437,8 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. if (build_options.only_c) unreachable; - mod.deleteUnitExports(AnalUnit.wrap(.{ .decl = decl_index })); + mod.deleteUnitExports(decl_as_depender); + mod.deleteUnitReferences(decl_as_depender); } const sema_result: SemaDeclResult = blk: { @@ -3591,7 +3567,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In if (was_outdated) { if (build_options.only_c) unreachable; _ = zcu.outdated_ready.swapRemove(func_as_depender); - zcu.deleteUnitExports(AnalUnit.wrap(.{ .func = func_index })); + zcu.deleteUnitExports(func_as_depender); + zcu.deleteUnitReferences(func_as_depender); } switch (func.analysis(ip).state) { @@ -4967,6 +4944,47 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { } } +/// Delete all references in `reference_table` which are caused by this `AnalUnit`. +/// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated. +fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { + const gpa = zcu.gpa; + + const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; + var idx = kv.value; + + while (idx != std.math.maxInt(u32)) { + zcu.free_references.append(gpa, idx) catch { + // This space will be reused eventually, so we need not propagate this error. + // Just leak it for now, and let GC reclaim it later on. 
+ return; + }; + idx = zcu.all_references.items[idx].next; + } +} + +pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void { + const gpa = zcu.gpa; + + try zcu.reference_table.ensureUnusedCapacity(gpa, 1); + + const ref_idx = zcu.free_references.popOrNull() orelse idx: { + _ = try zcu.all_references.addOne(gpa); + break :idx zcu.all_references.items.len - 1; + }; + + errdefer comptime unreachable; + + const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit); + + zcu.all_references.items[ref_idx] = .{ + .referenced = referenced_unit, + .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32), + .src = ref_src, + }; + + gop.value_ptr.* = @intCast(ref_idx); +} + pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -6447,3 +6465,36 @@ pub fn structPackedFieldBitOffset( } unreachable; // index out of bounds } + +pub const ResolvedReference = struct { + referencer: AnalUnit, + src: LazySrcLoc, +}; + +/// Returns a mapping from an `AnalUnit` to where it is referenced. +/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can +/// use the returned map to determine which units have become unreferenced in an incremental update. +pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) { + const gpa = zcu.gpa; + + var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{}; + errdefer result.deinit(gpa); + + // This is not a sufficient size, but a lower bound. + try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count())); + + for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| { + assert(first_ref_idx != std.math.maxInt(u32)); + var ref_idx = first_ref_idx; + while (ref_idx != std.math.maxInt(u32)) { + const ref = zcu.all_references.items[ref_idx]; + const gop = try result.getOrPut(gpa, ref.referenced); + if (!gop.found_existing) { + gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src }; + } + ref_idx = ref.next; + } + } + + return result; +} From ded5c759f83a4da355a128dd4d7f5e22cbd3cabe Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 30 Jun 2024 03:00:07 +0100 Subject: [PATCH 038/152] Zcu: store `LazySrcLoc` in error messages This change modifies `Zcu.ErrorMsg` to store a `Zcu.LazySrcLoc` rather than a `Zcu.SrcLoc`. Everything else is dominoes. The reason for this change is incremental compilation. If a failed `AnalUnit` is up-to-date on an update, we want to re-use the old error messages. However, the file containing the error location may have been modified, and `SrcLoc` cannot survive such a modification. `LazySrcLoc` is designed to be correct across incremental updates. Therefore, we defer source location resolution until `Compilation` gathers the compile errors into the `ErrorBundle`. 
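To illustrate the principle with a tiny, self-contained model (hypothetical types, not the real compiler API), the error stores only data that survives incremental updates, and resolves it at report time:

    const std = @import("std");

    const SrcLoc = struct { byte_offset: u32 }; // concrete; valid only ephemerally

    const LazySrcLoc = struct {
        base_node_inst: u32, // index of a tracked instruction, stable across updates
        offset: u32, // position relative to the tracked instruction

        // Resolution happens only when the error is reported, against the
        // *current* offsets of the tracked instructions.
        fn upgrade(lazy: LazySrcLoc, tracked_offsets: []const u32) SrcLoc {
            return .{ .byte_offset = tracked_offsets[lazy.base_node_inst] + lazy.offset };
        }
    };

    test "stored error location survives a file edit" {
        var tracked = [_]u32{ 100, 250 };
        const err_loc: LazySrcLoc = .{ .base_node_inst = 1, .offset = 4 };
        // An incremental update shifts the file; the tracker moves with it.
        tracked[1] = 300;
        try std.testing.expectEqual(@as(u32, 304), err_loc.upgrade(&tracked).byte_offset);
    }

The real `LazySrcLoc` resolves through `InternPool.TrackedInst` rather than a plain offset array, but the lifetime rule is the same: `upgrade` results are ephemeral, while the lazy form may be stored.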
--- src/Compilation.zig | 91 +++++++++++++++++++----------------- src/Sema.zig | 10 ++-- src/Zcu.zig | 78 ++++++++++++------------------- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/aarch64/Emit.zig | 2 +- src/arch/arm/CodeGen.zig | 4 +- src/arch/arm/Emit.zig | 2 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/riscv64/Lower.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/sparc64/Emit.zig | 2 +- src/arch/wasm/CodeGen.zig | 6 +-- src/arch/wasm/Emit.zig | 2 +- src/arch/x86_64/CodeGen.zig | 6 +-- src/arch/x86_64/Lower.zig | 2 +- src/codegen.zig | 22 ++++----- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 2 +- src/codegen/spirv.zig | 4 +- src/link.zig | 2 +- src/link/Coff.zig | 25 ++++------ src/link/Elf.zig | 2 +- src/link/Elf/ZigObject.zig | 29 +++++------- src/link/MachO.zig | 2 +- src/link/MachO/ZigObject.zig | 25 ++++------ src/link/Plan9.zig | 19 +++----- src/link/Wasm.zig | 2 +- src/link/Wasm/ZigObject.zig | 14 +++--- 28 files changed, 162 insertions(+), 207 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 3c97cd314504..55084fb971a7 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2629,22 +2629,24 @@ fn reportMultiModuleErrors(mod: *Module) !void { for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| { errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa); note.* = switch (ref) { - .import => |loc| blk: { - break :blk try Module.ErrorMsg.init( - mod.gpa, - loc, - "imported from module {s}", - .{loc.file_scope.mod.fully_qualified_name}, - ); - }, - .root => |pkg| blk: { - break :blk try Module.ErrorMsg.init( - mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, - "root of module {s}", - .{pkg.fully_qualified_name}, - ); - }, + .import => |import| try Module.ErrorMsg.init( + mod.gpa, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, import.file, .main_struct_inst), + .offset = .{ .token_abs = import.token }, + }, + "imported from module {s}", + .{import.file.mod.fully_qualified_name}, + ), + .root => |pkg| try Module.ErrorMsg.init( + mod.gpa, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, + "root of module {s}", + .{pkg.fully_qualified_name}, + ), }; } errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa); @@ -2652,7 +2654,10 @@ fn reportMultiModuleErrors(mod: *Module) !void { if (omitted > 0) { notes[num_notes] = try Module.ErrorMsg.init( mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, "{} more references omitted", .{omitted}, ); @@ -2661,7 +2666,10 @@ fn reportMultiModuleErrors(mod: *Module) !void { const err = try Module.ErrorMsg.create( mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, "file exists in multiple modules", .{}, ); @@ -3060,7 +3068,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. 
- const src_loc = values[0].src().upgrade(zcu); + const src_loc = values[0].src(); const err_msg: Module.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", @@ -3070,7 +3078,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { for (values[1..], err_msg.notes) |src_info, *note| { note.* = .{ - .src_loc = src_info.src().upgrade(zcu), + .src_loc = src_info.src(), .msg = "also here", }; } @@ -3139,8 +3147,9 @@ pub fn addModuleErrorMsg( ) !void { const gpa = eb.gpa; const ip = &mod.intern_pool; - const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { - const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); + const err_src_loc = module_err_msg.src_loc.upgrade(mod); + const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| { + const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); try eb.addRootErrorMessage(.{ .msg = try eb.printString("unable to load '{s}': {s}", .{ @@ -3149,9 +3158,9 @@ pub fn addModuleErrorMsg( }); return; }; - const err_span = try module_err_msg.src_loc.span(gpa); + const err_span = try err_src_loc.span(gpa); const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main); - const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); + const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{}; @@ -3208,7 +3217,7 @@ pub fn addModuleErrorMsg( .span_end = err_span.end, .line = @intCast(err_loc.line), .column = @intCast(err_loc.column), - .source_line = if (module_err_msg.src_loc.lazy == .entire_file) + .source_line = if (err_src_loc.lazy == .entire_file) 0 else try eb.addString(err_loc.source_line), @@ -3225,10 +3234,11 @@ pub fn addModuleErrorMsg( defer notes.deinit(gpa); for (module_err_msg.notes) |module_note| { - const source = try module_note.src_loc.file_scope.getSource(gpa); - const span = try module_note.src_loc.span(gpa); + const note_src_loc = module_note.src_loc.upgrade(mod); + const source = try note_src_loc.file_scope.getSource(gpa); + const span = try note_src_loc.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); - const note_file_path = try module_note.src_loc.file_scope.fullPath(gpa); + const note_file_path = try note_src_loc.file_scope.fullPath(gpa); defer gpa.free(note_file_path); const gop = try notes.getOrPutContext(gpa, .{ @@ -3522,7 +3532,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(module).upgrade(module), + decl.navSrcLoc(module), "unable to update line number: {s}", .{@errorName(err)}, ), @@ -4023,9 +4033,8 @@ fn workerAstGenFile( const res = mod.importFile(file, import_path) catch continue; if (!res.is_pkg) { res.file.addReference(mod.*, .{ .import = .{ - .file_scope = file, - .base_node = 0, - .lazy = .{ .token_abs = item.data.token }, + .file = file, + .token = item.data.token, } }) catch continue; } break :blk res; @@ -4398,20 +4407,14 @@ fn reportRetryableAstGenError( file.status = .retryable_failure; - const src_loc: Module.SrcLoc = switch (src) { + const src_loc: Module.LazySrcLoc = switch (src) { .root => .{ - .file_scope = file, - .base_node = 0, - .lazy = .entire_file, + .base_node_inst = try mod.intern_pool.trackZir(gpa, file, .main_struct_inst), + .offset = .entire_file, }, - .import => |info| blk: { - const importing_file = info.importing_file; 
- - break :blk .{ - .file_scope = importing_file, - .base_node = 0, - .lazy = .{ .token_abs = info.import_tok }, - }; + .import => |info| .{ + .base_node_inst = try mod.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst), + .offset = .{ .token_abs = info.import_tok }, }, }; diff --git a/src/Sema.zig b/src/Sema.zig index 105fedbec733..9254cf3b8efb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2425,8 +2425,7 @@ pub fn errNote( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - const zcu = sema.mod; - return zcu.errNoteNonLazy(src.upgrade(zcu), parent, format, args); + return sema.mod.errNote(src, parent, format, args); } fn addFieldErrNote( @@ -2454,7 +2453,7 @@ pub fn errMsg( args: anytype, ) Allocator.Error!*Module.ErrorMsg { assert(src.offset != .unneeded); - return Module.ErrorMsg.create(sema.gpa, src.upgrade(sema.mod), format, args); + return Module.ErrorMsg.create(sema.gpa, src, format, args); } pub fn fail( @@ -2542,7 +2541,6 @@ fn reparentOwnedErrorMsg( args: anytype, ) !void { const mod = sema.mod; - const resolved_src = src.upgrade(mod); const msg_str = try std.fmt.allocPrint(mod.gpa, format, args); const orig_notes = msg.notes.len; @@ -2553,7 +2551,7 @@ fn reparentOwnedErrorMsg( .msg = msg.msg, }; - msg.src_loc = resolved_src; + msg.src_loc = src; msg.msg = msg_str; } @@ -13883,7 +13881,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, operand_src, "file path name cannot be empty", .{}); } - const val = mod.embedFile(block.getFileScope(mod), name, operand_src.upgrade(mod)) catch |err| switch (err) { + const val = mod.embedFile(block.getFileScope(mod), name, operand_src) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, diff --git a/src/Zcu.zig b/src/Zcu.zig index 4d7508da20c6..508bef971a88 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -289,10 +289,6 @@ pub const Export = struct { section: InternPool.OptionalNullTerminatedString = .none, visibility: std.builtin.SymbolVisibility = .default, }; - - pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { - return exp.src.upgrade(mod); - } }; pub const Reference = struct { @@ -746,7 +742,10 @@ pub const File = struct { /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. - import: SrcLoc, + import: struct { + file: *File, + token: Ast.TokenIndex, + }, /// The file is the root of a module. root: *Package.Module, }; @@ -900,7 +899,7 @@ pub const File = struct { } /// Add a reference to this file during AstGen. - pub fn addReference(file: *File, mod: Module, ref: File.Reference) !void { + pub fn addReference(file: *File, zcu: Zcu, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the // front of the references array (see below), this loop is actually O(1) on valid code. if (ref == .root) { @@ -917,17 +916,17 @@ pub const File = struct { // to make multi-module errors more helpful (since "root-of" notes are generally more // informative than "imported-from" notes). This path is hit very rarely, so the speed // of the insert operation doesn't matter too much. - .root => try file.references.insert(mod.gpa, 0, ref), + .root => try file.references.insert(zcu.gpa, 0, ref), // Other references we'll just put at the end. 
- else => try file.references.append(mod.gpa, ref), + else => try file.references.append(zcu.gpa, ref), } - const pkg = switch (ref) { - .import => |loc| loc.file_scope.mod, - .root => |pkg| pkg, + const mod = switch (ref) { + .import => |import| import.file.mod, + .root => |mod| mod, }; - if (pkg != file.mod) file.multi_pkg = true; + if (mod != file.mod) file.multi_pkg = true; } /// Mark this file and every file referenced by it as multi_pkg and report an @@ -967,30 +966,25 @@ pub const EmbedFile = struct { owner: *Package.Module, stat: Cache.File.Stat, val: InternPool.Index, - src_loc: SrcLoc, + src_loc: LazySrcLoc, }; /// This struct holds data necessary to construct API-facing `AllErrors.Message`. /// Its memory is managed with the general purpose allocator so that they /// can be created and destroyed in response to incremental updates. pub const ErrorMsg = struct { - src_loc: SrcLoc, + src_loc: LazySrcLoc, msg: []const u8, notes: []ErrorMsg = &.{}, reference_trace_root: AnalUnit.Optional = .none, - pub const Trace = struct { - decl: InternPool.NullTerminatedString, - src_loc: SrcLoc, - }; - pub fn create( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !*ErrorMsg { - assert(src_loc.lazy != .unneeded); + assert(src_loc.offset != .unneeded); const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); @@ -1006,7 +1000,7 @@ pub const ErrorMsg = struct { pub fn init( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !ErrorMsg { @@ -1994,15 +1988,12 @@ pub const LazySrcLoc = struct { entire_file, /// The source location points to a byte offset within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. byte_abs: u32, /// The source location points to a token within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. token_abs: u32, /// The source location points to an AST node within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. node_abs: u32, /// The source location points to a byte offset within a source file, /// offset from the byte offset of the base node within the file. @@ -2373,8 +2364,7 @@ pub const LazySrcLoc = struct { } /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`. - /// TODO: it is incorrect to store a `SrcLoc` anywhere due to incremental compilation. - /// Probably the type should be removed entirely and this resolution performed on-the-fly when needed. + /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates. 
pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc { const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu); return .{ @@ -3478,7 +3468,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( mod.gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -3655,7 +3645,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -3679,7 +3669,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4480,7 +4470,7 @@ pub fn embedFile( mod: *Module, cur_file: *File, import_string: []const u8, - src_loc: SrcLoc, + src_loc: LazySrcLoc, ) !InternPool.Index { const gpa = mod.gpa; @@ -4555,7 +4545,7 @@ fn newEmbedFile( sub_file_path: []const u8, resolved_path: []const u8, result: **EmbedFile, - src_loc: SrcLoc, + src_loc: LazySrcLoc, ) !InternPool.Index { const gpa = mod.gpa; const ip = &mod.intern_pool; @@ -5320,17 +5310,13 @@ pub fn initNewAnonDecl( new_decl.analysis = .complete; } -pub fn errNoteNonLazy( +pub fn errNote( mod: *Module, - src_loc: SrcLoc, + src_loc: LazySrcLoc, parent: *ErrorMsg, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - if (src_loc.lazy == .unneeded) { - assert(parent.src_loc.lazy == .unneeded); - return; - } const msg = try std.fmt.allocPrint(mod.gpa, format, args); errdefer mod.gpa.free(msg); @@ -5458,14 +5444,12 @@ fn processExportsInner( if (gop.found_existing) { new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(zcu); - const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{ + const msg = try ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{ new_export.opts.name.fmt(&zcu.intern_pool), }); errdefer msg.destroy(gpa); const other_export = zcu.all_exports.items[gop.value_ptr.*]; - const other_src_loc = other_export.getSrcLoc(zcu); - try zcu.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{}); + try zcu.errNote(other_export.src, msg, "other symbol here", .{}); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); new_export.status = .failed; } else { @@ -5493,8 +5477,7 @@ fn handleUpdateExports( const new_export = &zcu.all_exports.items[export_idx]; new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(zcu); - const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{ + const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{ @errorName(err), }); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); @@ -5658,7 +5641,7 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); 
zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -5685,9 +5668,8 @@ fn reportRetryableFileError( const err_msg = try ErrorMsg.create( mod.gpa, .{ - .file_scope = file, - .base_node = 0, - .lazy = .entire_file, + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, }, format, args, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 48908db51b37..51b62aba14bb 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -331,7 +331,7 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index a783137a54cb..2588db6adce2 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -22,7 +22,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 60453cebe252..ae802c8f486b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -338,7 +338,7 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 3a9bfcf4b69f..b85deaa3ce33 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -26,7 +26,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a5cdf8621b4d..2bba63f616d7 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: InstTracking, fn_type: Type, arg_index: usize, -src_loc: Zcu.SrcLoc, +src_loc: Zcu.LazySrcLoc, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, @@ -696,7 +696,7 @@ const CallView = enum(u1) { pub fn generate( bin_file: *link.File, - src_loc: Zcu.SrcLoc, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index dda3f3cf2ad1..3d3dc8513fe1 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -8,7 +8,7 @@ allocator: Allocator, mir: Mir, cc: std.builtin.CallingConvention, err_msg: ?*ErrorMsg = null, -src_loc: Zcu.SrcLoc, +src_loc: Zcu.LazySrcLoc, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 14500ed3296a..ca1cef125022 100644 --- a/src/arch/sparc64/CodeGen.zig +++ 
b/src/arch/sparc64/CodeGen.zig @@ -64,7 +64,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: usize, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: Alignment, /// MIR Instructions @@ -263,7 +263,7 @@ const BigTomb = struct { pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index acd605eebc5e..b509bb7c7966 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -24,7 +24,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b1ebf9126dcf..91d637c7653d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -765,7 +765,7 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { const mod = func.bin_file.base.comp.module.?; - const src_loc = func.decl.navSrcLoc(mod).upgrade(mod); + const src_loc = func.decl.navSrcLoc(mod); func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -1202,7 +1202,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, - src_loc: Zcu.SrcLoc, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -3162,7 +3162,7 @@ fn lowerAnonDeclRef( } const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod).upgrade(mod)); + const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod)); switch (res) { .ok => {}, .fail => |em| { diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index c41ea9ec5565..73ef723345cf 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -257,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { const comp = emit.bin_file.base.comp; const zcu = comp.module.?; const gpa = comp.gpa; - emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); + emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu), format, args); return error.EmitFail; } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ea6f0f8a4eb3..31ed0bf51489 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -74,7 +74,7 @@ va_info: union { ret_mcv: InstTracking, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, eflags_inst: ?Air.Inst.Index = null, @@ -795,7 +795,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -971,7 +971,7 @@ pub fn generate( pub fn generateLazy( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 058a0550d97e..852d19132d2b 100644 --- a/src/arch/x86_64/Lower.zig +++ 
b/src/arch/x86_64/Lower.zig @@ -8,7 +8,7 @@ allocator: Allocator, mir: Mir, cc: std.builtin.CallingConvention, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ diff --git a/src/codegen.zig b/src/codegen.zig index b8662ed15b56..769e8f7cd54e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -47,7 +47,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -79,7 +79,7 @@ pub fn generateFunction( pub fn generateLazyFunction( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -105,7 +105,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian pub fn generateLazySymbol( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, // TODO don't use an "out" parameter like this; put it in the result instead alignment: *Alignment, @@ -171,7 +171,7 @@ pub fn generateLazySymbol( pub fn generateSymbol( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -618,7 +618,7 @@ pub fn generateSymbol( fn lowerPtr( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ptr_val: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -683,7 +683,7 @@ const RelocInfo = struct { fn lowerAnonDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -730,7 +730,7 @@ fn lowerAnonDeclRef( fn lowerDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, decl_index: InternPool.DeclIndex, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -814,7 +814,7 @@ pub const GenResult = union(enum) { fn fail( gpa: Allocator, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, comptime format: []const u8, args: anytype, ) Allocator.Error!GenResult { @@ -825,7 +825,7 @@ pub const GenResult = union(enum) { fn genDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, ptr_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { @@ -931,7 +931,7 @@ fn genDeclRef( fn genUnnamedConst( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { @@ -970,7 +970,7 @@ fn genUnnamedConst( pub fn genTypedValue( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a8e58a1055c2..6bd8bcc6fc2e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -637,7 +637,7 @@ pub const DeclGen = struct { const zcu = dg.zcu; const decl_index = dg.pass.decl; const decl = zcu.declPtr(decl_index); - const src_loc = decl.navSrcLoc(zcu).upgrade(zcu); + const src_loc = decl.navSrcLoc(zcu); dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6fe7adf33c0f..c65158a88a1c 100644 --- a/src/codegen/llvm.zig +++ 
b/src/codegen/llvm.zig @@ -4644,7 +4644,7 @@ pub const DeclGen = struct { const o = dg.object; const gpa = o.gpa; const mod = o.module; - const src_loc = dg.decl.navSrcLoc(mod).upgrade(mod); + const src_loc = dg.decl.navSrcLoc(mod); dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 54b7b381cffd..494ec0737e63 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -415,7 +415,7 @@ const DeclGen = struct { pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); const mod = self.module; - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod); + const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); assert(self.error_msg == null); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); return error.CodegenFail; @@ -6439,7 +6439,7 @@ const DeclGen = struct { // TODO: Translate proper error locations. assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod); + const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); diff --git a/src/link.zig b/src/link.zig index 36a5cb8187e5..7f108c283f9a 100644 --- a/src/link.zig +++ b/src/link.zig @@ -646,7 +646,7 @@ pub const File = struct { base: *File, decl_val: InternPool.Index, decl_align: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerResult { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 94b9ca520ec6..366ba8750906 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1144,7 +1144,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1179,7 +1179,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); const ty = val.typeOf(mod); - const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod).upgrade(mod))) { + const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) { .ok => |atom_index| atom_index, .fail => |em| { decl.analysis = .codegen_failure; @@ -1197,7 +1197,7 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.SrcLoc) !LowerConstResult { +fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult { const gpa = self.base.comp.gpa; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1270,7 +1270,7 @@ pub fn updateDecl( defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - 
const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { @@ -1309,14 +1309,7 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, src, @@ -1560,7 +1553,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1585,7 +1578,7 @@ pub fn updateExports( if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1596,7 +1589,7 @@ pub fn updateExports( if (exp.opts.linkage == .link_once) { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.link_once", .{}, )); @@ -1867,7 +1860,7 @@ pub fn lowerAnonDecl( self: *Coff, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = self.base.comp.gpa; const mod = self.base.comp.module.?; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index df8e6c0dd8d5..c1df15308335 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -552,7 +552,7 @@ pub fn lowerAnonDecl( self: *Elf, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 74e2039f37a7..57fa61001948 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -686,7 +686,7 @@ pub fn lowerAnonDecl( elf_file: *Elf, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = elf_file.base.comp.gpa; const mod = elf_file.base.comp.module.?; @@ -1074,7 +1074,7 @@ pub fn updateFunc( const res = if (decl_state) |*ds| try codegen.generateFunction( &elf_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1084,7 +1084,7 @@ pub fn updateFunc( else try codegen.generateFunction( &elf_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1156,13 +1156,13 @@ pub fn updateDecl( // TODO implement .debug_info for global variables const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ + try codegen.generateSymbol(&elf_file.base, 
decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .dwarf = ds, }, .{ .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .none, .{ + try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = sym_index, }); @@ -1217,14 +1217,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &elf_file.base, src, @@ -1302,7 +1295,7 @@ pub fn lowerUnnamedConst( val, ty.abiAlignment(mod), elf_file.zig_data_rel_ro_section_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { @@ -1329,7 +1322,7 @@ fn lowerConst( val: Value, required_alignment: InternPool.Alignment, output_section_index: u32, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = elf_file.base.comp.gpa; @@ -1395,7 +1388,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1421,7 +1414,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1436,7 +1429,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.LinkOnce", .{}, )); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 3187ba528bc5..ed20a16abf60 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3228,7 +3228,7 @@ pub fn lowerAnonDecl( self: *MachO, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index ee5ab83b0af4..861ced921472 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -572,7 +572,7 @@ pub fn lowerAnonDecl( macho_file: *MachO, decl_val: InternPool.Index, explicit_alignment: Atom.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = macho_file.base.comp.gpa; const mod = macho_file.base.comp.module.?; @@ -682,7 +682,7 @@ pub fn updateFunc( const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; const res = try codegen.generateFunction( &macho_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -754,7 +754,7 @@ pub fn updateDecl( const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else 
.none; - const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, dio, .{ + const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ .parent_atom_index = sym_index, }); @@ -1100,7 +1100,7 @@ pub fn lowerUnnamedConst( val, val.typeOf(mod).abiAlignment(mod), macho_file.zig_const_sect_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { @@ -1127,7 +1127,7 @@ fn lowerConst( val: Value, required_alignment: Atom.Alignment, output_section_index: u8, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = macho_file.base.comp.gpa; @@ -1196,7 +1196,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1221,7 +1221,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1231,7 +1231,7 @@ pub fn updateExports( if (exp.opts.linkage == .link_once) { try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.link_once", .{}, )); @@ -1291,14 +1291,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (lazy_sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &macho_file.base, src, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index d44da5c973dd..2efe569d9860 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -439,7 +439,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -505,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), val, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = new_atom_idx, @@ -544,7 +544,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ .none = {} }, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { @@ -1027,7 +1027,7 @@ fn addDeclExports( { try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - mod.declPtr(decl_index).navSrcLoc(mod).upgrade(mod), + mod.declPtr(decl_index).navSrcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -1225,14 +1225,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind self.syms.items[self.getAtomPtr(atom_index).sym_index.?] = symbol; // generate the code - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, src, @@ -1553,7 +1546,7 @@ pub fn lowerAnonDecl( self: *Plan9, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { _ = explicit_alignment; // This is basically the same as lowerUnnamedConst. 
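The substitution above repeats mechanically across every backend: call sites stop resolving source locations eagerly via `.upgrade(mod)` and instead pass the `LazySrcLoc` through, so resolution to a concrete file and AST node happens only at the moment a diagnostic is actually rendered. A minimal sketch of that pattern, assuming the `Zcu`/`upgrade` API introduced in this patch (the `Diagnostic` wrapper and its `render` helper are hypothetical, for illustration only):

    const Zcu = @import("Zcu.zig");

    // Hypothetical wrapper: store only the lazy handle, which stays
    // valid across incremental updates.
    const Diagnostic = struct {
        src: Zcu.LazySrcLoc,
        msg: []const u8,

        fn render(d: Diagnostic, zcu: *Zcu) Zcu.SrcLoc {
            // `upgrade` resolves the tracked ZIR instruction to a
            // concrete file + AST node. Per the doc comment earlier in
            // this patch, the result is ephemeral and must not be stored.
            return d.src.upgrade(zcu);
        }
    };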
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 164ddbc118da..3befedad89b3 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1533,7 +1533,7 @@ pub fn lowerAnonDecl( wasm: *Wasm, decl_val: InternPool.Index, explicit_alignment: Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index 341d3a2fc839..ca950e5cef4e 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -269,7 +269,7 @@ pub fn updateDecl( const res = try codegen.generateSymbol( &wasm_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), val, &code_writer, .none, @@ -308,7 +308,7 @@ pub fn updateFunc( defer code_writer.deinit(); const result = try codegen.generateFunction( &wasm_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -439,7 +439,7 @@ pub fn lowerAnonDecl( wasm_file: *Wasm, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = wasm_file.base.comp.gpa; const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val); @@ -494,7 +494,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d else decl.navSrcLoc(mod); - switch (try zig_object.lowerConst(wasm_file, name, val, decl_src.upgrade(mod))) { + switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) { .ok => |atom_index| { try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index); return @intFromEnum(wasm_file.getAtom(atom_index).sym_index); @@ -512,7 +512,7 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.SrcLoc) !LowerConstResult { +fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult { const gpa = wasm_file.base.comp.gpa; const mod = wasm_file.base.comp.module.?; @@ -882,7 +882,7 @@ pub fn updateExports( if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); @@ -915,7 +915,7 @@ pub fn updateExports( .link_once => { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: LinkOnce", .{}, )); From 2f0f1efa6fa50ca27a44d5f7a0c38a6cafbbfb7c Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 2 Jul 2024 09:51:51 +0100 Subject: [PATCH 039/152] compiler: type.zig -> Type.zig --- CMakeLists.txt | 2 +- src/Air.zig | 2 +- src/Compilation.zig | 2 +- src/RangeSet.zig | 2 +- src/Sema.zig | 2 +- src/Sema/bitcast.zig | 2 +- src/Sema/comptime_ptr_access.zig | 2 +- src/Type.zig | 3617 ++++++++++++++++++++++++++++++ src/Value.zig | 2 +- src/Zcu.zig | 2 +- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/aarch64/abi.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/arm/Emit.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/riscv64/Mir.zig | 2 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 2 +- src/arch/wasm/abi.zig | 2 +- 
src/arch/x86_64/CodeGen.zig | 2 +- src/arch/x86_64/abi.zig | 2 +- src/codegen.zig | 2 +- src/codegen/c.zig | 2 +- src/codegen/c/Type.zig | 2 +- src/codegen/llvm.zig | 2 +- src/codegen/spirv.zig | 2 +- src/link.zig | 2 +- src/link/C.zig | 2 +- src/link/Coff.zig | 2 +- src/link/Dwarf.zig | 2 +- src/link/Elf/ZigObject.zig | 2 +- src/link/MachO/DebugSymbols.zig | 2 +- src/link/MachO/ZigObject.zig | 2 +- src/link/Plan9.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/Wasm/ZigObject.zig | 2 +- src/mutable_value.zig | 2 +- src/print_air.zig | 2 +- src/print_value.zig | 2 +- src/register_manager.zig | 2 +- src/target.zig | 2 +- src/type.zig | 3617 ------------------------------ 44 files changed, 3659 insertions(+), 3659 deletions(-) create mode 100644 src/Type.zig delete mode 100644 src/type.zig diff --git a/CMakeLists.txt b/CMakeLists.txt index 33cdb66b5d0b..a33df3a096b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -522,6 +522,7 @@ set(ZIG_STAGE2_SOURCES src/Sema.zig src/Sema/bitcast.zig src/Sema/comptime_ptr_access.zig + src/Type.zig src/Value.zig src/Zcu.zig src/arch/aarch64/CodeGen.zig @@ -673,7 +674,6 @@ set(ZIG_STAGE2_SOURCES src/target.zig src/tracy.zig src/translate_c.zig - src/type.zig src/wasi_libc.zig ) diff --git a/src/Air.zig b/src/Air.zig index e70f73432f6f..0a05470e1c85 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -9,7 +9,7 @@ const assert = std.debug.assert; const Air = @This(); const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const InternPool = @import("InternPool.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. diff --git a/src/Compilation.zig b/src/Compilation.zig index 55084fb971a7..b964ffd0d136 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -12,7 +12,7 @@ const WaitGroup = std.Thread.WaitGroup; const ErrorBundle = std.zig.ErrorBundle; const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 30b8c273cda4..01d9157767b5 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const Order = std.math.Order; const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. 
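This commit is a rename with one semantic wrinkle, visible in the import changes throughout: in Zig, a source file is itself a struct, so once the fields and methods move to the top level of `Type.zig`, importing the file yields the type directly, and the old `@import("type.zig").Type` projection becomes plain `@import("Type.zig")`. An abridged sketch of the new file layout, matching the full listing later in this patch:

    // Type.zig -- the file itself is the struct type.
    const InternPool = @import("InternPool.zig");
    const Type = @This(); // names the enclosing (file) struct

    ip_index: InternPool.Index, // top-level field of the file-struct

    pub fn toIntern(ty: Type) InternPool.Index {
        return ty.ip_index;
    }

Callers then bind it with `const Type = @import("Type.zig");`, which is exactly the one-line change repeated across the diffs below.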
diff --git a/src/Sema.zig b/src/Sema.zig index 9254cf3b8efb..57b2c897a118 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -176,7 +176,7 @@ const log = std.log.scoped(.sema); const Sema = @This(); const Value = @import("Value.zig"); const MutableValue = @import("mutable_value.zig").MutableValue; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Air = @import("Air.zig"); const Zir = std.zig.Zir; const Zcu = @import("Zcu.zig"); diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 62a0122fa1b3..9536ee33cd53 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -767,6 +767,6 @@ const assert = std.debug.assert; const Sema = @import("../Sema.zig"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const CompileError = Zcu.CompileError; diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 59c4c9507d88..d8e638ca2660 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -1054,7 +1054,7 @@ const ComptimeAllocIndex = InternPool.ComptimeAllocIndex; const Sema = @import("../Sema.zig"); const Block = Sema.Block; const MutableValue = @import("../mutable_value.zig").MutableValue; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Zcu = @import("../Zcu.zig"); const LazySrcLoc = Zcu.LazySrcLoc; diff --git a/src/Type.zig b/src/Type.zig new file mode 100644 index 000000000000..96c3e055fdb6 --- /dev/null +++ b/src/Type.zig @@ -0,0 +1,3617 @@ +//! Both types and values are canonically represented by a single 32-bit integer +//! which is an index into an `InternPool` data structure. +//! This struct abstracts around this storage by providing methods only +//! applicable to types rather than values in general. + +const std = @import("std"); +const builtin = @import("builtin"); +const Value = @import("Value.zig"); +const assert = std.debug.assert; +const Target = std.Target; +const Zcu = @import("Zcu.zig"); +/// Deprecated. 
+const Module = Zcu; +const log = std.log.scoped(.Type); +const target_util = @import("target.zig"); +const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); +const Alignment = InternPool.Alignment; +const Zir = std.zig.Zir; +const Type = @This(); + +ip_index: InternPool.Index, + +pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; +} + +pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); +} + +pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), + .Optional => { + return self.optionalChild(mod).baseZigTypeTag(mod); + }, + else => |t| t, + }; +} + +pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + => true, + + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), + + .Bool, + .Type, + .Void, + .ErrorSet, + .Fn, + .Opaque, + .AnyFrame, + .Enum, + .EnumLiteral, + => is_equality_cmp, + + .NoReturn, + .Array, + .Struct, + .Undefined, + .Null, + .ErrorUnion, + .Union, + .Frame, + => false, + + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), + .Optional => { + if (!is_equality_cmp) return false; + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); + }, + }; +} + +/// If it is a function pointer, returns the function type. Otherwise returns null. +pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) != .Fn) return null; + return elem_ty; +} + +/// Asserts the type is a pointer. +pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; +} + +pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, +}; + +pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { + return .{ + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), + }; +} + +pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |p| p, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| p, + else => unreachable, + }, + else => unreachable, + }; +} + +pub fn eql(a: Type, b: Type, mod: *const Module) bool { + _ = mod; // TODO: remove this parameter + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
+ return a.toIntern() == b.toIntern(); +} + +pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = ty; + _ = unused_fmt_string; + _ = options; + _ = writer; + @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); +} + +pub const Formatter = std.fmt.Formatter(format2); + +pub fn fmt(ty: Type, module: *Module) Formatter { + return .{ .data = .{ + .ty = ty, + .module = module, + } }; +} + +const FormatContext = struct { + ty: Type, + module: *Module, +}; + +fn format2( + ctx: FormatContext, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + comptime assert(unused_format_string.len == 0); + _ = options; + return print(ctx.ty, writer, ctx.module); +} + +pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { + return .{ .data = ty }; +} + +/// This is a debug function. In order to print types in a meaningful way +/// we also need access to the module. +pub fn dump( + start_type: Type, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + _ = options; + comptime assert(unused_format_string.len == 0); + return writer.print("{any}", .{start_type.ip_index}); +} + +/// Prints a name suitable for `@typeName`. +/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. +pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => { + const info = ty.ptrInfo(mod); + + if (info.sentinel != .none) switch (info.flags.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + } else switch (info.flags.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.flags.alignment != .none or + info.packed_offset.host_size != 0 or + info.flags.vector_index != .none) + { + const alignment = if (info.flags.alignment != .none) + info.flags.alignment + else + Type.fromInterned(info.child).abiAlignment(mod); + try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); + + if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { + try writer.print(":{d}:{d}", .{ + info.packed_offset.bit_offset, info.packed_offset.host_size, + }); + } + if (info.flags.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.flags.vector_index != .none) { + try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.flags.address_space != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); + } + if (info.flags.is_const) try writer.writeAll("const "); + if (info.flags.is_volatile) try writer.writeAll("volatile "); + if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); + + try print(Type.fromInterned(info.child), writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try 
writer.print("[{d}]", .{array_type.len}); + try print(Type.fromInterned(array_type.child), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + Value.fromInterned(array_type.sentinel).fmtValue(mod, null), + }); + try print(Type.fromInterned(array_type.child), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(Type.fromInterned(vector_type.child), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + return print(Type.fromInterned(child), writer, mod); + }, + .error_union_type => |error_union_type| { + try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); + try writer.writeByte('!'); + if (error_union_type.payload_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(error_union_type.payload_type), writer, mod); + } + return; + }, + .inferred_error_set_type => |func_index| { + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.funcOwnerDeclPtr(func_index); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names.get(ip), 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.print("{}", .{name.fmt(ip)}); + } + try writer.writeAll("}"); + }, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .adhoc_inferred_error_set, + => return writer.writeAll(@tagName(s)), + + .null, + .undefined, + => try writer.print("@TypeOf({s})", .{@tagName(s)}), + + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), + .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), + .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), + .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), + .address_space => try writer.writeAll("std.builtin.AddressSpace"), + .float_mode => try writer.writeAll("std.builtin.FloatMode"), + .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), + .call_modifier => try writer.writeAll("std.builtin.CallModifier"), + .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), + .export_options => try writer.writeAll("std.builtin.ExportOptions"), + .extern_options => try writer.writeAll("std.builtin.ExternOptions"), + .type_info => try writer.writeAll("std.builtin.Type"), + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.decl.unwrap()) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.renderFullyQualifiedName(mod, writer); + } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, .empty, writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.types.len == 0) { + return writer.writeAll("@TypeOf(.{})"); + } + try writer.writeAll("struct{"); + for (anon_struct.types.get(ip), 
anon_struct.values.get(ip), 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); + } + + try print(Type.fromInterned(field_ty), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); + } + } + try writer.writeAll("}"); + }, + + .union_type => { + const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .opaque_type => { + const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_type => { + const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn ("); + const param_types = fn_info.param_types.get(&mod.intern_pool); + for (param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(param_ty), writer, mod); + } + } + if (fn_info.is_var_args) { + if (param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(fn_info.return_type), writer, mod); + } + }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(Type.fromInterned(child), writer, mod); + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +pub fn fromInterned(i: InternPool.Index) Type { + assert(i != .none); + return .{ .ip_index = i }; +} + +pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; +} + +pub fn toValue(self: Type) Value { + return Value.fromInterned(self.toIntern()); +} + +const RuntimeBitsError = Module.CompileError || error{NeedLazy}; + +/// true if and only if the type takes up space in memory at runtime. +/// There are two reasons a type will return false: +/// * the type is a comptime-only type. For example, the type `type` itself. +/// - note, however, that a struct can have mixed fields and only the non-comptime-only +/// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` +/// hasRuntimeBits()=true and abiSize()=4 +/// * the type has only one possible value, making its ABI size 0. +/// - an enum with an explicit tag type has the ABI size of the integer tag type, +/// making it one-possible-value only if the integer tag type has 0 bits. 
+/// When `ignore_comptime_only` is true, then types that are comptime-only +/// may return false positives. +pub fn hasRuntimeBitsAdvanced( + ty: Type, + mod: *Module, + ignore_comptime_only: bool, + strat: AbiAlignmentAdvancedStrat, +) RuntimeBitsError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + // False because it is a comptime-only type. + .empty_struct_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.bits != 0, + .ptr_type => { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => |sema| !(try sema.typeRequiresComptime(ty)), + .eager => !comptimeOnly(ty, mod), + .lazy => error.NeedLazy, + }; + }, + .anyframe_type => true, + .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and + try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .vector_type => |vector_type| return vector_type.len > 0 and + try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .opt_type => |child| { + const child_ty = Type.fromInterned(child); + if (child_ty.isNoReturn(mod)) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), + .eager => !comptimeOnly(child_ty, mod), + .lazy => error.NeedLazy, + }; + }, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, + + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .anyerror, + .adhoc_inferred_error_set, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => true, + + // These are false because they are comptime-only types. + .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + => false, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
+ return true; + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_type.haveFieldTypes(ip)), + .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, + } + for (0..struct_type.field_types.len) |i| { + if (struct_type.comptime_bits.getBit(ip, i)) continue; + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .none => { + if (union_type.flagsPtr(ip).status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + union_type.flagsPtr(ip).assumed_runtime_bits = true; + return true; + } + }, + .safety, .tagged => { + const tag_ty = union_type.tagTypePtr(ip).*; + // tag_ty will be `none` if this union's tag type is not resolved yet, + // in which case we want control flow to continue down below. + if (tag_ty != .none and + try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + { + return true; + } + }, + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), + .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) + return error.NeedLazy, + } + for (0..union_type.field_types.len) |field_index| { + const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + + .opaque_type => true, + .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// true if and only if the type has a well-defined memory layout +/// readFrom/writeToMemory are supported only for types with a well- +/// defined memory layout +pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .int_type, + .vector_type, + => true, + + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, + // These are function bodies, not function pointers. 
+ .func_type, + => false, + + .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod), + .opt_type => ty.isPtrLikeOptional(mod), + .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, + => true, + + .anyerror, + .adhoc_inferred_error_set, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, + => false, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + // Struct with no fields have a well-defined layout of no bits. + return struct_type.layout != .auto or struct_type.field_types.len == 0; + }, + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + return switch (union_type.flagsPtr(ip).runtime_tag) { + .none, .safety => union_type.flagsPtr(ip).layout != .auto, + .tagged => false, + }; + }, + .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }; +} + +pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; +} + +pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; +} + +pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { + return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; +} + +/// Determines whether a function type has runtime bits, i.e. whether a +/// function with this type can exist at runtime. +/// Asserts that `ty` is a function type. +/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. +pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { + const fn_info = mod.typeToFunc(ty).?; + if (fn_info.is_generic) return false; + if (fn_info.is_var_args) return true; + if (fn_info.cc == .Inline) return false; + return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); +} + +pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { + .Fn => return ty.fnHasRuntimeBits(mod), + else => return ty.hasRuntimeBits(mod), + } +} + +/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. +pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Fn => true, + else => return ty.hasRuntimeBitsIgnoreComptime(mod), + }; +} + +pub fn isNoReturn(ty: Type, mod: *Module) bool { + return mod.intern_pool.isNoReturn(ty.toIntern()); +} + +/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. 
+pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; +} + +pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| { + if (ptr_type.flags.alignment != .none) + return ptr_type.flags.alignment; + + if (opt_sema) |sema| { + const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } + + return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + }, + .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, + }; +} + +pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, + else => unreachable, + }; +} + +/// Never returns `none`. Asserts that all necessary type resolution is already done. +pub fn abiAlignment(ty: Type, mod: *Module) Alignment { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; +} + +/// May capture a reference to `ty`. +/// Returned value has type `comptime_int`. +pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), + } +} + +pub const AbiAlignmentAdvanced = union(enum) { + scalar: Alignment, + val: Value, +}; + +pub const AbiAlignmentAdvancedStrat = union(enum) { + eager, + lazy, + sema: *Sema, +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. +/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError. 
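+/// A minimal usage sketch (illustrative, not part of the original patch),
+/// assuming a fully resolved `ty: Type` and a `mod: *Module` in scope:
+///
+///     // `.eager` always yields `.scalar`, so the error can be asserted away:
+///     const a = (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+///     // `.lazy` may instead yield `.val`, capturing a lazy reference to `ty`:
+///     const lazy = try ty.abiAlignmentAdvanced(mod, .lazy);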
+pub fn abiAlignmentAdvanced( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + const opt_sema = switch (strat) { + .sema => |sema| sema, + else => null, + }; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; + }, + .ptr_type, .anyframe_type => { + return .{ .scalar = ptrAbiAlignment(target) }; + }, + .array_type => |array_type| { + return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + if (vector_type.len == 0) return .{ .scalar = .@"1" }; + switch (mod.comp.getZigBackend()) { + else => { + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); + if (elem_bits == 0) return .{ .scalar = .@"1" }; + const bytes = ((elem_bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + }, + .stage2_c => { + return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); + }, + .stage2_x86_64 => { + if (vector_type.child == .bool_type) { + if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; + if (vector_type.len > 64) return .{ .scalar = .@"16" }; + const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + } + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + if (elem_bytes == 0) return .{ .scalar = .@"1" }; + const bytes = elem_bytes * vector_type.len; + if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" }; + return .{ .scalar = .@"16" }; + }, + } + }, + + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), + + .error_set_type, .inferred_error_set_type => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + // represents machine code; not a pointer + .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, + + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return .{ .scalar = .@"1" }, + + .usize, + .isize, + => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, + + .export_options, + .extern_options, + .type_info, + => return .{ .scalar = ptrAbiAlignment(target) }, + + .c_char => return .{ .scalar = cTypeAlign(target, 
.char) }, + .c_short => return .{ .scalar = cTypeAlign(target, .short) }, + .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) }, + .c_int => return .{ .scalar = cTypeAlign(target, .int) }, + .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, + .c_long => return .{ .scalar = cTypeAlign(target, .long) }, + .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, + .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, + .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, + .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, + + .f16 => return .{ .scalar = .@"2" }, + .f32 => return .{ .scalar = cTypeAlign(target, .float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return .{ .scalar = cTypeAlign(target, .double) }, + else => return .{ .scalar = .@"8" }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return .{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => return .{ .scalar = .@"16" }, + }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return .{ .scalar = .@"1" }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; + } + + const flags = struct_type.flagsPtr(ip).*; + if (flags.alignment != .none) return .{ .scalar = flags.alignment }; + + return switch (strat) { + .eager => unreachable, // struct alignment not resolved + .sema => |sema| .{ + .scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type), + }, + .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }; + }, + .anon_struct_type => |tuple| { + var big_align: Alignment = .@"1"; + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = big_align.max(field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } + } + return .{ .scalar = big_align }; + }, + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + const flags = union_type.flagsPtr(ip).*; + if (flags.alignment != .none) return .{ .scalar = 
flags.alignment }; + + if (!union_type.haveLayout(ip)) switch (strat) { + .eager => unreachable, // union layout not resolved + .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }; + + return .{ .scalar = union_type.flagsPtr(ip).alignment }; + }, + .opaque_type => return .{ .scalar = .@"1" }, + .enum_type => return .{ + .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiAlignmentAdvancedErrorUnion( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, + payload_ty: Type, +) Module.CompileError!AbiAlignmentAdvanced { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. + const code_align = abiAlignment(Type.anyerror, mod); + switch (strat) { + .eager, .sema => { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = code_align }; + } + return .{ .scalar = code_align.max( + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, + ) }; + }, + .lazy => { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, + .val => {}, + } + return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }; + }, + } +} + +fn abiAlignmentAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const child_type = ty.optionalChild(mod); + + switch (child_type.zigTypeTag(mod)) { + .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .NoReturn => return .{ .scalar = .@"1" }, + else => {}, + } + + switch (strat) { + .eager, .sema => { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = .@"1" }; + } + return child_type.abiAlignmentAdvanced(mod, strat); + }, + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| return .{ .scalar = x.max(.@"1") }, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } +} + +/// May capture a reference to `ty`. +pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x), + } +} + +/// Asserts the type has the ABI size already resolved. 
+/// Types that return false for hasRuntimeBits() return 0. +pub fn abiSize(ty: Type, mod: *Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; +} + +const AbiSizeAdvanced = union(enum) { + scalar: u64, + val: Value, +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. +/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError. +pub fn abiSizeAdvanced( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiSizeAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; + }, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return .{ .scalar = 0 }; + switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + } + }, + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const total_bytes = switch (mod.comp.getZigBackend()) { + else => total_bytes: { + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); + const total_bits = elem_bits * vector_type.len; + break :total_bytes (total_bits + 7) / 8; + }, + .stage2_c => total_bytes: { + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + .stage2_x86_64 => total_bytes: { + if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + }; + return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + .error_set_type, .inferred_error_set_type => { + const bits 
= mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .error_union_type => |error_union_type| { + const payload_ty = Type.fromInterned(error_union_type.payload_type); + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + }; + + var size: u64 = 0; + if (code_align.compare(.gt, payload_align)) { + size += code_size; + size = payload_align.forward(size); + size += payload_size; + size = code_align.forward(size); + } else { + size += payload_size; + size = code_align.forward(size); + size += code_size; + size = payload_align.forward(size); + } + return AbiSizeAdvanced{ .scalar = size }; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = 
mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => switch (struct_type.layout) { + .@"packed" => { + if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + .auto, .@"extern" => { + if (!struct_type.haveLayout(ip)) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + }, + .eager => {}, + } + switch (struct_type.layout) { + .@"packed" => return .{ + .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), + }, + .auto, .@"extern" => { + assert(struct_type.haveLayout(ip)); + return .{ .scalar = struct_type.size(ip).* }; + }, + } + }, + .anon_struct_type => |tuple| { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = tuple.types.len; + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + + assert(union_type.haveLayout(ip)); + return .{ .scalar = union_type.size(ip).* }; + }, + .opaque_type => unreachable, // no size available + .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiSizeAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiSizeAdvanced { + const child_ty = ty.optionalChild(mod); + + if (child_ty.isNoReturn(mod)) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + + if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) return AbiSizeAdvanced{ .scalar = 1 }; + + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_ty, mod, strat); + } + + const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = 
Value.fromInterned((try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.toIntern() },
+ } }))) },
+ },
+ };
+
+ // Optional types are represented as a struct with the child type as the first
+ // field and a boolean as the second. Since the child type's ABI alignment is
+ // guaranteed to be >= that of bool's (1 byte), the added size is exactly equal
+ // to the child type's ABI alignment.
+ return AbiSizeAdvanced{
+ .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
+ };
+}
+
+pub fn ptrAbiAlignment(target: Target) Alignment {
+ return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
+}
+
+pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 {
+ return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
+}
+
+pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment {
+ return switch (target.cpu.arch) {
+ .x86 => switch (bits) {
+ 0 => .none,
+ 1...8 => .@"1",
+ 9...16 => .@"2",
+ 17...64 => .@"4",
+ else => .@"16",
+ },
+ .x86_64 => switch (bits) {
+ 0 => .none,
+ 1...8 => .@"1",
+ 9...16 => .@"2",
+ 17...32 => .@"4",
+ 33...64 => .@"8",
+ else => switch (target_util.zigBackend(target, use_llvm)) {
+ .stage2_x86_64 => .@"8",
+ else => .@"16",
+ },
+ },
+ else => return Alignment.fromByteUnits(@min(
+ std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
+ maxIntAlignment(target, use_llvm),
+ )),
+ };
+}
+
+pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
+ return switch (target.cpu.arch) {
+ .avr => 1,
+ .msp430 => 2,
+ .xcore => 4,
+
+ .arm,
+ .armeb,
+ .thumb,
+ .thumbeb,
+ .hexagon,
+ .mips,
+ .mipsel,
+ .powerpc,
+ .powerpcle,
+ .r600,
+ .amdgcn,
+ .riscv32,
+ .sparc,
+ .sparcel,
+ .s390x,
+ .lanai,
+ .wasm32,
+ .wasm64,
+ => 8,
+
+ // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16
+ // is a relevant number in three cases:
+ // 1. A different machine code instruction is used when loading into a SIMD register.
+ // 2. The C ABI wants 16 for extern structs.
+ // 3. 16-byte cmpxchg needs 16-byte alignment.
+ // The same logic applies to powerpc64, mips64, and sparc64.
+ .powerpc64,
+ .powerpc64le,
+ .mips64,
+ .mips64el,
+ .sparc64,
+ => switch (target.ofmt) {
+ .c => 16,
+ else => 8,
+ },
+
+ .x86_64 => switch (target_util.zigBackend(target, use_llvm)) {
+ .stage2_x86_64 => 8,
+ else => 16,
+ },
+
+ // Even LLVMABIAlignmentOfType(i128) agrees on these targets.
+ .x86,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .riscv64,
+ .bpfel,
+ .bpfeb,
+ .nvptx,
+ .nvptx64,
+ => 16,
+
+ // The targets below this comment are unverified, but since C requires
+ // int128_t to be 16-byte aligned, 16 is a safe default.
+ .spu_2,
+ .csky,
+ .arc,
+ .m68k,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .kalimba,
+ .renderscript32,
+ .spirv,
+ .spirv32,
+ .shave,
+ .le64,
+ .amdil64,
+ .hsail64,
+ .spir64,
+ .renderscript64,
+ .ve,
+ .spirv64,
+ .dxil,
+ .loongarch32,
+ .loongarch64,
+ .xtensa,
+ => 16,
+ };
+}
+
+pub fn bitSize(ty: Type, mod: *Module) u64 {
+ return bitSizeAdvanced(ty, mod, null) catch unreachable;
+}
+
+/// If you pass `opt_sema`, any recursive type resolutions will happen if
+/// necessary, possibly returning a CompileError. Passing `null` instead asserts
+/// the type is fully resolved, and there will be no error, guaranteed.
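+/// Worked example (illustrative, not part of the original patch): for `[4]u7`,
+/// `abiSize(u7)` is 1 byte, so the first three elements contribute 3 * 8 = 24
+/// bits of padded storage and the final element contributes its raw 7 bits,
+/// giving a bit size of 31.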
+pub fn bitSizeAdvanced( + ty: Type, + mod: *Module, + opt_sema: ?*Sema, +) Module.CompileError!u64 { + const target = mod.getTarget(); + const ip = &mod.intern_pool; + + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + .anyframe_type => return target.ptrBitWidth(), + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return 0; + const elem_ty = Type.fromInterned(array_type.child); + const elem_size = @max( + (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, + ); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = Type.fromInterned(vector_type.child); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + + .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + .anyerror, + .adhoc_inferred_error_set, + => return mod.errorSetBits(), + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, + .atomic_rmw_op => unreachable, + .calling_convention => unreachable, + .address_space => unreachable, + .float_mode => unreachable, + .reduce_op => unreachable, + .call_modifier => unreachable, + .prefetch_options => unreachable, + .export_options => unreachable, + .extern_options => unreachable, + .type_info => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const is_packed = struct_type.layout == .@"packed"; + if (opt_sema) |sema| { + try sema.resolveTypeFields(ty); + if (is_packed) try sema.resolveTypeLayout(ty); + } + if (is_packed) { + return try 
Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); + } + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + + .anon_struct_type => { + if (opt_sema) |sema| try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + const is_packed = ty.containerLayout(mod) == .@"packed"; + if (opt_sema) |sema| { + try sema.resolveTypeFields(ty); + if (is_packed) try sema.resolveTypeLayout(ty); + } + if (!is_packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + + var size: u64 = 0; + for (0..union_type.field_types.len) |field_index| { + const field_ty = union_type.field_types.get(ip)[field_index]; + size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); + } + + return size; + }, + .opaque_type => unreachable, + .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +/// Returns true if the type's layout is already resolved and it is safe +/// to use `abiSize`, `abiAlignment` and `bitSize` on it. +pub fn layoutIsResolved(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), + .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), + .array_type => |array_type| { + if (array_type.lenIncludingSentinel() == 0) return true; + return Type.fromInterned(array_type.child).layoutIsResolved(mod); + }, + .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), + .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), + else => true, + }; +} + +pub fn isSinglePointer(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size == .One, + else => false, + }; +} + +/// Asserts `ty` is a pointer. +pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; +} + +/// Returns `null` if `ty` is not a pointer. 
+pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size, + else => null, + }; +} + +pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, + else => false, + }; +} + +pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern())); +} + +pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_const, + else => false, + }; +} + +pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, &mod.intern_pool); +} + +pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_volatile, + else => false, + }; +} + +pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, + .opt_type => true, + else => false, + }; +} + +pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.flags.size) { + .Slice, .C => false, + .Many, .One => !p.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For pointer-like optionals, returns true, otherwise returns the allowzero property +/// of pointers. +pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { + return true; + } + return ty.ptrInfo(mod).flags.is_allowzero; +} + +/// See also `isPtrLikeOptional`. +pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, + .error_set_type, .inferred_error_set_type => true, + else => false, + }, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +/// Returns true if the type is optional and would be lowered to a single pointer +/// address value, using 0 for null. Note that this returns true for C pointers. +/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. +pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For *[N]T, returns [N]T. +/// For *T, returns T. +/// For [*]T, returns T. 
+pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, &mod.intern_pool); +} + +pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { + return Type.fromInterned(ip.childType(ty.toIntern())); +} + +/// For *[N]T, returns T. +/// For ?*T, returns T. +/// For ?*[N]T, returns T. +/// For ?[*]T, returns T. +/// For *T, returns T. +/// For [*]T, returns T. +/// For [N]T, returns T. +/// For []T, returns T. +/// For anyframe->T, returns T. +pub fn elemType2(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One => Type.fromInterned(ptr_type.child).shallowElemType(mod), + .Many, .C, .Slice => Type.fromInterned(ptr_type.child), + }, + .anyframe_type => |child| { + assert(child != .none); + return Type.fromInterned(child); + }, + .vector_type => |vector_type| Type.fromInterned(vector_type.child), + .array_type => |array_type| Type.fromInterned(array_type.child), + .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)), + else => unreachable, + }; +} + +fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { + .Array, .Vector => child_ty.childType(mod), + else => child_ty, + }; +} + +/// For vectors, returns the element type. Otherwise returns self. +pub fn scalarType(ty: Type, mod: *Module) Type { + return switch (ty.zigTypeTag(mod)) { + .Vector => ty.childType(mod), + else => ty, + }; +} + +/// Asserts that the type is an optional. +/// Note that for C pointers this returns the type unmodified. +pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child| Type.fromInterned(child), + .ptr_type => |ptr_type| b: { + assert(ptr_type.flags.size == .C); + break :b ty; + }, + else => unreachable, + }; +} + +/// Returns the tag type of a union, if the type is a union and it has a tag type. +/// Otherwise, returns `null`. +pub fn unionTagType(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .union_type => {}, + else => return null, + } + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .tagged => { + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => return null, + } +} + +/// Same as `unionTagType` but includes safety tag. +/// Codegen should use this version. +pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) return null; + assert(union_type.haveFieldTypes(ip)); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => null, + }; +} + +/// Asserts the type is a union; returns the tag type, even if the tag will +/// not be stored at runtime. 
+pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.enum_tag_ty); +} + +pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + const union_fields = union_obj.field_types.get(ip); + const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null; + return Type.fromInterned(union_fields[index]); +} + +pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.field_types.get(ip)[index]); +} + +pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { + const union_obj = mod.typeToUnion(ty).?; + return mod.unionTagFieldIndex(union_obj, enum_tag); +} + +pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + for (union_obj.field_types.get(ip)) |field_ty| { + if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; + } + return true; +} + +/// Returns the type used for backing storage of this union during comptime operations. +/// Asserts the type is either an extern or packed union. +pub fn unionBackingType(ty: Type, mod: *Module) !Type { + return switch (ty.containerLayout(mod)) { + .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), + .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), + .auto => unreachable, + }; +} + +pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { + const ip = &mod.intern_pool; + const union_obj = ip.loadUnionType(ty.toIntern()); + return mod.getUnionLayout(union_obj); +} + +pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).layout, + .anon_struct_type => .auto, + .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, + else => unreachable, + }; +} + +/// Asserts that the type is an error union. +pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type); +} + +/// Asserts that the type is an error union. +pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern())); +} + +/// Returns false for unresolved inferred error sets. +pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none, .anyerror_type => false, + else => |t| ip.indexToKey(t).error_set_type.names.len == 0, + }, + else => unreachable, + }, + }; +} + +/// Returns true if it is an error set that includes anyerror, false otherwise. +/// Note that the result may be a false negative if the type did not get error set +/// resolution prior to this call. 
+pub fn isAnyError(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + .adhoc_inferred_error_set_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, + else => false, + }, + }; +} + +pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .ErrorUnion, .ErrorSet => true, + else => false, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, +) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, + }, + else => unreachable, + }, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; + }, + }, + else => unreachable, + }, + }; +} + +/// Asserts the type is an array or vector or struct. +pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return ty.arrayLenIp(&mod.intern_pool); +} + +pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { + return ip.aggregateTypeLen(ty.toIntern()); +} + +pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern()); +} + +pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(tuple.types.len), + else => unreachable, + }; +} + +/// Asserts the type is an array, pointer or vector. +pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, + + .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + + else => unreachable, + }; +} + +/// Returns true if and only if the type is a fixed-width integer. 
+pub fn isInt(self: Type, mod: *const Module) bool { + return self.toIntern() != .comptime_int_type and + mod.intern_pool.isIntegerType(self.toIntern()); +} + +/// Returns true if and only if the type is a fixed-width, signed integer. +pub fn isSignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .signed, + .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .signed, + else => false, + }, + }; +} + +/// Returns true if and only if the type is a fixed-width, unsigned integer. +pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .unsigned, + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .unsigned, + else => false, + }, + }; +} + +/// Returns true for integers, enums, error sets, and packed structs. +/// If this function returns true, then intInfo() can be called on the type. +pub fn isAbiInt(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, .Enum, .ErrorSet => true, + .Struct => ty.containerLayout(mod) == .@"packed", + else => false, + }; +} + +/// Asserts the type is an integer, enum, error set, or vector of one of them. +pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + var ty = starting_ty; + + while (true) switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type, + .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), + .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), + .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), + + .error_set_type, .inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + + .anon_struct_type => unreachable, + + .ptr_type => unreachable, + .anyframe_type => unreachable, + .array_type => unreachable, + + .opt_type => unreachable, + 
.error_union_type => unreachable, + .func_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + + .union_type => unreachable, + .opaque_type => unreachable, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isNamedInt(ty: Type) bool { + return switch (ty.toIntern()) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => false, + }; +} + +/// Returns `false` for `comptime_float`. +pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + => true, + + else => false, + }; +} + +/// Returns `true` for `comptime_float`. +pub fn isAnyFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, + => true, + + else => false, + }; +} + +/// Asserts the type is a fixed-size float or comptime_float. +/// Returns 128 for comptime_float types. +pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.toIntern()) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), + + else => unreachable, + }; +} + +/// Asserts the type is a function or a function pointer. +pub fn fnReturnType(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern())); +} + +/// Asserts the type is a function. +pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; +} + +pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque, .NoReturn => false, + else => true, + }; +} + +pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque => false, + else => true, + }; +} + +/// Asserts the type is a function. +pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; +} + +pub fn isNumeric(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => true, + else => false, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which +/// resolves field types rather than asserting they are already resolved. 
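+/// Illustrative examples: `u0` has the single value 0, `void` has the single
+/// value `{}`, and a struct whose fields each have one possible value has
+/// exactly one possible value itself; `u8` yields `null`.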
+pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { + var ty = starting_type; + const ip = &mod.intern_pool; + while (true) switch (ty.toIntern()) { + .empty_struct_type => return Value.empty_struct, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => return null, + + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } }))); + if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } }))); + } + return null; + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); + } else { + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + .adhoc_inferred_error_set, + => return null, + + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveFieldTypes(ip)); + if (struct_type.knownNonOpv(ip)) + return null; + const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len); + defer mod.gpa.free(field_vals); + for (field_vals, 0..) |*field_val, i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) { + assert(struct_type.haveFieldInits(ip)); + field_val.* = struct_type.field_inits.get(ip)[i]; + continue; + } + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.onePossibleValue(mod)) |field_opv| { + field_val.* = field_opv.toIntern(); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }))); + }, + + .anon_struct_type => |tuple| { + for (tuple.values.get(ip)) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); + defer mod.gpa.free(duped_values); + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = duped_values }, + } }))); + }, + + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse + return null; + if (union_obj.field_types.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + } + const only_field_ty = union_obj.field_types.get(ip)[0]; + const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return Value.fromInterned(only); + }, + .opaque_type => return null, + .enum_type => { + const enum_type = ip.loadEnumType(ty.toIntern()); + switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return Value.fromInterned(only); + } + + return null; + }, + .auto, .explicit => { + if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + }, + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return Value.fromInterned(only); + } else { + return Value.fromInterned(enum_type.values.get(ip)[0]); + } + }, + else => return null, + } + }, + } + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeRequiresComptime` which +/// resolves field types rather than asserting they are already resolved. +pub fn comptimeOnly(ty: Type, mod: *Module) bool { + return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; +} + +/// `generic_poison` will return false. +/// May return false negatives when structs and unions are having their field types resolved. +/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
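+/// Illustrative examples: `type` and `comptime_int` are comptime-only, `u32`
+/// is not, and `*const type` is comptime-only because its child type is.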
+pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .empty_struct_type => false, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = Type.fromInterned(ptr_type.child); + switch (child_ty.zigTypeTag(mod)) { + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), + .Opaque => return false, + else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), + } + }, + .anyframe_type => |child| { + if (child == .none) return false; + return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); + }, + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), + + .error_set_type, + .inferred_error_set_type, + => false, + + // These are function bodies, not function pointers. + .func_type => true, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .adhoc_inferred_error_set, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + // packed structs cannot be comptime-only because they have a well-defined + // memory layout and every field has a well-defined bit pattern. + if (struct_type.layout == .@"packed") + return false; + + // A struct with no fields is not comptime-only. + return switch (struct_type.flagsPtr(ip).requires_comptime) { + .no, .wip => false, + .yes => true, + .unknown => { + // The type is not resolved; assert that we have a Sema. + const sema = opt_sema.?; + + if (struct_type.flagsPtr(ip).field_types_wip) + return false; + + struct_type.flagsPtr(ip).requires_comptime = .wip; + errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; + + try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); + + for (0..struct_type.field_types.len) |i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) continue; + const field_ty = struct_type.field_types.get(ip)[i]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + // Note that this does not cause the layout to + // be considered resolved. Comptime-only types + // still maintain a layout of their + // runtime-known fields. 
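+                            // (The `.wip` state set above also breaks cycles:
+                            // recursive queries on self-referential structs
+                            // return false instead of recursing forever.)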
+ struct_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + struct_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + }; + }, + + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + // The type is not resolved; assert that we have a Sema. + const sema = opt_sema.?; + + if (union_type.flagsPtr(ip).status == .field_types_wip) + return false; + + union_type.flagsPtr(ip).requires_comptime = .wip; + errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; + + try sema.resolveTypeFieldsUnion(ty, union_type); + + for (0..union_type.field_types.len) |field_idx| { + const field_ty = union_type.field_types.get(ip)[field_idx]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + union_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + union_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + } + }, + + .opaque_type => false, + + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isVector(ty: Type, mod: *const Module) bool { + return ty.zigTypeTag(mod) == .Vector; +} + +/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. +pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { + if (!ty.isVector(zcu)) return 0; + const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; + return v.len * Type.fromInterned(v.child).bitSize(zcu); +} + +pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + else => false, + }; +} + +pub fn isIndexable(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Slice, .Many, .C => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +pub fn indexableHasLen(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Many, .C => false, + .Slice => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +/// Asserts that the type can have a namespace. +pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { + return ty.getNamespace(zcu).?; +} + +/// Returns null if the type has no namespace. 
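+/// Unlike `getNamespaceIndex`, this never asserts, so it is safe to call on any type.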
+pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
+        .struct_type => ip.loadStructType(ty.toIntern()).namespace,
+        .union_type => ip.loadUnionType(ty.toIntern()).namespace,
+        .enum_type => ip.loadEnumType(ty.toIntern()).namespace,
+
+        .anon_struct_type => .none,
+        .simple_type => |s| switch (s) {
+            .anyopaque,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            .type_info,
+            => .none,
+            else => null,
+        },
+
+        else => null,
+    };
+}
+
+/// Works for integers and vectors of integers.
+pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+    return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .repeated_elem = scalar.toIntern() },
+    } }))) else scalar;
+}
+
+/// Asserts that the type is an integer.
+pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const info = ty.intInfo(mod);
+    if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
+    if (info.bits == 0) return mod.intValue(dest_ty, -1);
+
+    if (std.math.cast(u6, info.bits - 1)) |shift| {
+        const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
+        return mod.intValue(dest_ty, n);
+    }
+
+    var res = try std.math.big.int.Managed.init(mod.gpa);
+    defer res.deinit();
+
+    try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
+
+    return mod.intValue_big(dest_ty, res.toConst());
+}
+
+/// Works for integers and vectors of integers.
+/// The returned Value will have type dest_ty.
+pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+    return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .repeated_elem = scalar.toIntern() },
+    } }))) else scalar;
+}
+
+/// The returned Value will have type dest_ty.
+pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const info = ty.intInfo(mod);
+
+    switch (info.bits) {
+        0 => return switch (info.signedness) {
+            .signed => try mod.intValue(dest_ty, -1),
+            .unsigned => try mod.intValue(dest_ty, 0),
+        },
+        1 => return switch (info.signedness) {
+            .signed => try mod.intValue(dest_ty, 0),
+            .unsigned => try mod.intValue(dest_ty, 1),
+        },
+        else => {},
+    }
+
+    if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
+        .signed => {
+            const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
+            return mod.intValue(dest_ty, n);
+        },
+        .unsigned => {
+            const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
+            return mod.intValue(dest_ty, n);
+        },
+    };
+
+    var res = try std.math.big.int.Managed.init(mod.gpa);
+    defer res.deinit();
+
+    try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
+
+    return mod.intValue_big(dest_ty, res.toConst());
+}
+
+/// Asserts the type is an enum or a union.
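+/// For a union, this returns the integer tag type of its backing enum.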
+pub fn intTagType(ty: Type, mod: *Module) Type {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
+        .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
+        else => unreachable,
+    };
+}
+
+pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
+            .nonexhaustive => true,
+            .auto, .explicit => false,
+        },
+        else => false,
+    };
+}
+
+/// Asserts that `ty` is an error set and not `anyerror`.
+/// Asserts that `ty` is resolved if it is an inferred error set.
+pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .error_set_type => |x| x.names,
+        .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
+            .none => unreachable, // unresolved inferred error set
+            .anyerror_type => unreachable,
+            else => |t| ip.indexToKey(t).error_set_type.names,
+        },
+        else => unreachable,
+    };
+}
+
+pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
+    return mod.intern_pool.loadEnumType(ty.toIntern()).names;
+}
+
+pub fn enumFieldCount(ty: Type, mod: *Module) usize {
+    return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
+}
+
+pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
+    const ip = &mod.intern_pool;
+    return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
+}
+
+pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
+    const ip = &mod.intern_pool;
+    const enum_type = ip.loadEnumType(ty.toIntern());
+    return enum_type.nameIndex(ip, field_name);
+}
+
+/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
+/// an integer which represents the enum value. Returns the field index in
+/// declaration order, or `null` if `enum_tag` does not match any field.
+pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
+    const ip = &mod.intern_pool;
+    const enum_type = ip.loadEnumType(ty.toIntern());
+    const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
+        .int => enum_tag.toIntern(),
+        .enum_tag => |info| info.int,
+        else => unreachable,
+    };
+    assert(ip.typeOf(int_tag) == enum_type.tag_ty);
+    return enum_type.tagValueIndex(ip, int_tag);
+}
+
+/// Returns `.none` in the case of a tuple which uses the integer index as the field name.
+pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+        .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
+        else => unreachable,
+    };
+}
+
+pub fn structFieldCount(ty: Type, mod: *Module) u32 {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
+        .anon_struct_type => |anon_struct| anon_struct.types.len,
+        else => unreachable,
+    };
+}
+
+/// Supports structs and unions.
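+/// Anonymous struct (tuple) types are supported as well.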
+pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + return Type.fromInterned(union_obj.field_types.get(ip)[index]); + }, + .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), + else => unreachable, + }; +} + +pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { + return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; +} + +pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.layout != .@"packed"); + const explicit_align = struct_type.fieldAlign(ip, index); + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); + if (opt_sema) |sema| { + return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); + } else { + return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); + } + }, + .anon_struct_type => |anon_struct| { + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; + }, + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + if (opt_sema) |sema| { + return sema.unionFieldAlignment(union_obj, @intCast(index)); + } else { + return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); + } + }, + else => unreachable, + } +} + +pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const val = struct_type.fieldInit(ip, index); + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + .anon_struct_type => |anon_struct| { + const val = anon_struct.values.get(ip)[index]; + // TODO: avoid using `unreachable` to indicate this. 
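+            // (`.none` means this tuple field has no default init value.)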
+ if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + else => unreachable, + } +} + +pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.fieldIsComptime(ip, index)) { + assert(struct_type.haveFieldInits(ip)); + return Value.fromInterned(struct_type.field_inits.get(ip)[index]); + } else { + return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); + } + }, + .anon_struct_type => |tuple| { + const val = tuple.values.get(ip)[index]; + if (val == .none) { + return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); + } else { + return Value.fromInterned(val); + } + }, + else => unreachable, + } +} + +pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), + .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, + else => unreachable, + }; +} + +pub const FieldOffset = struct { + field: usize, + offset: u64, +}; + +/// Supports structs and unions. +pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveLayout(ip)); + assert(struct_type.layout != .@"packed"); + return struct_type.offsets.get(ip)[index]; + }, + + .anon_struct_type => |tuple| { + var offset: u64 = 0; + var big_align: Alignment = .none; + + for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { + // comptime field + if (i == index) return offset; + continue; + } + + const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + big_align = big_align.max(field_align); + offset = field_align.forward(offset); + if (i == index) return offset; + offset += Type.fromInterned(field_ty).abiSize(mod); + } + offset = big_align.max(.@"1").forward(offset); + return offset; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) + return 0; + const layout = mod.getUnionLayout(union_type); + if (layout.tag_align.compare(.gte, layout.payload_align)) { + // {Tag, Payload} + return layout.payload_align.forward(layout.tag_size); + } else { + // {Payload, Tag} + return 0; + } + }, + + else => unreachable, + } +} + +pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { + return ty.getOwnerDeclOrNull(mod) orelse unreachable; +} + +pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), + .union_type => ip.loadUnionType(ty.toIntern()).decl, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, + .enum_type => ip.loadEnumType(ty.toIntern()).decl, + else => null, + }; +} + +pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { + const ip = &zcu.intern_pool; + return .{ + .base_node_inst = switch (ip.indexToKey(ty.toIntern())) { + .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { + .declared => |d| d.zir_index, + .reified => |r| r.zir_index, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => return null, + }, + else => return null, + }, + .offset = Module.LazySrcLoc.Offset.nodeOffset(0), + }; +} + +pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc { + return ty.srcLocOrNull(zcu).?; +} + +pub fn isGenericPoison(ty: Type) bool { + return ty.toIntern() == .generic_poison_type; +} + +pub fn isTuple(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, + else => false, + }; +} + +pub fn isAnonStruct(ty: Type, mod: *Module) bool { + if (ty.toIntern() == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, + else => false, + }; +} + +pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => true, + else => false, + }; +} + +pub fn isSimpleTuple(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, + else => false, + }; +} + +pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { + return switch 
(mod.intern_pool.indexToKey(ty.toIntern())) {
+        .anon_struct_type => true,
+        else => false,
+    };
+}
+
+/// Traverses optional child types and error union payloads until the type
+/// is neither an optional nor an error union.
+/// For `E!?u32`, returns `u32`; for `*u8`, returns `*u8`.
+pub fn optEuBaseType(ty: Type, mod: *Module) Type {
+    var cur = ty;
+    while (true) switch (cur.zigTypeTag(mod)) {
+        .Optional => cur = cur.optionalChild(mod),
+        .ErrorUnion => cur = cur.errorUnionPayload(mod),
+        else => return cur,
+    };
+}
+
+pub fn toUnsigned(ty: Type, mod: *Module) !Type {
+    return switch (ty.zigTypeTag(mod)) {
+        .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
+        .Vector => try mod.vectorType(.{
+            .len = ty.vectorLen(mod),
+            .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
+        }),
+        else => unreachable,
+    };
+}
+
+pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
+        .union_type => ip.loadUnionType(ty.toIntern()).zir_index,
+        .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
+        else => null,
+    };
+}
+
+pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
+    const ip = &zcu.intern_pool;
+    const tracked = switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
+            .declared => |d| d.zir_index,
+            .reified => |r| r.zir_index,
+            .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
+            .empty_struct => return null,
+        },
+        else => return null,
+    };
+    const info = tracked.resolveFull(&zcu.intern_pool);
+    const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?];
+    assert(file.zir_loaded);
+    const zir = file.zir;
+    const inst = zir.instructions.get(@intFromEnum(info.inst));
+    assert(inst.tag == .extended);
+    return switch (inst.data.extended.opcode) {
+        .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
+        .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
+        .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
+        .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
+        .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
+        else => unreachable,
+    };
+}
+
+/// Given a namespace type, returns its list of captured values.
+pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).captures,
+        .union_type => ip.loadUnionType(ty.toIntern()).captures,
+        .enum_type => ip.loadEnumType(ty.toIntern()).captures,
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
+        else => unreachable,
+    };
+}
+
+pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
+    var cur_ty: Type = ty;
+    var cur_len: u64 = 1;
+    while (cur_ty.zigTypeTag(zcu) == .Array) {
+        cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
+        cur_ty = cur_ty.childType(zcu);
+    }
+    return .{ cur_ty, cur_len };
+}
+
+pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
+    /// The result is a bit-pointer with the same value and a new packed offset.
+ bit_ptr: InternPool.Key.PtrType.PackedOffset, + /// The result is a standard pointer. + byte_ptr: struct { + /// The byte offset of the field pointer from the parent pointer value. + offset: u64, + /// The alignment of the field pointer type. + alignment: InternPool.Alignment, + }, +} { + comptime assert(Type.packed_struct_layout_version == 2); + + const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); + const field_ty = struct_ty.structFieldType(field_idx, zcu); + + var bit_offset: u16 = 0; + var running_bits: u16 = 0; + for (0..struct_ty.structFieldCount(zcu)) |i| { + const f_ty = struct_ty.structFieldType(i, zcu); + if (i == field_idx) { + bit_offset = running_bits; + } + running_bits += @intCast(f_ty.bitSize(zcu)); + } + + const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) + .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset } + else + .{ (running_bits + 7) / 8, bit_offset }; + + // If the field happens to be byte-aligned, simplify the pointer type. + // We can only do this if the pointee's bit size matches its ABI byte size, + // so that loads and stores do not interfere with surrounding packed bits. + // + // TODO: we do not attempt this with big-endian targets yet because of nested + // structs and floats. I need to double-check the desired behavior for big endian + // targets before adding the necessary complications to this code. This will not + // cause miscompilations; it only means the field pointer uses bit masking when it + // might not be strictly necessary. + if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { + const byte_offset = res_bit_offset / 8; + const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); + return .{ .byte_ptr = .{ + .offset = byte_offset, + .alignment = new_align, + } }; + } + + return .{ .bit_ptr = .{ + .host_size = res_host_size, + .bit_offset = res_bit_offset, + } }; +} + +pub const @"u1": Type = .{ .ip_index = .u1_type }; +pub const @"u8": Type = .{ .ip_index = .u8_type }; +pub const @"u16": Type = .{ .ip_index = .u16_type }; +pub const @"u29": Type = .{ .ip_index = .u29_type }; +pub const @"u32": Type = .{ .ip_index = .u32_type }; +pub const @"u64": Type = .{ .ip_index = .u64_type }; +pub const @"u128": Type = .{ .ip_index = .u128_type }; + +pub const @"i8": Type = .{ .ip_index = .i8_type }; +pub const @"i16": Type = .{ .ip_index = .i16_type }; +pub const @"i32": Type = .{ .ip_index = .i32_type }; +pub const @"i64": Type = .{ .ip_index = .i64_type }; +pub const @"i128": Type = .{ .ip_index = .i128_type }; + +pub const @"f16": Type = .{ .ip_index = .f16_type }; +pub const @"f32": Type = .{ .ip_index = .f32_type }; +pub const @"f64": Type = .{ .ip_index = .f64_type }; +pub const @"f80": Type = .{ .ip_index = .f80_type }; +pub const @"f128": Type = .{ .ip_index = .f128_type }; + +pub const @"bool": Type = .{ .ip_index = .bool_type }; +pub const @"usize": Type = .{ .ip_index = .usize_type }; +pub const @"isize": Type = .{ .ip_index = .isize_type }; +pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type }; +pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type }; +pub const @"void": Type = .{ .ip_index = .void_type }; +pub const @"type": Type = .{ .ip_index = .type_type }; +pub const @"anyerror": Type = .{ .ip_index = .anyerror_type }; +pub const @"anyopaque": Type = .{ .ip_index = 
.anyopaque_type }; +pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; +pub const @"null": Type = .{ .ip_index = .null_type }; +pub const @"undefined": Type = .{ .ip_index = .undefined_type }; +pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; + +pub const @"c_char": Type = .{ .ip_index = .c_char_type }; +pub const @"c_short": Type = .{ .ip_index = .c_short_type }; +pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; +pub const @"c_int": Type = .{ .ip_index = .c_int_type }; +pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; +pub const @"c_long": Type = .{ .ip_index = .c_long_type }; +pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; +pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; +pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; +pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; + +pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; +pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; +pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, +}; +pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; +pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; + +pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; + +pub fn smallestUnsignedBits(max: u64) u16 { + if (max == 0) return 0; + const base = std.math.log2(max); + const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; + return @as(u16, @intCast(base + @intFromBool(upper < max))); +} + +/// This is only used for comptime asserts. Bump this number when you make a change +/// to packed struct layout to find out all the places in the codebase you need to edit! 
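+/// (See the `comptime assert` at the top of `packedStructFieldPtrInfo`.)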
+pub const packed_struct_layout_version = 2; + +fn cTypeAlign(target: Target, c_type: Target.CType) Alignment { + return Alignment.fromByteUnits(target.c_type_alignment(c_type)); +} diff --git a/src/Value.zig b/src/Value.zig index 5719ed368980..20b24510ef68 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1,6 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; diff --git a/src/Zcu.zig b/src/Zcu.zig index 508bef971a88..27e9347268ea 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -20,7 +20,7 @@ const Zcu = @This(); const Compilation = @import("Compilation.zig"); const Cache = std.Build.Cache; const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 51b62aba14bb..14b9cce3a884 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -8,7 +8,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 2a25dbf1bec0..5eeeee0fa2bf 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -3,7 +3,7 @@ const builtin = @import("builtin"); const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index ae802c8f486b..0423b63d2383 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -8,7 +8,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index b85deaa3ce33..da19760d8bc8 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -11,7 +11,7 @@ const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. 
const Module = Zcu; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const ErrorMsg = Module.ErrorMsg; const Target = std.Target; const assert = std.debug.assert; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 1a434b3b8c33..f88218bc57dc 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2bba63f616d7..3f01b7473368 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -7,7 +7,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 0753b142b118..80a533d88068 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -431,7 +431,7 @@ pub const RegisterList = struct { const Mir = @This(); const std = @import("std"); const builtin = @import("builtin"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const assert = std.debug.assert; diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index dd35fc41e53e..042af564f6ab 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -2,7 +2,7 @@ const std = @import("std"); const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const InternPool = @import("../../InternPool.zig"); const Zcu = @import("../../Zcu.zig"); const assert = std.debug.assert; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index ca1cef125022..2416eb9176a4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -21,7 +21,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const CodeGenError = codegen.CodeGenError; const Result = @import("../../codegen.zig").Result; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 91d637c7653d..2ecface64e92 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -13,7 +13,7 @@ const codegen = @import("../../codegen.zig"); const Zcu = @import("../../Zcu.zig"); const InternPool = @import("../../InternPool.zig"); const Decl = Zcu.Decl; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Compilation = @import("../../Compilation.zig"); const link = @import("../../link.zig"); diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 03c68daa8533..23097990ac74 100644 --- 
a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -8,7 +8,7 @@ const std = @import("std"); const Target = std.Target; const assert = std.debug.assert; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Defines how to pass a type as part of a function signature, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 31ed0bf51489..def0edcac9af 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -32,7 +32,7 @@ const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const Alignment = InternPool.Alignment; const Target = std.Target; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Instruction = @import("encoder.zig").Instruction; diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index f1117f16c1d6..05c0c9626ca2 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -537,6 +537,6 @@ const testing = std.testing; const InternPool = @import("../../InternPool.zig"); const Register = @import("bits.zig").Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/codegen.zig b/src/codegen.zig index 769e8f7cd54e..5e25359d4470 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -20,7 +20,7 @@ const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; const Target = std.Target; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zir = std.zig.Zir; const Alignment = InternPool.Alignment; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 6bd8bcc6fc2e..13d9e6751967 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -9,7 +9,7 @@ const Zcu = @import("../Zcu.zig"); const Module = @import("../Package/Module.zig"); const Compilation = @import("../Compilation.zig"); const Value = @import("../Value.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const C = link.File.C; const Decl = Zcu.Decl; const trace = @import("../tracy.zig").trace; diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index e316d7a15491..0a0d84f06105 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -2583,6 +2583,6 @@ const assert = std.debug.assert; const CType = @This(); const Module = @import("../../Package/Module.zig"); const std = @import("std"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); const DeclIndex = @import("../../InternPool.zig").DeclIndex; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c65158a88a1c..b3718db5b118 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -22,7 +22,7 @@ const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const Value = @import("../Value.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const x86_64_abi = @import("../arch/x86_64/abi.zig"); const wasm_c_abi = @import("../arch/wasm/abi.zig"); const aarch64_c_abi = @import("../arch/aarch64/abi.zig"); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 494ec0737e63..c56a5a799e9c 100644 --- a/src/codegen/spirv.zig 
+++ b/src/codegen/spirv.zig @@ -9,7 +9,7 @@ const Zcu = @import("../Zcu.zig"); /// Deprecated. const Module = Zcu; const Decl = Module.Decl; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); diff --git a/src/link.zig b/src/link.zig index 7f108c283f9a..009b38a681af 100644 --- a/src/link.zig +++ b/src/link.zig @@ -18,7 +18,7 @@ const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const LlvmObject = @import("codegen/llvm.zig").Object; const lldMain = @import("main.zig").lldMain; diff --git a/src/link/C.zig b/src/link/C.zig index 3a8d06b5ee31..8372029d2d5e 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -14,7 +14,7 @@ const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 366ba8750906..55028fc8ad32 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -2742,7 +2742,7 @@ const Object = @import("Coff/Object.zig"); const Relocation = @import("Coff/Relocation.zig"); const TableSection = @import("table_section.zig").TableSection; const StringTable = @import("StringTable.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 7d576abbb4fc..2bb0a4c0a0be 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2969,5 +2969,5 @@ const Zcu = @import("../Zcu.zig"); const Module = Zcu; const InternPool = @import("../InternPool.zig"); const StringTable = @import("StringTable.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 57fa61001948..56311dd64b81 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1647,7 +1647,7 @@ const Module = Zcu; const Object = @import("Object.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 3f0e84d6a224..c022a3066433 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -459,4 +459,4 @@ const trace = @import("../../tracy.zig").trace; const Allocator = mem.Allocator; const MachO = @import("../MachO.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 861ced921472..bb5ded654d0d 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -1587,7 +1587,7 @@ const Object = @import("Object.zig"); const 
Relocation = @import("Relocation.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 2efe569d9860..96fbaf42c7d3 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -15,7 +15,7 @@ const File = link.File; const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 3befedad89b3..d14061fe78c1 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -33,7 +33,7 @@ const Zcu = @import("../Zcu.zig"); const Module = Zcu; const Object = @import("Wasm/Object.zig"); const Symbol = @import("Wasm/Symbol.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const ZigObject = @import("Wasm/ZigObject.zig"); diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index ca950e5cef4e..24fc66367a92 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -1252,7 +1252,7 @@ const Zcu = @import("../../Zcu.zig"); const Module = Zcu; const StringTable = @import("../StringTable.zig"); const Symbol = @import("Symbol.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Wasm = @import("../Wasm.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 77c082769188..1806e6ba1915 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Zcu = @import("Zcu.zig"); const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); /// We use a tagged union here because while it wastes a few bytes for some tags, having a fixed diff --git a/src/print_air.zig b/src/print_air.zig index 2dbaf3069f5f..85fbe87ec9d1 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -4,7 +4,7 @@ const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Zcu = @import("Zcu.zig"); const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); const InternPool = @import("InternPool.zig"); diff --git a/src/print_value.zig b/src/print_value.zig index 7f75b0560625..d2952c3d8eb5 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -2,7 +2,7 @@ //! It is a thin wrapper around a `Value` which also, redundantly, stores its `Type`. const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. 
diff --git a/src/register_manager.zig b/src/register_manager.zig index e1bc4d52fa6e..fb9afbbc0109 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -5,7 +5,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Air = @import("Air.zig"); const StaticBitSet = std.bit_set.StaticBitSet; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/target.zig b/src/target.zig index 08ccfbaaca55..a253c1fa0b81 100644 --- a/src/target.zig +++ b/src/target.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const AddressSpace = std.builtin.AddressSpace; const Alignment = @import("InternPool.zig").Alignment; const Feature = @import("Zcu.zig").Feature; diff --git a/src/type.zig b/src/type.zig deleted file mode 100644 index df9382227357..000000000000 --- a/src/type.zig +++ /dev/null @@ -1,3617 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const Value = @import("Value.zig"); -const assert = std.debug.assert; -const Target = std.Target; -const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; -const log = std.log.scoped(.Type); -const target_util = @import("target.zig"); -const Sema = @import("Sema.zig"); -const InternPool = @import("InternPool.zig"); -const Alignment = InternPool.Alignment; -const Zir = std.zig.Zir; - -/// Both types and values are canonically represented by a single 32-bit integer -/// which is an index into an `InternPool` data structure. -/// This struct abstracts around this storage by providing methods only -/// applicable to types rather than values in general. -pub const Type = struct { - ip_index: InternPool.Index, - - pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { - return ty.zigTypeTagOrPoison(mod) catch unreachable; - } - - pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); - } - - pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { - return switch (self.zigTypeTag(mod)) { - .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), - .Optional => { - return self.optionalChild(mod).baseZigTypeTag(mod); - }, - else => |t| t, - }; - } - - pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag(mod)) { - .Int, - .Float, - .ComptimeFloat, - .ComptimeInt, - => true, - - .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), - - .Bool, - .Type, - .Void, - .ErrorSet, - .Fn, - .Opaque, - .AnyFrame, - .Enum, - .EnumLiteral, - => is_equality_cmp, - - .NoReturn, - .Array, - .Struct, - .Undefined, - .Null, - .ErrorUnion, - .Union, - .Frame, - => false, - - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), - .Optional => { - if (!is_equality_cmp) return false; - return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); - }, - }; - } - - /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { - if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(mod); - if (elem_ty.zigTypeTag(mod) != .Fn) return null; - return elem_ty; - } - - /// Asserts the type is a pointer. 
- pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; - } - - pub const ArrayInfo = struct { - elem_type: Type, - sentinel: ?Value = null, - len: u64, - }; - - pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { - return .{ - .len = self.arrayLen(mod), - .sentinel = self.sentinel(mod), - .elem_type = self.childType(mod), - }; - } - - pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |p| p, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| p, - else => unreachable, - }, - else => unreachable, - }; - } - - pub fn eql(a: Type, b: Type, mod: *const Module) bool { - _ = mod; // TODO: remove this parameter - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - return a.toIntern() == b.toIntern(); - } - - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = ty; - _ = unused_fmt_string; - _ = options; - _ = writer; - @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); - } - - pub const Formatter = std.fmt.Formatter(format2); - - pub fn fmt(ty: Type, module: *Module) Formatter { - return .{ .data = .{ - .ty = ty, - .module = module, - } }; - } - - const FormatContext = struct { - ty: Type, - module: *Module, - }; - - fn format2( - ctx: FormatContext, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { - comptime assert(unused_format_string.len == 0); - _ = options; - return print(ctx.ty, writer, ctx.module); - } - - pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { - return .{ .data = ty }; - } - - /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the module. - pub fn dump( - start_type: Type, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) @TypeOf(writer).Error!void { - _ = options; - comptime assert(unused_format_string.len == 0); - return writer.print("{any}", .{start_type.ip_index}); - } - - /// Prints a name suitable for `@typeName`. - /// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. 
- pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - const sign_char: u8 = switch (int_type.signedness) { - .signed => 'i', - .unsigned => 'u', - }; - return writer.print("{c}{d}", .{ sign_char, int_type.bits }); - }, - .ptr_type => { - const info = ty.ptrInfo(mod); - - if (info.sentinel != .none) switch (info.flags.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - } else switch (info.flags.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.flags.alignment != .none or - info.packed_offset.host_size != 0 or - info.flags.vector_index != .none) - { - const alignment = if (info.flags.alignment != .none) - info.flags.alignment - else - Type.fromInterned(info.child).abiAlignment(mod); - try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); - - if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { - try writer.print(":{d}:{d}", .{ - info.packed_offset.bit_offset, info.packed_offset.host_size, - }); - } - if (info.flags.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.flags.vector_index != .none) { - try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.flags.address_space != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); - } - if (info.flags.is_const) try writer.writeAll("const "); - if (info.flags.is_volatile) try writer.writeAll("volatile "); - if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); - - try print(Type.fromInterned(info.child), writer, mod); - return; - }, - .array_type => |array_type| { - if (array_type.sentinel == .none) { - try writer.print("[{d}]", .{array_type.len}); - try print(Type.fromInterned(array_type.child), writer, mod); - } else { - try writer.print("[{d}:{}]", .{ - array_type.len, - Value.fromInterned(array_type.sentinel).fmtValue(mod, null), - }); - try print(Type.fromInterned(array_type.child), writer, mod); - } - return; - }, - .vector_type => |vector_type| { - try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(Type.fromInterned(vector_type.child), writer, mod); - try writer.writeAll(")"); - return; - }, - .opt_type => |child| { - try writer.writeByte('?'); - return print(Type.fromInterned(child), writer, mod); - }, - .error_union_type => |error_union_type| { - try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); - try writer.writeByte('!'); - if (error_union_type.payload_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(error_union_type.payload_type), writer, mod); - } - return; - }, - .inferred_error_set_type => |func_index| { - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.funcOwnerDeclPtr(func_index); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - .error_set_type => |error_set_type| { - const names = error_set_type.names; - try writer.writeAll("error{"); - for (names.get(ip), 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.print("{}", .{name.fmt(ip)}); - } - try writer.writeAll("}"); - }, - .simple_type => |s| switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .noreturn, - .adhoc_inferred_error_set, - => return writer.writeAll(@tagName(s)), - - .null, - .undefined, - => try writer.print("@TypeOf({s})", .{@tagName(s)}), - - .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), - .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), - .address_space => try writer.writeAll("std.builtin.AddressSpace"), - .float_mode => try writer.writeAll("std.builtin.FloatMode"), - .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), - .call_modifier => try writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => try writer.writeAll("std.builtin.ExportOptions"), - .extern_options => try writer.writeAll("std.builtin.ExternOptions"), - .type_info => try writer.writeAll("std.builtin.Type"), - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.decl.unwrap()) |decl_index| { - const decl = mod.declPtr(decl_index); - try decl.renderFullyQualifiedName(mod, writer); - } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { - const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, .empty, writer); - } else { - try writer.writeAll("@TypeOf(.{})"); - } - }, - .anon_struct_type => |anon_struct| { - if (anon_struct.types.len == 0) { - return writer.writeAll("@TypeOf(.{})"); - } - try writer.writeAll("struct{"); - for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| { - if (i != 0) try writer.writeAll(", "); - if (val != .none) { - try writer.writeAll("comptime "); - } - if (anon_struct.names.len != 0) { - try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); - } - - try print(Type.fromInterned(field_ty), writer, mod); - - if (val != .none) { - try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); - } - } - try writer.writeAll("}"); - }, - - .union_type => { - const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .opaque_type => { - const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_type => { - const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .func_type => |fn_info| { - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn ("); - const param_types = fn_info.param_types.get(&mod.intern_pool); - for (param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (std.math.cast(u5, i)) |index| { - if (fn_info.paramIsComptime(index)) { - try writer.writeAll("comptime "); - } - if (fn_info.paramIsNoalias(index)) { - try writer.writeAll("noalias "); - } - } - if (param_ty == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(param_ty), writer, mod); - } - } - if (fn_info.is_var_args) { - if (param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(fn_info.return_type), writer, mod); - } - }, - .anyframe_type => |child| { - if (child == .none) return writer.writeAll("anyframe"); - try writer.writeAll("anyframe->"); - return print(Type.fromInterned(child), writer, mod); - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - pub fn fromInterned(i: InternPool.Index) Type { - assert(i != .none); - return .{ .ip_index = i }; - } - - pub fn toIntern(ty: Type) InternPool.Index { - assert(ty.ip_index != .none); - return ty.ip_index; - } - - pub fn toValue(self: Type) Value { - return Value.fromInterned(self.toIntern()); - } - - const RuntimeBitsError = Module.CompileError || error{NeedLazy}; - - /// true if and only if the type takes up space in memory at runtime. - /// There are two reasons a type will return false: - /// * the type is a comptime-only type. For example, the type `type` itself. - /// - note, however, that a struct can have mixed fields and only the non-comptime-only - /// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` - /// hasRuntimeBits()=true and abiSize()=4 - /// * the type has only one possible value, making its ABI size 0. - /// - an enum with an explicit tag type has the ABI size of the integer tag type, - /// making it one-possible-value only if the integer tag type has 0 bits. - /// When `ignore_comptime_only` is true, then types that are comptime-only - /// may return false positives. - pub fn hasRuntimeBitsAdvanced( - ty: Type, - mod: *Module, - ignore_comptime_only: bool, - strat: AbiAlignmentAdvancedStrat, - ) RuntimeBitsError!bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - // False because it is a comptime-only type. - .empty_struct_type => false, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.bits != 0, - .ptr_type => { - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. 
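                // Editor's note (an illustrative aside, not part of this patch):
                // at the language level the comment above means, for example:
                //   *u32              -> true  (a real runtime address)
                //   *void             -> true  (zero-bit pointee, still an address)
                //   *comptime_int     -> false (comptime-only pointee)
                //   *const fn () void -> true  (function pointers exist at runtime)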
- if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(ty)), - .eager => !comptimeOnly(ty, mod), - .lazy => error.NeedLazy, - }; - }, - .anyframe_type => true, - .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and - try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .vector_type => |vector_type| return vector_type.len > 0 and - try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .opt_type => |child| { - const child_ty = Type.fromInterned(child); - if (child_ty.isNoReturn(mod)) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), - .eager => !comptimeOnly(child_ty, mod), - .lazy => error.NeedLazy, - }; - }, - .error_union_type, - .error_set_type, - .inferred_error_set_type, - => true, - - // These are function *bodies*, not pointers. - // They return false here because they are comptime-only types. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .func_type => false, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .anyerror, - .adhoc_inferred_error_set, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => true, - - // These are false because they are comptime-only types. - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - => false, - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_type.haveFieldTypes(ip)), - .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, - } - for (0..struct_type.field_types.len) |i| { - if (struct_type.comptime_bits.getBit(ip, i)) continue; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .anon_struct_type => |tuple| { - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { - .none => { - if (union_type.flagsPtr(ip).status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. 
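                        // Editor's note (illustrative): this is the same
                        // guess-and-verify pattern as the struct case above.
                        // While the union's field types are still being resolved,
                        // optimistically report "has runtime bits" and record the
                        // assumption so Sema can emit an error later if the guess
                        // turns out to be wrong.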
- union_type.flagsPtr(ip).assumed_runtime_bits = true; - return true; - } - }, - .safety, .tagged => { - const tag_ty = union_type.tagTypePtr(ip).*; - // tag_ty will be `none` if this union's tag type is not resolved yet, - // in which case we want control flow to continue down below. - if (tag_ty != .none and - try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - { - return true; - } - }, - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), - .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) - return error.NeedLazy, - } - for (0..union_type.field_types.len) |field_index| { - const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - - .opaque_type => true, - .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - /// true if and only if the type has a well-defined memory layout - /// readFrom/writeToMemory are supported only for types with a well- - /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .int_type, - .vector_type, - => true, - - .error_union_type, - .error_set_type, - .inferred_error_set_type, - .anon_struct_type, - .opaque_type, - .anyframe_type, - // These are function bodies, not function pointers. - .func_type, - => false, - - .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod), - .opt_type => ty.isPtrLikeOptional(mod), - .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .void, - => true, - - .anyerror, - .adhoc_inferred_error_set, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - .generic_poison, - => false, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - // Struct with no fields have a well-defined layout of no bits. 
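            // Editor's note (illustrative examples of the check below):
            //   extern struct { x: u32 }  -> true  (non-auto layout)
            //   packed struct { x: u4 }   -> true  (non-auto layout)
            //   struct { x: u32 }         -> false (auto layout with fields)
            //   struct {}                 -> true  (no fields, zero bits)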
- return struct_type.layout != .auto or struct_type.field_types.len == 0; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - return switch (union_type.flagsPtr(ip).runtime_tag) { - .none, .safety => union_type.flagsPtr(ip).layout != .auto, - .tagged => false, - }; - }, - .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { - .auto => false, - .explicit, .nonexhaustive => true, - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }; - } - - pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; - } - - pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; - } - - pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; - } - - /// Determines whether a function type has runtime bits, i.e. whether a - /// function with this type can exist at runtime. - /// Asserts that `ty` is a function type. - /// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. - pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { - const fn_info = mod.typeToFunc(ty).?; - if (fn_info.is_generic) return false; - if (fn_info.is_var_args) return true; - if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); - } - - pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { - switch (ty.zigTypeTag(mod)) { - .Fn => return ty.fnHasRuntimeBits(mod), - else => return ty.hasRuntimeBits(mod), - } - } - - /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. - pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(mod), - }; - } - - pub fn isNoReturn(ty: Type, mod: *Module) bool { - return mod.intern_pool.isNoReturn(ty.toIntern()); - } - - /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { - return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; - } - - pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| { - if (ptr_type.flags.alignment != .none) - return ptr_type.flags.alignment; - - if (opt_sema) |sema| { - const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - - return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema), - else => unreachable, - }; - } - - pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, - else => unreachable, - }; - } - - /// Never returns `none`. 
Asserts that all necessary type resolution is already done. - pub fn abiAlignment(ty: Type, mod: *Module) Alignment { - return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - - /// May capture a reference to `ty`. - /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), - } - } - - pub const AbiAlignmentAdvanced = union(enum) { - scalar: Alignment, - val: Value, - }; - - pub const AbiAlignmentAdvancedStrat = union(enum) { - eager, - lazy, - sema: *Sema, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. - pub fn abiAlignmentAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; - }, - .ptr_type, .anyframe_type => { - return .{ .scalar = ptrAbiAlignment(target) }; - }, - .array_type => |array_type| { - return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return .{ .scalar = .@"1" }; - switch (mod.comp.getZigBackend()) { - else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); - if (elem_bits == 0) return .{ .scalar = .@"1" }; - const bytes = ((elem_bits * vector_type.len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - }, - .stage2_c => { - return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); - }, - .stage2_x86_64 => { - if (vector_type.child == .bool_type) { - if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; - if (vector_type.len > 64) return .{ .scalar = .@"16" }; - const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - } - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - if (elem_bytes == 0) return .{ .scalar = .@"1" }; - const bytes = elem_bytes * vector_type.len; - if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" }; 
- return .{ .scalar = .@"16" }; - }, - } - }, - - .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - // represents machine code; not a pointer - .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, - - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .anyopaque, - => return .{ .scalar = .@"1" }, - - .usize, - .isize, - => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, - - .export_options, - .extern_options, - .type_info, - => return .{ .scalar = ptrAbiAlignment(target) }, - - .c_char => return .{ .scalar = cTypeAlign(target, .char) }, - .c_short => return .{ .scalar = cTypeAlign(target, .short) }, - .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) }, - .c_int => return .{ .scalar = cTypeAlign(target, .int) }, - .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, - .c_long => return .{ .scalar = cTypeAlign(target, .long) }, - .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, - .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, - .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, - .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, - - .f16 => return .{ .scalar = .@"2" }, - .f32 => return .{ .scalar = cTypeAlign(target, .float) }, - .f64 => switch (target.c_type_bit_size(.double)) { - 64 => return .{ .scalar = cTypeAlign(target, .double) }, - else => return .{ .scalar = .@"8" }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return .{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => return .{ .scalar = .@"16" }, - }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return .{ .scalar = .@"1" }, - - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; - } - - const flags = struct_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - return switch (strat) { - .eager => unreachable, // struct alignment not resolved - .sema => |sema| .{ - .scalar = try 
sema.resolveStructAlignment(ty.toIntern(), struct_type), - }, - .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - }, - .anon_struct_type => |tuple| { - var big_align: Alignment = .@"1"; - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = big_align.max(field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - return .{ .scalar = big_align }; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const flags = union_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - if (!union_type.haveLayout(ip)) switch (strat) { - .eager => unreachable, // union layout not resolved - .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - - return .{ .scalar = union_type.flagsPtr(ip).alignment }; - }, - .opaque_type => return .{ .scalar = .@"1" }, - .enum_type => return .{ - .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiAlignmentAdvancedErrorUnion( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - payload_ty: Type, - ) Module.CompileError!AbiAlignmentAdvanced { - // This code needs to be kept in sync with the equivalent switch prong - // in abiSizeAdvanced. 
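        // Editor's note (illustrative): the result is the larger of the error
        // code's alignment and the payload's. Assuming the default 16-bit
        // error-set representation, `anyerror!u64` aligns to max(2, 8) == 8.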
- const code_align = abiAlignment(Type.anyerror, mod); - switch (strat) { - .eager, .sema => { - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = code_align }; - } - return .{ .scalar = code_align.max( - (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, - ) }; - }, - .lazy => { - switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, - .val => {}, - } - return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }; - }, - } - } - - fn abiAlignmentAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const child_type = ty.optionalChild(mod); - - switch (child_type.zigTypeTag(mod)) { - .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), - .NoReturn => return .{ .scalar = .@"1" }, - else => {}, - } - - switch (strat) { - .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = .@"1" }; - } - return child_type.abiAlignmentAdvanced(mod, strat); - }, - .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| return .{ .scalar = x.max(.@"1") }, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - - /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { - switch (try ty.abiSizeAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x), - } - } - - /// Asserts the type has the ABI size already resolved. - /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, mod: *Module) u64 { - return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; - } - - const AbiSizeAdvanced = union(enum) { - scalar: u64, - val: Value, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. 
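// Editor's note (an illustrative sketch of the three strategies; `sema` here is
// an assumed in-scope `*Sema`):
//   _ = try ty.abiSizeAdvanced(mod, .eager);            // asserts layout resolved
//   _ = try ty.abiSizeAdvanced(mod, .lazy);             // may yield a lazy `val`
//   _ = try ty.abiSizeAdvanced(mod, .{ .sema = sema }); // resolves as needed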
- pub fn abiSizeAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; - }, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return .{ .scalar = 0 }; - switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return .{ .scalar = len * elem_size }, - .val => switch (strat) { - .sema, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - } - }, - .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const total_bytes = switch (mod.comp.getZigBackend()) { - else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * vector_type.len; - break :total_bytes (total_bits + 7) / 8; - }, - .stage2_c => total_bytes: { - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - .stage2_x86_64 => total_bytes: { - if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - }; - return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; - }, - - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .error_union_type => |error_union_type| { - const payload_ty = Type.fromInterned(error_union_type.payload_type); - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. 
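            // Editor's note (illustrative): assuming the default 16-bit error-set
            // representation, `anyerror!u64` lays out as the payload at offset 0
            // (8 bytes, align 8) followed by the error code at offset 8 (2 bytes),
            // rounded up to the payload alignment: abiSize == 16.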
- const code_size = abiSize(Type.anyerror, mod); - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - // Same as anyerror. - return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(payload_ty, mod); - const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - var size: u64 = 0; - if (code_align.compare(.gt, payload_align)) { - size += code_size; - size = payload_align.forward(size); - size += payload_size; - size = code_align.forward(size); - } else { - size += payload_size; - size = code_align.forward(size); - size += code_size; - size = payload_align.forward(size); - } - return AbiSizeAdvanced{ .scalar = size }; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => return AbiSizeAdvanced{ .scalar = 1 }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, - }, - - .usize, - .isize, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .anyopaque, - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return AbiSizeAdvanced{ .scalar = 0 }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - - 
.type_info => unreachable, - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => switch (struct_type.layout) { - .@"packed" => { - if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - .auto, .@"extern" => { - if (!struct_type.haveLayout(ip)) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - }, - .eager => {}, - } - switch (struct_type.layout) { - .@"packed" => return .{ - .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), - }, - .auto, .@"extern" => { - assert(struct_type.haveLayout(ip)); - return .{ .scalar = struct_type.size(ip).* }; - }, - } - }, - .anon_struct_type => |tuple| { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy, .eager => {}, - } - const field_count = tuple.types.len; - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - - assert(union_type.haveLayout(ip)); - return .{ .scalar = union_type.size(ip).* }; - }, - .opaque_type => unreachable, // no size available - .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiSizeAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const child_ty = ty.optionalChild(mod); - - if (child_ty.isNoReturn(mod)) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - - if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; - - if (ty.optionalReprIsPayload(mod)) { - return abiSizeAdvanced(child_ty, mod, strat); - } - - const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second. 
Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. - return AbiSizeAdvanced{ - .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, - }; - } - - pub fn ptrAbiAlignment(target: Target) Alignment { - return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); - } - - pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 { - return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8))); - } - - pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment { - return switch (target.cpu.arch) { - .x86 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...64 => .@"4", - else => .@"16", - }, - .x86_64 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...32 => .@"4", - 33...64 => .@"8", - else => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => .@"8", - else => .@"16", - }, - }, - else => return Alignment.fromByteUnits(@min( - std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), - maxIntAlignment(target, use_llvm), - )), - }; - } - - pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { - return switch (target.cpu.arch) { - .avr => 1, - .msp430 => 2, - .xcore => 4, - - .arm, - .armeb, - .thumb, - .thumbeb, - .hexagon, - .mips, - .mipsel, - .powerpc, - .powerpcle, - .r600, - .amdgcn, - .riscv32, - .sparc, - .sparcel, - .s390x, - .lanai, - .wasm32, - .wasm64, - => 8, - - // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16 - // is a relevant number in three cases: - // 1. Different machine code instruction when loading into SIMD register. - // 2. The C ABI wants 16 for extern structs. - // 3. 16-byte cmpxchg needs 16-byte alignment. - // Same logic for powerpc64, mips64, sparc64. - .powerpc64, - .powerpc64le, - .mips64, - .mips64el, - .sparc64, - => switch (target.ofmt) { - .c => 16, - else => 8, - }, - - .x86_64 => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => 8, - else => 16, - }, - - // Even LLVMABIAlignmentOfType(i128) agrees on these targets. - .x86, - .aarch64, - .aarch64_be, - .aarch64_32, - .riscv64, - .bpfel, - .bpfeb, - .nvptx, - .nvptx64, - => 16, - - // Below this comment are unverified but based on the fact that C requires - // int128_t to be 16 bytes aligned, it's a safe default. - .spu_2, - .csky, - .arc, - .m68k, - .tce, - .tcele, - .le32, - .amdil, - .hsail, - .spir, - .kalimba, - .renderscript32, - .spirv, - .spirv32, - .shave, - .le64, - .amdil64, - .hsail64, - .spir64, - .renderscript64, - .ve, - .spirv64, - .dxil, - .loongarch32, - .loongarch64, - .xtensa, - => 16, - }; - } - - pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, null) catch unreachable; - } - - /// If you pass `opt_sema`, any recursive type resolutions will happen if - /// necessary, possibly returning a CompileError. Passing `null` instead asserts - /// the type is fully resolved, and there will be no error, guaranteed. 
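// Editor's note (illustrative): bit size is the information width, not the
// storage size. For example, on most targets `u24` has bitSize 24 but abiSize 4,
// and `@Vector(3, bool)` has bitSize 3 (one bit per element) but abiSize 1.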
- pub fn bitSizeAdvanced( - ty: Type, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!u64 { - const target = mod.getTarget(); - const ip = &mod.intern_pool; - - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - .anyframe_type => return target.ptrBitWidth(), - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return 0; - const elem_ty = Type.fromInterned(array_type.child); - const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, - ); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - - .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), - - .error_union_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - .anyerror, - .adhoc_inferred_error_set, - => return mod.errorSetBits(), - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - - .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .call_modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - const is_packed = struct_type.layout == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (is_packed) { - return try 
Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); - } - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .anon_struct_type => { - if (opt_sema) |sema| try sema.resolveTypeFields(ty); - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const is_packed = ty.containerLayout(mod) == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); - - var size: u64 = 0; - for (0..union_type.field_types.len) |field_index| { - const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); - } - - return size; - }, - .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - /// Returns true if the type's layout is already resolved and it is safe - /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), - .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), - .array_type => |array_type| { - if (array_type.lenIncludingSentinel() == 0) return true; - return Type.fromInterned(array_type.child).layoutIsResolved(mod); - }, - .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), - .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), - else => true, - }; - } - - pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.flags.size == .One, - else => false, - }; - } - - /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty, mod).?; - } - - /// Returns `null` if `ty` is not a pointer. 
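// Editor's note (illustrative): for `*u8` this returns .One, for `[]u8` .Slice,
// for `[*c]u8` .C; ptrSizeOrNull returns null for a non-pointer such as `u32`.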
- pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.flags.size, - else => null, - }; - } - - pub fn isSlice(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, - else => false, - }; - } - - pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { - return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern())); - } - - pub fn isConstPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_const, - else => false, - }; - } - - pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { - return isVolatilePtrIp(ty, &mod.intern_pool); - } - - pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { - return switch (ip.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_volatile, - else => false, - }; - } - - pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, - .opt_type => true, - else => false, - }; - } - - pub fn isCPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - else => false, - }; - } - - pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => false, - .One, .Many, .C => true, - }, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| switch (p.flags.size) { - .Slice, .C => false, - .Many, .One => !p.flags.is_allowzero, - }, - else => false, - }, - else => false, - }; - } - - /// For pointer-like optionals, returns true, otherwise returns the allowzero property - /// of pointers. - pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { - if (ty.isPtrLikeOptional(mod)) { - return true; - } - return ty.ptrInfo(mod).flags.is_allowzero; - } - - /// See also `isPtrLikeOptional`. - pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { - .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, - .error_set_type, .inferred_error_set_type => true, - else => false, - }, - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - else => false, - }; - } - - /// Returns true if the type is optional and would be lowered to a single pointer - /// address value, using 0 for null. Note that this returns true for C pointers. - /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. - pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice, .C => false, - .Many, .One => !ptr_type.flags.is_allowzero, - }, - else => false, - }, - else => false, - }; - } - - /// For *[N]T, returns [N]T. - /// For *T, returns T. - /// For [*]T, returns T. 
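// Editor's note (illustrative): for `*[4]u8` this returns `[4]u8`, and for
// `[*:0]const u8` it returns `u8`; elemType2 below additionally sees through
// the array hop, so it maps `*[4]u8` all the way to `u8`.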
- pub fn childType(ty: Type, mod: *const Module) Type { - return childTypeIp(ty, &mod.intern_pool); - } - - pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { - return Type.fromInterned(ip.childType(ty.toIntern())); - } - - /// For *[N]T, returns T. - /// For ?*T, returns T. - /// For ?*[N]T, returns T. - /// For ?[*]T, returns T. - /// For *T, returns T. - /// For [*]T, returns T. - /// For [N]T, returns T. - /// For []T, returns T. - /// For anyframe->T, returns T. - pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .One => Type.fromInterned(ptr_type.child).shallowElemType(mod), - .Many, .C, .Slice => Type.fromInterned(ptr_type.child), - }, - .anyframe_type => |child| { - assert(child != .none); - return Type.fromInterned(child); - }, - .vector_type => |vector_type| Type.fromInterned(vector_type.child), - .array_type => |array_type| Type.fromInterned(array_type.child), - .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)), - else => unreachable, - }; - } - - fn shallowElemType(child_ty: Type, mod: *const Module) Type { - return switch (child_ty.zigTypeTag(mod)) { - .Array, .Vector => child_ty.childType(mod), - else => child_ty, - }; - } - - /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type, mod: *Module) Type { - return switch (ty.zigTypeTag(mod)) { - .Vector => ty.childType(mod), - else => ty, - }; - } - - /// Asserts that the type is an optional. - /// Note that for C pointers this returns the type unmodified. - pub fn optionalChild(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child| Type.fromInterned(child), - .ptr_type => |ptr_type| b: { - assert(ptr_type.flags.size == .C); - break :b ty; - }, - else => unreachable, - }; - } - - /// Returns the tag type of a union, if the type is a union and it has a tag type. - /// Otherwise, returns `null`. - pub fn unionTagType(ty: Type, mod: *Module) ?Type { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .union_type => {}, - else => return null, - } - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { - .tagged => { - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); - return Type.fromInterned(union_type.enum_tag_ty); - }, - else => return null, - } - } - - /// Same as `unionTagType` but includes safety tag. - /// Codegen should use this version. - pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - if (!union_type.hasTag(ip)) return null; - assert(union_type.haveFieldTypes(ip)); - return Type.fromInterned(union_type.enum_tag_ty); - }, - else => null, - }; - } - - /// Asserts the type is a union; returns the tag type, even if the tag will - /// not be stored at runtime. 
- pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { - const union_obj = mod.typeToUnion(ty).?; - return Type.fromInterned(union_obj.enum_tag_ty); - } - - pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - const union_fields = union_obj.field_types.get(ip); - const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null; - return Type.fromInterned(union_fields[index]); - } - - pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - return Type.fromInterned(union_obj.field_types.get(ip)[index]); - } - - pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { - const union_obj = mod.typeToUnion(ty).?; - return mod.unionTagFieldIndex(union_obj, enum_tag); - } - - pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - for (union_obj.field_types.get(ip)) |field_ty| { - if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; - } - return true; - } - - /// Returns the type used for backing storage of this union during comptime operations. - /// Asserts the type is either an extern or packed union. - pub fn unionBackingType(ty: Type, mod: *Module) !Type { - return switch (ty.containerLayout(mod)) { - .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), - .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), - .auto => unreachable, - }; - } - - pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { - const ip = &mod.intern_pool; - const union_obj = ip.loadUnionType(ty.toIntern()); - return mod.getUnionLayout(union_obj); - } - - pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).layout, - .anon_struct_type => .auto, - .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, - else => unreachable, - }; - } - - /// Asserts that the type is an error union. - pub fn errorUnionPayload(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type); - } - - /// Asserts that the type is an error union. - pub fn errorUnionSet(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern())); - } - - /// Returns false for unresolved inferred error sets. - pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type, .adhoc_inferred_error_set_type => false, - else => switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |error_set_type| error_set_type.names.len == 0, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .none, .anyerror_type => false, - else => |t| ip.indexToKey(t).error_set_type.names.len == 0, - }, - else => unreachable, - }, - }; - } - - /// Returns true if it is an error set that includes anyerror, false otherwise. - /// Note that the result may be a false negative if the type did not get error set - /// resolution prior to this call. 
- pub fn isAnyError(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type => true, - .adhoc_inferred_error_set_type => false, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, - else => false, - }, - }; - } - - pub fn isError(ty: Type, mod: *const Module) bool { - return switch (ty.zigTypeTag(mod)) { - .ErrorUnion, .ErrorSet => true, - else => false, - }; - } - - /// Returns whether ty, which must be an error set, includes an error `name`. - /// Might return a false negative if `ty` is an inferred error set and not fully - /// resolved yet. - pub fn errorSetHasFieldIp( - ip: *const InternPool, - ty: InternPool.Index, - name: InternPool.NullTerminatedString, - ) bool { - return switch (ty) { - .anyerror_type => true, - else => switch (ip.indexToKey(ty)) { - .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .anyerror_type => true, - .none => false, - else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, - }, - else => unreachable, - }, - }; - } - - /// Returns whether ty, which must be an error set, includes an error `name`. - /// Might return a false negative if `ty` is an inferred error set and not fully - /// resolved yet. - pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type => true, - else => switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |error_set_type| { - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return error_set_type.nameIndex(ip, field_name_interned) != null; - }, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .anyerror_type => true, - .none => false, - else => |t| { - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; - }, - }, - else => unreachable, - }, - }; - } - - /// Asserts the type is an array or vector or struct. - pub fn arrayLen(ty: Type, mod: *const Module) u64 { - return ty.arrayLenIp(&mod.intern_pool); - } - - pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { - return ip.aggregateTypeLen(ty.toIntern()); - } - - pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { - return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern()); - } - - pub fn vectorLen(ty: Type, mod: *const Module) u32 { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .vector_type => |vector_type| vector_type.len, - .anon_struct_type => |tuple| @intCast(tuple.types.len), - else => unreachable, - }; - } - - /// Asserts the type is an array, pointer or vector. - pub fn sentinel(ty: Type, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .vector_type, - .struct_type, - .anon_struct_type, - => null, - - .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, - .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, - - else => unreachable, - }; - } - - /// Returns true if and only if the type is a fixed-width integer. 
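// Editor's note (illustrative): `u32` and `usize` are fixed-width integers, so
// this returns true; `comptime_int` and `f32` are not, so it returns false.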
- pub fn isInt(self: Type, mod: *const Module) bool { - return self.toIntern() != .comptime_int_type and - mod.intern_pool.isIntegerType(self.toIntern()); - } - - /// Returns true if and only if the type is a fixed-width, signed integer. - pub fn isSignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .c_char_type => mod.getTarget().charSignedness() == .signed, - .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.signedness == .signed, - else => false, - }, - }; - } - - /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .c_char_type => mod.getTarget().charSignedness() == .unsigned, - .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.signedness == .unsigned, - else => false, - }, - }; - } - - /// Returns true for integers, enums, error sets, and packed structs. - /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Int, .Enum, .ErrorSet => true, - .Struct => ty.containerLayout(mod) == .@"packed", - else => false, - }; - } - - /// Asserts the type is an integer, enum, error set, or vector of one of them. - pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { - const ip = &mod.intern_pool; - const target = mod.getTarget(); - var ty = starting_ty; - - while (true) switch (ty.toIntern()) { - .anyerror_type, .adhoc_inferred_error_set_type => { - return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; - }, - .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) }, - .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| return int_type, - .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), - .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), - .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), - - .error_set_type, .inferred_error_set_type => { - return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; - }, - - .anon_struct_type => unreachable, - - .ptr_type => unreachable, - .anyframe_type => unreachable, - .array_type => unreachable, - - .opt_type => 
unreachable, - .error_union_type => unreachable, - .func_type => unreachable, - .simple_type => unreachable, // handled via Index enum tag above - - .union_type => unreachable, - .opaque_type => unreachable, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - pub fn isNamedInt(ty: Type) bool { - return switch (ty.toIntern()) { - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - => true, - - else => false, - }; - } - - /// Returns `false` for `comptime_float`. - pub fn isRuntimeFloat(ty: Type) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - => true, - - else => false, - }; - } - - /// Returns `true` for `comptime_float`. - pub fn isAnyFloat(ty: Type) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - .comptime_float_type, - => true, - - else => false, - }; - } - - /// Asserts the type is a fixed-size float or comptime_float. - /// Returns 128 for comptime_float types. - pub fn floatBits(ty: Type, target: Target) u16 { - return switch (ty.toIntern()) { - .f16_type => 16, - .f32_type => 32, - .f64_type => 64, - .f80_type => 80, - .f128_type, .comptime_float_type => 128, - .c_longdouble_type => target.c_type_bit_size(.longdouble), - - else => unreachable, - }; - } - - /// Asserts the type is a function or a function pointer. - pub fn fnReturnType(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern())); - } - - /// Asserts the type is a function. - pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { - return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; - } - - pub fn isValidParamType(self: Type, mod: *const Module) bool { - return switch (self.zigTypeTagOrPoison(mod) catch return true) { - .Opaque, .NoReturn => false, - else => true, - }; - } - - pub fn isValidReturnType(self: Type, mod: *const Module) bool { - return switch (self.zigTypeTagOrPoison(mod) catch return true) { - .Opaque => false, - else => true, - }; - } - - /// Asserts the type is a function. - pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { - return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; - } - - pub fn isNumeric(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - .comptime_int_type, - .comptime_float_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - => true, - - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => true, - else => false, - }, - }; - } - - /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which - /// resolves field types rather than asserting they are already resolved. 
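As a sketch of the contract (hypothetical `zcu: *Module`; `try` because interning the result may allocate):

    const many = try Type.@"u8".onePossibleValue(zcu); // null: u8 has many values
    const u0_ty = try zcu.intType(.unsigned, 0);
    const only = try u0_ty.onePossibleValue(zcu); // an integer Value equal to 0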
- pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { - var ty = starting_type; - const ip = &mod.intern_pool; - while (true) switch (ty.toIntern()) { - .empty_struct_type => return Value.empty_struct, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - - .ptr_type, - .error_union_type, - .func_type, - .anyframe_type, - .error_set_type, - .inferred_error_set_type, - => return null, - - inline .array_type, .vector_type => |seq_type, seq_tag| { - const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; - if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = &.{} }, - } }))); - if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .repeated_elem = opv.toIntern() }, - } }))); - } - return null; - }, - .opt_type => |child| { - if (child == .noreturn_type) { - return try mod.nullValue(ty); - } else { - return null; - } - }, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .enum_literal, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .adhoc_inferred_error_set, - => return null, - - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.haveFieldTypes(ip)); - if (struct_type.knownNonOpv(ip)) - return null; - const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len); - defer mod.gpa.free(field_vals); - for (field_vals, 0..) |*field_val, i_usize| { - const i: u32 = @intCast(i_usize); - if (struct_type.fieldIsComptime(ip, i)) { - assert(struct_type.haveFieldInits(ip)); - field_val.* = struct_type.field_inits.get(ip)[i]; - continue; - } - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.onePossibleValue(mod)) |field_opv| { - field_val.* = field_opv.toIntern(); - } else return null; - } - - // In this case the struct has no runtime-known fields and - // therefore has one possible value. - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = field_vals }, - } }))); - }, - - .anon_struct_type => |tuple| { - for (tuple.values.get(ip)) |val| { - if (val == .none) return null; - } - // In this case the struct has all comptime-known fields and - // therefore has one possible value. 
- // TODO: write something like getCoercedInts to avoid needing to dupe - const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); - defer mod.gpa.free(duped_values); - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = duped_values }, - } }))); - }, - - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse - return null; - if (union_obj.field_types.len == 0) { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); - return Value.fromInterned(only); - } - const only_field_ty = union_obj.field_types.get(ip)[0]; - const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse - return null; - const only = try mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = tag_val.toIntern(), - .val = val_val.toIntern(), - } }); - return Value.fromInterned(only); - }, - .opaque_type => return null, - .enum_type => { - const enum_type = ip.loadEnumType(ty.toIntern()); - switch (enum_type.tag_mode) { - .nonexhaustive => { - if (enum_type.tag_ty == .comptime_int_type) return null; - - if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = int_opv.toIntern(), - } }); - return Value.fromInterned(only); - } - - return null; - }, - .auto, .explicit => { - if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; - - switch (enum_type.names.len) { - 0 => { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); - return Value.fromInterned(only); - }, - 1 => { - if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), - } }); - return Value.fromInterned(only); - } else { - return Value.fromInterned(enum_type.values.get(ip)[0]); - } - }, - else => return null, - } - }, - } - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - /// During semantic analysis, instead call `Sema.typeRequiresComptime` which - /// resolves field types rather than asserting they are already resolved. - pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; - } - - /// `generic_poison` will return false. - /// May return false negatives when structs and unions are having their field types resolved. - /// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
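Illustrative expectations for already-resolved types (hypothetical `zcu`; not exhaustive):

    Type.@"comptime_int".comptimeOnly(zcu) // true: no runtime representation
    Type.@"type".comptimeOnly(zcu) // true
    Type.@"u32".comptimeOnly(zcu) // false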
- pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .empty_struct_type => false, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => false, - .ptr_type => |ptr_type| { - const child_ty = Type.fromInterned(ptr_type.child); - switch (child_ty.zigTypeTag(mod)) { - .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), - .Opaque => return false, - else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), - } - }, - .anyframe_type => |child| { - if (child == .none) return false; - return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); - }, - .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), - .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), - - .error_set_type, - .inferred_error_set_type, - => false, - - // These are function bodies, not function pointers. - .func_type => true, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .anyerror, - .adhoc_inferred_error_set, - .noreturn, - .generic_poison, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => false, - - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => true, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - // packed structs cannot be comptime-only because they have a well-defined - // memory layout and every field has a well-defined bit pattern. - if (struct_type.layout == .@"packed") - return false; - - // A struct with no fields is not comptime-only. - return switch (struct_type.flagsPtr(ip).requires_comptime) { - .no, .wip => false, - .yes => true, - .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; - - if (struct_type.flagsPtr(ip).field_types_wip) - return false; - - struct_type.flagsPtr(ip).requires_comptime = .wip; - errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; - - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); - - for (0..struct_type.field_types.len) |i_usize| { - const i: u32 = @intCast(i_usize); - if (struct_type.fieldIsComptime(ip, i)) continue; - const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { - // Note that this does not cause the layout to - // be considered resolved. Comptime-only types - // still maintain a layout of their - // runtime-known fields. 
- struct_type.flagsPtr(ip).requires_comptime = .yes; - return true; - } - } - - struct_type.flagsPtr(ip).requires_comptime = .no; - return false; - }, - }; - }, - - .anon_struct_type => |tuple| { - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - const have_comptime_val = val != .none; - if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; - } - return false; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; - - if (union_type.flagsPtr(ip).status == .field_types_wip) - return false; - - union_type.flagsPtr(ip).requires_comptime = .wip; - errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; - - try sema.resolveTypeFieldsUnion(ty, union_type); - - for (0..union_type.field_types.len) |field_idx| { - const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { - union_type.flagsPtr(ip).requires_comptime = .yes; - return true; - } - } - - union_type.flagsPtr(ip).requires_comptime = .no; - return false; - }, - } - }, - - .opaque_type => false, - - .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - pub fn isVector(ty: Type, mod: *const Module) bool { - return ty.zigTypeTag(mod) == .Vector; - } - - /// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. - pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { - if (!ty.isVector(zcu)) return 0; - const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; - return v.len * Type.fromInterned(v.child).bitSize(zcu); - } - - pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - else => false, - }; - } - - pub fn isIndexable(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - .Pointer => switch (ty.ptrSize(mod)) { - .Slice, .Many, .C => true, - .One => switch (ty.childType(mod).zigTypeTag(mod)) { - .Array, .Vector => true, - .Struct => ty.childType(mod).isTuple(mod), - else => false, - }, - }, - .Struct => ty.isTuple(mod), - else => false, - }; - } - - pub fn indexableHasLen(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - .Pointer => switch (ty.ptrSize(mod)) { - .Many, .C => false, - .Slice => true, - .One => switch (ty.childType(mod).zigTypeTag(mod)) { - .Array, .Vector => true, - .Struct => ty.childType(mod).isTuple(mod), - else => false, - }, - }, - .Struct => ty.isTuple(mod), - else => false, - }; - } - - /// Asserts that the type can have a namespace. - pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { - return ty.getNamespace(zcu).?; - } - - /// Returns null if the type has no namespace. 
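A sketch of the two layers of optionality involved (hypothetical `zcu` and container type `s_ty`):

    if (s_ty.getNamespace(zcu)) |opt_ns| {
        // `opt_ns` may still be `.none` for namespace-less containers.
        _ = opt_ns.unwrap();
    }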
- pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex { - const ip = &zcu.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace, - .struct_type => ip.loadStructType(ty.toIntern()).namespace, - .union_type => ip.loadUnionType(ty.toIntern()).namespace, - .enum_type => ip.loadEnumType(ty.toIntern()).namespace, - - .anon_struct_type => .none, - .simple_type => |s| switch (s) { - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => .none, - else => null, - }, - - else => null, - }; - } - - // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); - return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = dest_ty.toIntern(), - .storage = .{ .repeated_elem = scalar.toIntern() }, - } }))) else scalar; - } - - /// Asserts that the type is an integer. - pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { - const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0); - if (info.bits == 0) return mod.intValue(dest_ty, -1); - - if (std.math.cast(u6, info.bits - 1)) |shift| { - const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); - return mod.intValue(dest_ty, n); - } - - var res = try std.math.big.int.Managed.init(mod.gpa); - defer res.deinit(); - - try res.setTwosCompIntLimit(.min, info.signedness, info.bits); - - return mod.intValue_big(dest_ty, res.toConst()); - } - - // Works for vectors and vectors of integers. - /// The returned Value will have type dest_ty. - pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); - return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = dest_ty.toIntern(), - .storage = .{ .repeated_elem = scalar.toIntern() }, - } }))) else scalar; - } - - /// The returned Value will have type dest_ty. - pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { - const info = ty.intInfo(mod); - - switch (info.bits) { - 0 => return switch (info.signedness) { - .signed => try mod.intValue(dest_ty, -1), - .unsigned => try mod.intValue(dest_ty, 0), - }, - 1 => return switch (info.signedness) { - .signed => try mod.intValue(dest_ty, 0), - .unsigned => try mod.intValue(dest_ty, 1), - }, - else => {}, - } - - if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) { - .signed => { - const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift); - return mod.intValue(dest_ty, n); - }, - .unsigned => { - const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift); - return mod.intValue(dest_ty, n); - }, - }; - - var res = try std.math.big.int.Managed.init(mod.gpa); - defer res.deinit(); - - try res.setTwosCompIntLimit(.max, info.signedness, info.bits); - - return mod.intValue_big(dest_ty, res.toConst()); - } - - /// Asserts the type is an enum or a union. 
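A worked instance of the 64-bit fast path above for `i8` (bits = 8, so shift = 7 and 63 - shift = 56):

    min: @as(i64, std.math.minInt(i64)) >> 56 == -128
    max: @as(i64, std.math.maxInt(i64)) >> 56 == 127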
- pub fn intTagType(ty: Type, mod: *Module) Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod), - .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), - else => unreachable, - }; - } - - pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { - .nonexhaustive => true, - .auto, .explicit => false, - }, - else => false, - }; - } - - // Asserts that `ty` is an error set and not `anyerror`. - // Asserts that `ty` is resolved if it is an inferred error set. - pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |x| x.names, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .none => unreachable, // unresolved inferred error set - .anyerror_type => unreachable, - else => |t| ip.indexToKey(t).error_set_type.names, - }, - else => unreachable, - }; - } - - pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { - return mod.intern_pool.loadEnumType(ty.toIntern()).names; - } - - pub fn enumFieldCount(ty: Type, mod: *Module) usize { - return mod.intern_pool.loadEnumType(ty.toIntern()).names.len; - } - - pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { - const ip = &mod.intern_pool; - return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index]; - } - - pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 { - const ip = &mod.intern_pool; - const enum_type = ip.loadEnumType(ty.toIntern()); - return enum_type.nameIndex(ip, field_name); - } - - /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or - /// an integer which represents the enum value. Returns the field index in - /// declaration order, or `null` if `enum_tag` does not match any field. - pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { - const ip = &mod.intern_pool; - const enum_type = ip.loadEnumType(ty.toIntern()); - const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) { - .int => enum_tag.toIntern(), - .enum_tag => |info| info.int, - else => unreachable, - }; - assert(ip.typeOf(int_tag) == enum_type.tag_ty); - return enum_type.tagValueIndex(ip, int_tag); - } - - /// Returns none in the case of a tuple which uses the integer index as the field name. - pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index), - .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index), - else => unreachable, - }; - } - - pub fn structFieldCount(ty: Type, mod: *Module) u32 { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_types.len, - .anon_struct_type => |anon_struct| anon_struct.types.len, - else => unreachable, - }; - } - - /// Supports structs and unions. 
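Sketch: mapping a tag value back to its declaration-order field name via the enum queries above (hypothetical `zcu`, enum type `e_ty`, and tag value `tag_val`):

    if (e_ty.enumTagFieldIndex(tag_val, zcu)) |i| {
        const name = e_ty.enumFieldName(i, zcu); // an interned field name
        _ = name;
    }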
- pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - return Type.fromInterned(union_obj.field_types.get(ip)[index]); - }, - .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), - else => unreachable, - }; - } - - pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; - } - - pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { - const ip = &zcu.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.layout != .@"packed"); - const explicit_align = struct_type.fieldAlign(ip, index); - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - if (opt_sema) |sema| { - return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } else { - return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } - }, - .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; - }, - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - if (opt_sema) |sema| { - return sema.unionFieldAlignment(union_obj, @intCast(index)); - } else { - return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); - } - }, - else => unreachable, - } - } - - pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - const val = struct_type.fieldInit(ip, index); - // TODO: avoid using `unreachable` to indicate this. - if (val == .none) return Value.@"unreachable"; - return Value.fromInterned(val); - }, - .anon_struct_type => |anon_struct| { - const val = anon_struct.values.get(ip)[index]; - // TODO: avoid using `unreachable` to indicate this. 
- if (val == .none) return Value.@"unreachable"; - return Value.fromInterned(val); - }, - else => unreachable, - } - } - - pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.fieldIsComptime(ip, index)) { - assert(struct_type.haveFieldInits(ip)); - return Value.fromInterned(struct_type.field_inits.get(ip)[index]); - } else { - return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); - } - }, - .anon_struct_type => |tuple| { - const val = tuple.values.get(ip)[index]; - if (val == .none) { - return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); - } else { - return Value.fromInterned(val); - } - }, - else => unreachable, - } - } - - pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), - .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, - else => unreachable, - }; - } - - pub const FieldOffset = struct { - field: usize, - offset: u64, - }; - - /// Supports structs and unions. - pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.haveLayout(ip)); - assert(struct_type.layout != .@"packed"); - return struct_type.offsets.get(ip)[index]; - }, - - .anon_struct_type => |tuple| { - var offset: u64 = 0; - var big_align: Alignment = .none; - - for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { - // comptime field - if (i == index) return offset; - continue; - } - - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); - big_align = big_align.max(field_align); - offset = field_align.forward(offset); - if (i == index) return offset; - offset += Type.fromInterned(field_ty).abiSize(mod); - } - offset = big_align.max(.@"1").forward(offset); - return offset; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - if (!union_type.hasTag(ip)) - return 0; - const layout = mod.getUnionLayout(union_type); - if (layout.tag_align.compare(.gte, layout.payload_align)) { - // {Tag, Payload} - return layout.payload_align.forward(layout.tag_size); - } else { - // {Payload, Tag} - return 0; - } - }, - - else => unreachable, - } - } - - pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { - return ty.getOwnerDeclOrNull(mod) orelse unreachable; - } - - pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), - .union_type => ip.loadUnionType(ty.toIntern()).decl, - .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, - .enum_type => ip.loadEnumType(ty.toIntern()).decl, - else => null, - }; - } - - pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { - const ip = &zcu.intern_pool; - return .{ - .base_node_inst = switch (ip.indexToKey(ty.toIntern())) { - .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { - .declared => |d| d.zir_index, - .reified => |r| r.zir_index, - .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, - .empty_struct => return null, - }, - else => return null, - }, - .offset = Module.LazySrcLoc.Offset.nodeOffset(0), - }; - } - - pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc { - return ty.srcLocOrNull(zcu).?; - } - - pub fn isGenericPoison(ty: Type) bool { - return ty.toIntern() == .generic_poison_type; - } - - pub fn isTuple(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; - }, - .anon_struct_type => |anon_struct| anon_struct.names.len == 0, - else => false, - }; - } - - pub fn isAnonStruct(ty: Type, mod: *Module) bool { - if (ty.toIntern() == .empty_struct_type) return true; - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, - else => false, - }; - } - - pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; - }, - .anon_struct_type => true, - else => false, - }; - } - - pub fn isSimpleTuple(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, - else => false, - }; - } - - pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { - 
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .anon_struct_type => true,
- else => false,
- };
- }
-
- /// Traverses optional child types and error union payloads until the type
- /// is not a pointer. For `E!?u32`, returns `u32`; for `*u8`, returns `*u8`.
- pub fn optEuBaseType(ty: Type, mod: *Module) Type {
- var cur = ty;
- while (true) switch (cur.zigTypeTag(mod)) {
- .Optional => cur = cur.optionalChild(mod),
- .ErrorUnion => cur = cur.errorUnionPayload(mod),
- else => return cur,
- };
- }
-
- pub fn toUnsigned(ty: Type, mod: *Module) !Type {
- return switch (ty.zigTypeTag(mod)) {
- .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
- .Vector => try mod.vectorType(.{
- .len = ty.vectorLen(mod),
- .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
- }),
- else => unreachable,
- };
- }
-
- pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
- .union_type => ip.loadUnionType(ty.toIntern()).zir_index,
- .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
- .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
- else => null,
- };
- }
-
- pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
- const ip = &zcu.intern_pool;
- const tracked = switch (ip.indexToKey(ty.toIntern())) {
- .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
- .declared => |d| d.zir_index,
- .reified => |r| r.zir_index,
- .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
- .empty_struct => return null,
- },
- else => return null,
- };
- const info = tracked.resolveFull(&zcu.intern_pool);
- const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?];
- assert(file.zir_loaded);
- const zir = file.zir;
- const inst = zir.instructions.get(@intFromEnum(info.inst));
- assert(inst.tag == .extended);
- return switch (inst.data.extended.opcode) {
- .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
- .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
- .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
- .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
- .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
- else => unreachable,
- };
- }
-
- /// Given a namespace type, returns its list of captured values.
- pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).captures,
- .union_type => ip.loadUnionType(ty.toIntern()).captures,
- .enum_type => ip.loadEnumType(ty.toIntern()).captures,
- .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
- else => unreachable,
- };
- }
-
- pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
- var cur_ty: Type = ty;
- var cur_len: u64 = 1;
- while (cur_ty.zigTypeTag(zcu) == .Array) {
- cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
- cur_ty = cur_ty.childType(zcu);
- }
- return .{ cur_ty, cur_len };
- }
-
- pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
- /// The result is a bit-pointer with the same value and a new packed offset.
- bit_ptr: InternPool.Key.PtrType.PackedOffset, - /// The result is a standard pointer. - byte_ptr: struct { - /// The byte offset of the field pointer from the parent pointer value. - offset: u64, - /// The alignment of the field pointer type. - alignment: InternPool.Alignment, - }, - } { - comptime assert(Type.packed_struct_layout_version == 2); - - const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); - const field_ty = struct_ty.structFieldType(field_idx, zcu); - - var bit_offset: u16 = 0; - var running_bits: u16 = 0; - for (0..struct_ty.structFieldCount(zcu)) |i| { - const f_ty = struct_ty.structFieldType(i, zcu); - if (i == field_idx) { - bit_offset = running_bits; - } - running_bits += @intCast(f_ty.bitSize(zcu)); - } - - const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) - .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset } - else - .{ (running_bits + 7) / 8, bit_offset }; - - // If the field happens to be byte-aligned, simplify the pointer type. - // We can only do this if the pointee's bit size matches its ABI byte size, - // so that loads and stores do not interfere with surrounding packed bits. - // - // TODO: we do not attempt this with big-endian targets yet because of nested - // structs and floats. I need to double-check the desired behavior for big endian - // targets before adding the necessary complications to this code. This will not - // cause miscompilations; it only means the field pointer uses bit masking when it - // might not be strictly necessary. - if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { - const byte_offset = res_bit_offset / 8; - const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); - return .{ .byte_ptr = .{ - .offset = byte_offset, - .alignment = new_align, - } }; - } - - return .{ .bit_ptr = .{ - .host_size = res_host_size, - .bit_offset = res_bit_offset, - } }; - } - - pub const @"u1": Type = .{ .ip_index = .u1_type }; - pub const @"u8": Type = .{ .ip_index = .u8_type }; - pub const @"u16": Type = .{ .ip_index = .u16_type }; - pub const @"u29": Type = .{ .ip_index = .u29_type }; - pub const @"u32": Type = .{ .ip_index = .u32_type }; - pub const @"u64": Type = .{ .ip_index = .u64_type }; - pub const @"u128": Type = .{ .ip_index = .u128_type }; - - pub const @"i8": Type = .{ .ip_index = .i8_type }; - pub const @"i16": Type = .{ .ip_index = .i16_type }; - pub const @"i32": Type = .{ .ip_index = .i32_type }; - pub const @"i64": Type = .{ .ip_index = .i64_type }; - pub const @"i128": Type = .{ .ip_index = .i128_type }; - - pub const @"f16": Type = .{ .ip_index = .f16_type }; - pub const @"f32": Type = .{ .ip_index = .f32_type }; - pub const @"f64": Type = .{ .ip_index = .f64_type }; - pub const @"f80": Type = .{ .ip_index = .f80_type }; - pub const @"f128": Type = .{ .ip_index = .f128_type }; - - pub const @"bool": Type = .{ .ip_index = .bool_type }; - pub const @"usize": Type = .{ .ip_index = .usize_type }; - pub const @"isize": Type = .{ .ip_index = .isize_type }; - pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type }; - pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type }; - pub const @"void": Type = .{ .ip_index = .void_type }; - pub const @"type": Type = .{ .ip_index = .type_type }; - pub const @"anyerror": Type = .{ .ip_index = .anyerror_type }; - pub const @"anyopaque": 
Type = .{ .ip_index = .anyopaque_type }; - pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; - pub const @"null": Type = .{ .ip_index = .null_type }; - pub const @"undefined": Type = .{ .ip_index = .undefined_type }; - pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; - - pub const @"c_char": Type = .{ .ip_index = .c_char_type }; - pub const @"c_short": Type = .{ .ip_index = .c_short_type }; - pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; - pub const @"c_int": Type = .{ .ip_index = .c_int_type }; - pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; - pub const @"c_long": Type = .{ .ip_index = .c_long_type }; - pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; - pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; - pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; - pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; - - pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; - pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; - pub const single_const_pointer_to_comptime_int: Type = .{ - .ip_index = .single_const_pointer_to_comptime_int_type, - }; - pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; - pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; - - pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; - - pub fn smallestUnsignedBits(max: u64) u16 { - if (max == 0) return 0; - const base = std.math.log2(max); - const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; - return @as(u16, @intCast(base + @intFromBool(upper < max))); - } - - /// This is only used for comptime asserts. Bump this number when you make a change - /// to packed struct layout to find out all the places in the codebase you need to edit! - pub const packed_struct_layout_version = 2; -}; - -fn cTypeAlign(target: Target, c_type: Target.CType) Alignment { - return Alignment.fromByteUnits(target.c_type_alignment(c_type)); -} From 0e5335aaf5e0ac646fbd46a319710019d10c2971 Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 05:00:32 +0100 Subject: [PATCH 040/152] compiler: rework type resolution, fully resolve all types I'm so sorry. This commit was just meant to be making all types fully resolve by queueing resolution at the moment of their creation. Unfortunately, a lot of dominoes ended up falling. Here's what happened: * I added a work queue job to fully resolve a type. * I realised that from here we could eliminate `Sema.types_to_resolve` if we made function codegen a separate job. This is desirable for simplicity of both spec and implementation. * This led to a new AIR traversal to detect whether any required type is unresolved. If a type in the AIR failed to resolve, then we can't run codegen. * Because full type resolution now occurs by the work queue job, a bug was exposed whereby error messages for type resolution were associated with the wrong `Decl`, resulting in duplicate error messages when the type was also resolved "by" its owner `Decl` (which really *all* resolution should be done on). * A correct fix for this requires using a different `Sema` when performing type resolution: we need a `Sema` owned by the type. Also note that this fix is necessary for incremental compilation. * This means a whole bunch of functions no longer need to take `Sema`s. 
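  As an illustration (hedged; shapes only, not the exact signatures in this
  diff), a query that previously threaded an optional `Sema` through so it
  could resolve types on demand can now assume resolution already happened:

      // before: caller may pass a Sema to resolve on demand
      fn abiAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment
      // after: resolution is guaranteed before this is reachable
      fn abiAlignment(ty: Type, zcu: *Zcu) Alignment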
* First-order effects: `resolveTypeFields`, `resolveTypeLayout`, etc * Second-order effects: `Type.abiAlignmentAdvanced`, `Value.orderAgainstZeroAdvanced`, etc The end result of this is, in short, a more correct compiler and a simpler language specification. This regressed a few error notes in the test cases, but nothing that seems worth blocking this change. Oh, also, I ripped out the old code in `test/src/Cases.zig` which introduced a dependency on `Compilation`. This dependency was problematic at best, and this code has been unused for a while. When we re-enable incremental test cases, we must rewrite their executor to use the compiler server protocol. --- build.zig | 28 +- src/Air.zig | 2 + src/Air/types_resolved.zig | 521 ++++++ src/Compilation.zig | 34 +- src/Sema.zig | 1458 +++++------------ src/Sema/bitcast.zig | 8 +- src/Type.zig | 624 +++++-- src/Value.zig | 306 ++-- src/Zcu.zig | 164 +- src/codegen/llvm.zig | 5 +- src/print_value.zig | 8 +- .../compile_errors/direct_struct_loop.zig | 1 - .../compile_errors/indirect_struct_loop.zig | 3 - ...an_invalid_struct_that_contains_itself.zig | 1 - ..._an_invalid_union_that_contains_itself.zig | 1 - .../invalid_dependency_on_struct_size.zig | 1 - ...t_depends_on_itself_via_optional_field.zig | 2 - ...ype_returned_from_non-generic_function.zig | 2 +- test/src/Cases.zig | 703 +------- test/tests.zig | 4 - 20 files changed, 1850 insertions(+), 2026 deletions(-) create mode 100644 src/Air/types_resolved.zig diff --git a/build.zig b/build.zig index 3898acc6ac16..0f0d7d4d67a2 100644 --- a/build.zig +++ b/build.zig @@ -82,15 +82,6 @@ pub fn build(b: *std.Build) !void { docs_step.dependOn(langref_step); docs_step.dependOn(std_docs_step); - const check_case_exe = b.addExecutable(.{ - .name = "check-case", - .root_source_file = b.path("test/src/Cases.zig"), - .target = b.graph.host, - .optimize = optimize, - .single_threaded = single_threaded, - }); - check_case_exe.stack_size = stack_size; - const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false; const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false; const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release; @@ -222,7 +213,6 @@ pub fn build(b: *std.Build) !void { if (target.result.os.tag == .windows and target.result.abi == .gnu) { // LTO is currently broken on mingw, this can be removed when it's fixed. exe.want_lto = false; - check_case_exe.want_lto = false; } const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend"); @@ -245,7 +235,6 @@ pub fn build(b: *std.Build) !void { if (link_libc) { exe.linkLibC(); - check_case_exe.linkLibC(); } const is_debug = optimize == .Debug; @@ -339,21 +328,17 @@ pub fn build(b: *std.Build) !void { } try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx); - try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx); } else { // Here we are -Denable-llvm but no cmake integration. try addStaticLlvmOptionsToExe(exe); - try addStaticLlvmOptionsToExe(check_case_exe); } if (target.result.os.tag == .windows) { - inline for (.{ exe, check_case_exe }) |artifact| { - // LLVM depends on networking as of version 18. - artifact.linkSystemLibrary("ws2_32"); + // LLVM depends on networking as of version 18. 
+ exe.linkSystemLibrary("ws2_32"); - artifact.linkSystemLibrary("version"); - artifact.linkSystemLibrary("uuid"); - artifact.linkSystemLibrary("ole32"); - } + exe.linkSystemLibrary("version"); + exe.linkSystemLibrary("uuid"); + exe.linkSystemLibrary("ole32"); } } @@ -394,7 +379,6 @@ pub fn build(b: *std.Build) !void { const test_filters = b.option([]const []const u8, "test-filter", "Skip tests that do not match any filter") orelse &[0][]const u8{}; const test_cases_options = b.addOptions(); - check_case_exe.root_module.addOptions("build_options", test_cases_options); test_cases_options.addOption(bool, "enable_tracy", false); test_cases_options.addOption(bool, "enable_debug_extensions", enable_debug_extensions); @@ -458,7 +442,7 @@ pub fn build(b: *std.Build) !void { test_step.dependOn(check_fmt); const test_cases_step = b.step("test-cases", "Run the main compiler test cases"); - try tests.addCases(b, test_cases_step, test_filters, check_case_exe, target, .{ + try tests.addCases(b, test_cases_step, test_filters, target, .{ .skip_translate_c = skip_translate_c, .skip_run_translated_c = skip_run_translated_c, }, .{ diff --git a/src/Air.zig b/src/Air.zig index 0a05470e1c85..5799c31b259c 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1801,3 +1801,5 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } + +pub const typesFullyResolved = @import("Air/types_resolved.zig").typesFullyResolved; diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig new file mode 100644 index 000000000000..073f2d68d477 --- /dev/null +++ b/src/Air/types_resolved.zig @@ -0,0 +1,521 @@ +const Air = @import("../Air.zig"); +const Zcu = @import("../Zcu.zig"); +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const InternPool = @import("../InternPool.zig"); + +/// Given a body of AIR instructions, returns whether all type resolution necessary for codegen is complete. +/// If `false`, then type resolution must have failed, so codegen cannot proceed. 
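A hedged sketch of the intended call site (per the commit message, codegen consults this before lowering a function; the exact location is illustrative):

    if (!air.typesFullyResolved(zcu)) {
        // Some type failed to resolve; a compile error was already
        // reported, so codegen for this function is skipped.
        return;
    }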
+pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool { + return checkBody(air, air.getMainBody(), zcu); +} + +fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { + const tags = air.instructions.items(.tag); + const datas = air.instructions.items(.data); + + for (body) |inst| { + const data = datas[@intFromEnum(inst)]; + switch (tags[@intFromEnum(inst)]) { + .inferred_alloc, .inferred_alloc_comptime => unreachable, + + .arg => { + if (!checkType(data.arg.ty.toType(), zcu)) return false; + }, + + .add, + .add_safe, + .add_optimized, + .add_wrap, + .add_sat, + .sub, + .sub_safe, + .sub_optimized, + .sub_wrap, + .sub_sat, + .mul, + .mul_safe, + .mul_optimized, + .mul_wrap, + .mul_sat, + .div_float, + .div_float_optimized, + .div_trunc, + .div_trunc_optimized, + .div_floor, + .div_floor_optimized, + .div_exact, + .div_exact_optimized, + .rem, + .rem_optimized, + .mod, + .mod_optimized, + .max, + .min, + .bit_and, + .bit_or, + .shr, + .shr_exact, + .shl, + .shl_exact, + .shl_sat, + .xor, + .cmp_lt, + .cmp_lt_optimized, + .cmp_lte, + .cmp_lte_optimized, + .cmp_eq, + .cmp_eq_optimized, + .cmp_gte, + .cmp_gte_optimized, + .cmp_gt, + .cmp_gt_optimized, + .cmp_neq, + .cmp_neq_optimized, + .bool_and, + .bool_or, + .store, + .store_safe, + .set_union_tag, + .array_elem_val, + .slice_elem_val, + .ptr_elem_val, + .memset, + .memset_safe, + .memcpy, + .atomic_store_unordered, + .atomic_store_monotonic, + .atomic_store_release, + .atomic_store_seq_cst, + => { + if (!checkRef(data.bin_op.lhs, zcu)) return false; + if (!checkRef(data.bin_op.rhs, zcu)) return false; + }, + + .not, + .bitcast, + .clz, + .ctz, + .popcount, + .byte_swap, + .bit_reverse, + .abs, + .load, + .fptrunc, + .fpext, + .intcast, + .trunc, + .optional_payload, + .optional_payload_ptr, + .optional_payload_ptr_set, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .errunion_payload_ptr_set, + .wrap_errunion_payload, + .wrap_errunion_err, + .struct_field_ptr_index_0, + .struct_field_ptr_index_1, + .struct_field_ptr_index_2, + .struct_field_ptr_index_3, + .get_union_tag, + .slice_len, + .slice_ptr, + .ptr_slice_len_ptr, + .ptr_slice_ptr_ptr, + .array_to_slice, + .int_from_float, + .int_from_float_optimized, + .float_from_int, + .splat, + .error_set_has_value, + .addrspace_cast, + .c_va_arg, + .c_va_copy, + => { + if (!checkType(data.ty_op.ty.toType(), zcu)) return false; + if (!checkRef(data.ty_op.operand, zcu)) return false; + }, + + .alloc, + .ret_ptr, + .c_va_start, + => { + if (!checkType(data.ty, zcu)) return false; + }, + + .ptr_add, + .ptr_sub, + .add_with_overflow, + .sub_with_overflow, + .mul_with_overflow, + .shl_with_overflow, + .slice, + .slice_elem_ptr, + .ptr_elem_ptr, + => { + const bin = air.extraData(Air.Bin, data.ty_pl.payload).data; + if (!checkType(data.ty_pl.ty.toType(), zcu)) return false; + if (!checkRef(bin.lhs, zcu)) return false; + if (!checkRef(bin.rhs, zcu)) return false; + }, + + .block, + .loop, + => { + const extra = air.extraData(Air.Block, data.ty_pl.payload); + if (!checkType(data.ty_pl.ty.toType(), zcu)) return false; + if (!checkBody( + air, + @ptrCast(air.extra[extra.end..][0..extra.data.body_len]), + zcu, + )) return false; + }, + + .dbg_inline_block => { + const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload); + if (!checkType(data.ty_pl.ty.toType(), zcu)) return false; + if (!checkBody( + air, + @ptrCast(air.extra[extra.end..][0..extra.data.body_len]), + zcu, + )) return false; + }, + + 
.sqrt,
+            .sin,
+            .cos,
+            .tan,
+            .exp,
+            .exp2,
+            .log,
+            .log2,
+            .log10,
+            .floor,
+            .ceil,
+            .round,
+            .trunc_float,
+            .neg,
+            .neg_optimized,
+            .is_null,
+            .is_non_null,
+            .is_null_ptr,
+            .is_non_null_ptr,
+            .is_err,
+            .is_non_err,
+            .is_err_ptr,
+            .is_non_err_ptr,
+            .int_from_ptr,
+            .int_from_bool,
+            .ret,
+            .ret_safe,
+            .ret_load,
+            .is_named_enum_value,
+            .tag_name,
+            .error_name,
+            .cmp_lt_errors_len,
+            .c_va_end,
+            .set_err_return_trace,
+            => {
+                if (!checkRef(data.un_op, zcu)) return false;
+            },
+
+            .br => {
+                if (!checkRef(data.br.operand, zcu)) return false;
+            },
+
+            .cmp_vector,
+            .cmp_vector_optimized,
+            => {
+                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.lhs, zcu)) return false;
+                if (!checkRef(extra.rhs, zcu)) return false;
+            },
+
+            .reduce,
+            .reduce_optimized,
+            => {
+                if (!checkRef(data.reduce.operand, zcu)) return false;
+            },
+
+            .struct_field_ptr,
+            .struct_field_val,
+            => {
+                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.struct_operand, zcu)) return false;
+            },
+
+            .shuffle => {
+                const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.a, zcu)) return false;
+                if (!checkRef(extra.b, zcu)) return false;
+                if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
+            },
+
+            .cmpxchg_weak,
+            .cmpxchg_strong,
+            => {
+                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.ptr, zcu)) return false;
+                if (!checkRef(extra.expected_value, zcu)) return false;
+                if (!checkRef(extra.new_value, zcu)) return false;
+            },
+
+            .aggregate_init => {
+                const ty = data.ty_pl.ty.toType();
+                const elems_len: usize = @intCast(ty.arrayLen(zcu));
+                const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
+                if (!checkType(ty, zcu)) return false;
+                if (ty.zigTypeTag(zcu) == .Struct) {
+                    for (elems, 0..) |elem, elem_idx| {
+                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                } else {
+                    for (elems) |elem| {
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                }
+            },
+
+            .union_init => {
+                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.init, zcu)) return false;
+            },
+
+            .field_parent_ptr => {
+                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.field_ptr, zcu)) return false;
+            },
+
+            .atomic_load => {
+                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
+            },
+
+            .prefetch => {
+                if (!checkRef(data.prefetch.ptr, zcu)) return false;
+            },
+
+            .vector_store_elem => {
+                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
+                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .select,
+            .mul_add,
+            => {
+                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .atomic_rmw => {
+                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(extra.operand, zcu)) return false;
+            },
+
+            .call,
+            .call_always_tail,
+            .call_never_tail,
+            .call_never_inline,
+            => {
+                const extra = air.extraData(Air.Call, data.pl_op.payload);
+                const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                for (args) |arg| if (!checkRef(arg, zcu)) return false;
+            },
+
+            .dbg_var_ptr,
+            .dbg_var_val,
+            => {
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+            },
+
+            .@"try" => {
+                const extra = air.extraData(Air.Try, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .try_ptr => {
+                const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.data.ptr, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .cond_br => {
+                const extra = air.extraData(Air.CondBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
+                    zcu,
+                )) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .switch_br => {
+                const extra = air.extraData(Air.SwitchBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                var extra_index = extra.end;
+                for (0..extra.data.cases_len) |_| {
+                    const case = air.extraData(Air.SwitchBr.Case, extra_index);
+                    extra_index = case.end;
+                    const items: []const Air.Inst.Ref = @ptrCast(air.extra[extra_index..][0..case.data.items_len]);
+                    extra_index += case.data.items_len;
+                    for (items) |item| if (!checkRef(item, zcu)) return false;
+                    if (!checkBody(
+                        air,
+                        @ptrCast(air.extra[extra_index..][0..case.data.body_len]),
+                        zcu,
+                    )) return false;
+                    extra_index += case.data.body_len;
+                }
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra_index..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .assembly => {
+                const extra = air.extraData(Air.Asm, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                // Luckily, we only care about the inputs and outputs, so we don't have to do
+                // the whole null-terminated string dance.
+                const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]);
+                const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
+                for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
+                for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
+            },
+
+            .trap,
+            .breakpoint,
+            .ret_addr,
+            .frame_addr,
+            .unreach,
+            .wasm_memory_size,
+            .wasm_memory_grow,
+            .work_item_id,
+            .work_group_size,
+            .work_group_id,
+            .fence,
+            .dbg_stmt,
+            .err_return_trace,
+            .save_err_return_trace_index,
+            => {},
+        }
+    }
+    return true;
+}
+
+fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool {
+    const ip_index = ref.toInterned() orelse {
+        // This operand refers back to a previous instruction.
+        // We have already checked that instruction's type.
+        // So, there's no need to check this operand's type.
+        return true;
+    };
+    return checkVal(Value.fromInterned(ip_index), zcu);
+}
+
+fn checkVal(val: Value, zcu: *Zcu) bool {
+    if (!checkType(val.typeOf(zcu), zcu)) return false;
+    // Check for lazy values
+    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+        .int => |int| switch (int.storage) {
+            .u64, .i64, .big_int => return true,
+            .lazy_align, .lazy_size => |ty_index| {
+                return checkType(Type.fromInterned(ty_index), zcu);
+            },
+        },
+        else => return true,
+    }
+}
+
+fn checkType(ty: Type, zcu: *Zcu) bool {
+    const ip = &zcu.intern_pool;
+    return switch (ty.zigTypeTag(zcu)) {
+        .Type,
+        .Void,
+        .Bool,
+        .NoReturn,
+        .Int,
+        .Float,
+        .ErrorSet,
+        .Enum,
+        .Opaque,
+        .Vector,
+        // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
+        // It's a little silly -- but fine, we'll return `true`.
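+        // (Every tag below is a comptime-only type: no value of these types can
+        // reach a backend, so there is no layout for this check to verify.)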
+        .ComptimeFloat,
+        .ComptimeInt,
+        .Undefined,
+        .Null,
+        .EnumLiteral,
+        => true,
+
+        .Frame,
+        .AnyFrame,
+        => @panic("TODO Air.types_resolved.checkType async frames"),
+
+        .Optional => checkType(ty.childType(zcu), zcu),
+        .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
+        .Pointer => checkType(ty.childType(zcu), zcu),
+        .Array => checkType(ty.childType(zcu), zcu),
+
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
+            }
+            return checkType(Type.fromInterned(info.return_type), zcu);
+        },
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => {
+                const struct_obj = zcu.typeToStruct(ty).?;
+                return switch (struct_obj.layout) {
+                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
+                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                };
+            },
+            .anon_struct_type => |tuple| {
+                for (0..tuple.types.len) |i| {
+                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
+                    if (field_is_comptime) continue;
+                    const field_ty = tuple.types.get(ip)[i];
+                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
+                }
+                return true;
+            },
+            else => unreachable,
+        },
+        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+    };
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b964ffd0d136..7447d589fd90 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -37,6 +37,7 @@ const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
 const libtsan = @import("libtsan.zig");
 const Zir = std.zig.Zir;
+const Air = @import("Air.zig");
 const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 
@@ -316,18 +317,29 @@ const Job = union(enum) {
     codegen_decl: InternPool.DeclIndex,
     /// Write the machine code for a function to the output file.
     /// This will either be a non-generic `func_decl` or a `func_instance`.
-    codegen_func: InternPool.Index,
+    codegen_func: struct {
+        func: InternPool.Index,
+        /// This `Air` is owned by the `Job` and allocated with `gpa`.
+        /// It must be deinited when the job is processed.
+        air: Air,
+    },
     /// Render the .h file snippet for the Decl.
     emit_h_decl: InternPool.DeclIndex,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already be analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
     analyze_decl: InternPool.DeclIndex,
+    /// Analyze the body of a runtime function.
+    /// After analysis, a `codegen_func` job will be queued.
+    /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
+    analyze_func: InternPool.Index,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
    update_line_number: InternPool.DeclIndex,
     /// The main source file for the module needs to be analyzed.
     analyze_mod: *Package.Module,
+    /// Fully resolve the given `struct` or `union` type.
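+    /// Queued when Sema creates a new container type, so that layout work is
+    /// finished before any `codegen_func` job can depend on it.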
+    resolve_type_fully: InternPool.Index,
     /// one of the glibc static objects
     glibc_crt_file: glibc.CRTFile,
@@ -3389,7 +3401,7 @@ pub fn performAllTheWork(
         if (try zcu.findOutdatedToAnalyze()) |outdated| {
             switch (outdated.unwrap()) {
                 .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
-                .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }),
+                .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }),
             }
             continue;
         }
@@ -3439,6 +3451,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();
 
+            const module = comp.module.?;
+            // This call takes ownership of `func.air`.
+            try module.linkerUpdateFunc(func.func, func.air);
+        },
+        .analyze_func => |func| {
+            const named_frame = tracy.namedFrame("analyze_func");
+            defer named_frame.end();
+
             const module = comp.module.?;
             module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
@@ -3518,6 +3538,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                 try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
             }
         },
+        .resolve_type_fully => |ty| {
+            const named_frame = tracy.namedFrame("resolve_type_fully");
+            defer named_frame.end();
+
+            const zcu = comp.module.?;
+            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                error.AnalysisFail => return,
+            };
+        },
         .update_line_number => |decl_index| {
             const named_frame = tracy.namedFrame("update_line_number");
             defer named_frame.end();
diff --git a/src/Sema.zig b/src/Sema.zig
index 57b2c897a118..9dfbc724ebb4 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -64,14 +64,6 @@ generic_owner: InternPool.Index = .none,
 /// instantiation can point back to the instantiation site in addition to the
 /// declaration site.
 generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
-/// The key is types that must be fully resolved prior to machine code
-/// generation pass. Types are added to this set when resolving them
-/// immediately could cause a dependency loop, but they do need to be resolved
-/// before machine code generation passes process the AIR.
-/// It would work fine if this were an array list instead of an array hash map.
-/// I chose array hash map with the intention to save time by omitting
-/// duplicates.
-types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when an break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -872,7 +864,6 @@ pub fn deinit(sema: *Sema) void {
     sema.air_extra.deinit(gpa);
     sema.inst_map.deinit(gpa);
     sema.decl_val_table.deinit(gpa);
-    sema.types_to_resolve.deinit(gpa);
     {
         var it = sema.post_hoc_blocks.iterator();
         while (it.next()) |entry| {
@@ -2078,8 +2069,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
     const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
 
     // var st: StackTrace = undefined;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
 
     // st.instruction_addresses = &addrs;
@@ -2628,7 +2619,7 @@ fn analyzeAsInt(
     const mod = sema.mod;
     const coerced = try sema.coerce(block, dest_ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
-    return (try val.getUnsignedIntAdvanced(mod, sema)).?;
+    return (try val.getUnsignedIntAdvanced(mod, .sema)).?;
 }
 
 /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
@@ -2832,6 +2823,7 @@ fn zirStructDecl(
     }
 
     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
 
@@ -3332,7 +3324,7 @@ fn zirUnionDecl(
     }
 
     try mod.finalizeAnonDecl(new_decl_index);
-
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
 
@@ -3457,12 +3449,12 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     defer tracy.end();
 
     if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
-        try sema.resolveTypeFields(sema.fn_ret_ty);
+        try sema.fn_ret_ty.resolveFields(sema.mod);
         return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
     }
 
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = sema.fn_ret_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
@@ -3471,7 +3463,6 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
         // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
         // TODO when functions gain result location support, the inlining struct in
         // Block should contain the return pointer, and we would pass that through here.
-        try sema.queueFullTypeResolution(sema.fn_ret_ty);
         return block.addTy(.alloc, ptr_type);
     }
@@ -3667,8 +3658,8 @@ fn zirAllocExtended(
         try sema.validateVarType(block, ty_src, var_ty, false);
     }
     const target = sema.mod.getTarget();
-    try sema.resolveTypeLayout(var_ty);
-    const ptr_type = try sema.ptrType(.{
+    try var_ty.resolveLayout(sema.mod);
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{
             .alignment = alignment,
@@ -3902,7 +3893,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                 const idx_val = (try sema.resolveValue(data.rhs)).?;
                 break :blk .{
                     data.lhs,
-                    .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) },
+                    .{ .elem = try idx_val.toUnsignedIntSema(zcu) },
                 };
             },
             .bitcast => .{
@@ -3940,7 +3931,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = payload_val.toIntern(),
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern();
             },
             .eu_payload => ptr: {
                 // Set the error union to non-error at comptime.
@@ -3953,7 +3944,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = .{ .payload = payload_val.toIntern() },
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern();
             },
             .field => |idx| ptr: {
                 const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
@@ -3967,9 +3958,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val);
                     try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
                 }
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern();
             },
-            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(),
+            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(),
         };
         try ptr_mapping.put(air_ptr, new_ptr);
     }
@@ -4060,7 +4051,7 @@ fn finishResolveComptimeKnownAllocPtr(
 fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type {
     var ptr_info = ptr_ty.ptrInfo(sema.mod);
     ptr_info.flags.is_const = true;
-    return sema.ptrType(ptr_info);
+    return sema.mod.ptrTypeSema(ptr_info);
 }
 
 fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
@@ -4103,11 +4094,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
         return sema.analyzeComptimeAlloc(block, var_ty, .none);
     }
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     const ptr = try block.addTy(.alloc, ptr_type);
     const ptr_inst = ptr.toIndex().?;
     try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index });
@@ -4127,11 +4117,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     try sema.validateVarType(block, ty_src, var_ty, false);
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     return block.addTy(.alloc, ptr_type);
 }
 
@@ -4227,7 +4216,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     }
 
     const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none);
-    const final_ptr_ty = try sema.ptrType(.{
+    const final_ptr_ty = try mod.ptrTypeSema(.{
         .child = final_elem_ty.toIntern(),
         .flags = .{
             .alignment = ia1.alignment,
@@ -4247,7 +4236,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         // Unless the block is comptime, `alloc_inferred` always produces
         // a runtime constant. The final inferred type needs to be
         // fully resolved so it can be lowered in codegen.
-        try sema.resolveTypeFully(final_elem_ty);
+        try final_elem_ty.resolveFully(mod);
 
         return;
     }
@@ -4259,8 +4248,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)});
     }
 
-    try sema.queueFullTypeResolution(final_elem_ty);
-
     // Change it to a normal alloc.
     sema.air_instructions.set(@intFromEnum(ptr_inst), .{
         .tag = .alloc,
@@ -4633,7 +4620,7 @@ fn validateArrayInitTy(
             return;
         },
         .Struct => if (ty.isTuple(mod)) {
-            try sema.resolveTypeFields(ty);
+            try ty.resolveFields(mod);
            const array_len = ty.arrayLen(mod);
             if (init_count > array_len) {
                 return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4911,7 +4898,7 @@ fn validateStructInit(
     if (block.is_comptime and
         (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
     {
-        try sema.resolveStructLayout(struct_ty);
+        try struct_ty.resolveLayout(mod);
         // In this case the only thing we need to do is evaluate the implicit
         // store instructions for default field values, and report any missing fields.
         // Avoid the cost of the extra machinery for detecting a comptime struct init value.
@@ -4919,7 +4906,7 @@ fn validateStructInit(
             const i: u32 = @intCast(i_usize);
             if (field_ptr != .none) continue;
 
-            try sema.resolveStructFieldInits(struct_ty);
+            try struct_ty.resolveStructFieldInits(mod);
             const default_val = struct_ty.structFieldDefaultValue(i, mod);
             if (default_val.toIntern() == .unreachable_value) {
                 const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
@@ -4968,7 +4955,7 @@ fn validateStructInit(
     const air_tags = sema.air_instructions.items(.tag);
     const air_datas = sema.air_instructions.items(.data);
 
-    try sema.resolveStructFieldInits(struct_ty);
+    try struct_ty.resolveStructFieldInits(mod);
 
     // We collect the comptime field values in case the struct initialization
     // ends up being comptime-known.
@@ -5127,7 +5114,7 @@ fn validateStructInit(
         try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
         return;
     }
-    try sema.resolveStructLayout(struct_ty);
+    try struct_ty.resolveLayout(mod);
 
     // Our task is to insert `store` instructions for all the default field values.
     for (found_fields, 0..) |field_ptr, i| {
@@ -5172,7 +5159,7 @@ fn zirValidatePtrArrayInit(
     var root_msg: ?*Module.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
-    try sema.resolveStructFieldInits(array_ty);
+    try array_ty.resolveStructFieldInits(mod);
     var i = instrs.len;
     while (i < array_len) : (i += 1) {
         const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
@@ -5241,7 +5228,7 @@ fn zirValidatePtrArrayInit(
 
             if (array_ty.isTuple(mod)) {
                 if (array_ty.structFieldIsComptime(i, mod))
-                    try sema.resolveStructFieldInits(array_ty);
+                    try array_ty.resolveStructFieldInits(mod);
                 if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                     element_vals[i] = opv.toIntern();
                     continue;
                 }
@@ -5581,7 +5568,7 @@ fn storeToInferredAllocComptime(
             .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known",
         });
     };
-    const alloc_ty = try sema.ptrType(.{
+    const alloc_ty = try zcu.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{
             .alignment = iac.alignment,
@@ -5688,7 +5675,7 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref {
 
 fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
     const mod = sema.mod;
-    const ptr_ty = (try sema.ptrType(.{
+    const ptr_ty = (try mod.ptrTypeSema(.{
         .child = mod.intern_pool.typeOf(val),
         .flags = .{
             .alignment = .none,
@@ -6645,8 +6632,6 @@ fn addDbgVar(
     // real `block` instruction.
     if (block.need_debug_scope) |ptr| ptr.* = true;
 
-    try sema.queueFullTypeResolution(operand_ty);
-
     // Add the name to the AIR.
     const name_extra_index: u32 = @intCast(sema.air_extra.items.len);
     const elements_used = name.len / 4 + 1;
@@ -6832,14 +6817,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
     if (!block.ownerModule().error_tracing) return .none;
 
-    const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
-    sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
     const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
@@ -6879,8 +6858,8 @@ fn popErrorReturnTrace(
         // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
         // the result is comptime-known to be a non-error. Either way, pop unconditionally.
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -6905,8 +6884,8 @@ fn popErrorReturnTrace(
         defer then_block.instructions.deinit(gpa);
 
         // If non-error, then pop the error return trace by restoring the index.
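+        // Only the saved `index` field is restored below; the return addresses
+        // already written into the trace buffer are left untouched.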
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -7032,8 +7011,8 @@ fn zirCall(
         // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
         // need to clean-up our own trace if we were passed to a non-error-handling expression.
         if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
-            const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-            try sema.resolveTypeFields(stack_trace_ty);
+            const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+            try stack_trace_ty.resolveFields(mod);
             const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls);
             const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
 
@@ -7264,10 +7243,6 @@ const CallArgsInfo = union(enum) {
     ) CompileError!Air.Inst.Ref {
         const mod = sema.mod;
         const param_count = func_ty_info.param_types.len;
-        if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) {
-            .generic_poison_type => {},
-            else => try sema.queueFullTypeResolution(param_ty),
-        };
         const uncoerced_arg: Air.Inst.Ref = switch (cai) {
             inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
             .zir_call => |zir_call| arg_val: {
@@ -7494,24 +7469,19 @@ fn analyzeCall(
 
     const gpa = sema.gpa;
 
-    var is_generic_call = func_ty_info.is_generic;
+    const is_generic_call = func_ty_info.is_generic;
     var is_comptime_call = block.is_comptime or modifier == .compile_time;
     var is_inline_call = is_comptime_call or modifier == .always_inline or
         func_ty_info.cc == .Inline;
     var comptime_reason: ?*const Block.ComptimeReason = null;
     if (!is_inline_call and !is_comptime_call) {
-        if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| {
-            is_comptime_call = ct;
-            is_inline_call = ct;
-            if (ct) {
-                comptime_reason = &.{ .comptime_ret_ty = .{
-                    .func = func,
-                    .func_src = func_src,
-                    .return_ty = Type.fromInterned(func_ty_info.return_type),
-                } };
-            }
-        } else |err| switch (err) {
-            error.GenericPoison => is_generic_call = true,
-            else => |e| return e,
+        if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
+            is_comptime_call = true;
+            is_inline_call = true;
+            comptime_reason = &.{ .comptime_ret_ty = .{
+                .func = func,
+                .func_src = func_src,
+                .return_ty = Type.fromInterned(func_ty_info.return_type),
+            } };
         }
     }
 
@@ -7871,7 +7841,6 @@ fn analyzeCall(
 
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
-    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
     if (sema.owner_func_index != .none and
         Type.fromInterned(func_ty_info.return_type).isError(mod))
     {
         ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
     }
@@ -8281,7 +8250,6 @@ fn instantiateGenericCall(
             }
         } else {
             // The parameter is runtime-known.
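+            // It is lowered as an `arg` instruction of the instantiated function,
+            // mapped from the corresponding `param` ZIR instruction below.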
-            try sema.queueFullTypeResolution(arg_ty);
             child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
                 .tag = .arg,
                 .data = .{ .arg = .{
@@ -8330,8 +8298,6 @@ fn instantiateGenericCall(
         return error.GenericPoison;
     }
 
-    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
-
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
     if (sema.owner_func_index != .none and
@@ -8423,7 +8389,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
         else => |e| return e,
     };
     const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
-    try sema.resolveTypeFields(indexable_ty);
+    try indexable_ty.resolveFields(mod);
     assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
     if (indexable_ty.zigTypeTag(mod) == .Struct) {
         const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
@@ -8687,7 +8653,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src);
 
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
-        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema));
+        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod));
         if (int > mod.global_error_set.count() or int == 0)
             return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
         return Air.internedToRef((try mod.intern(.{ .err = .{
@@ -8791,7 +8757,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
         .Enum => operand,
         .Union => blk: {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
             const tag_ty = operand_ty.unionTagType(mod) orelse {
                 return sema.fail(
                     block,
@@ -8933,7 +8899,7 @@ fn analyzeOptionalPayloadPtr(
     }
 
     const child_type = opt_type.optionalChild(zcu);
-    const child_pointer = try sema.ptrType(.{
+    const child_pointer = try zcu.ptrTypeSema(.{
         .child = child_type.toIntern(),
         .flags = .{
             .is_const = optional_ptr_ty.isConstPtr(zcu),
@@ -8957,13 +8923,13 @@ fn analyzeOptionalPayloadPtr(
                 const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
                 try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
             if (val.isNull(zcu)) {
                 return sema.fail(block, src, "unable to unwrap null", .{});
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
     }
 
@@ -9006,7 +8972,7 @@ fn zirOptionalPayload(
         // TODO https://github.com/ziglang/zig/issues/6597
        if (true) break :t operand_ty;
         const ptr_info = operand_ty.ptrInfo(mod);
-        break :t try sema.ptrType(.{
+        break :t try mod.ptrTypeSema(.{
            .child = ptr_info.child,
             .flags = .{
                 .alignment = ptr_info.flags.alignment,
@@ -9124,7 +9090,7 @@ fn analyzeErrUnionPayloadPtr(
     const err_union_ty = operand_ty.childType(zcu);
     const payload_ty = err_union_ty.errorUnionPayload(zcu);
-    const operand_pointer_ty = try sema.ptrType(.{
+    const operand_pointer_ty = try zcu.ptrTypeSema(.{
         .child = payload_ty.toIntern(),
         .flags = .{
             .is_const = operand_ty.isConstPtr(zcu),
@@ -9149,13 +9115,13 @@ fn analyzeErrUnionPayloadPtr(
                 const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
                 try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
            if (val.getErrorName(zcu).unwrap()) |name| {
                 return sema.failWithComptimeErrorRetTrace(block, src, name);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
     }
 
@@ -9603,17 +9569,8 @@ fn funcCommon(
         }
     }
 
-    var ret_ty_requires_comptime = false;
-    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
-        ret_ty_requires_comptime = ret_comptime;
-        break :rp bare_return_type.isGenericPoison();
-    } else |err| switch (err) {
-        error.GenericPoison => rp: {
-            is_generic = true;
-            break :rp true;
-        },
-        else => |e| return e,
-    };
+    const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type);
+    const ret_poison = bare_return_type.isGenericPoison();
 
     const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
     const param_types = block.params.items(.ty);
@@ -9961,8 +9918,8 @@ fn finishFunc(
     if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
         // Make sure that StackTrace's fields are resolved so that the backend can
         // lower this fn type.
-        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(unresolved_stack_trace_ty);
+        const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try unresolved_stack_trace_ty.resolveFields(mod);
     }
 
     return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
@@ -10021,21 +9978,7 @@ fn zirParam(
         }
     };
 
-    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
-        error.GenericPoison => {
-            // The type is not available until the generic instantiation.
-            // We result the param instruction with a poison value and
-            // insert an anytype parameter.
-            try block.params.append(sema.arena, .{
-                .ty = .generic_poison_type,
-                .is_comptime = comptime_syntax,
-                .name = param_name,
-            });
-            sema.inst_map.putAssumeCapacity(inst, .generic_poison);
-            return;
-        },
-        else => |e| return e,
-    } or comptime_syntax;
+    const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax;
 
     try block.params.append(sema.arena, .{
         .ty = param_ty.toIntern(),
@@ -10162,7 +10105,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         }
         return Air.internedToRef((try zcu.intValue(
             Type.usize,
-            (try operand_val.getUnsignedIntAdvanced(zcu, sema)).?,
+            (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?,
         )).toIntern());
     }
     const len = operand_ty.vectorLen(zcu);
@@ -10174,7 +10117,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                 new_elem.* = (try zcu.undefValue(Type.usize)).toIntern();
                 continue;
             }
-            const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse {
+            const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse {
                 // A vector element wasn't an integer pointer. This is a runtime operation.
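+                // Breaking out of the surrounding `ct` block abandons the
+                // comptime fast path and lowers the conversion at runtime instead.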
                break :ct;
             };
@@ -11047,7 +10990,7 @@ const SwitchProngAnalysis = struct {
                 const union_obj = zcu.typeToUnion(operand_ty).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                 if (capture_byref) {
-                    const ptr_field_ty = try sema.ptrType(.{
+                    const ptr_field_ty = try zcu.ptrTypeSema(.{
                         .child = field_ty.toIntern(),
                         .flags = .{
                             .is_const = !operand_ptr_ty.ptrIsMutable(zcu),
@@ -11056,7 +10999,7 @@ const SwitchProngAnalysis = struct {
                         },
                     });
                     if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
-                        return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern());
+                        return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern());
                     }
                     return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
                 } else {
@@ -11150,7 +11093,7 @@ const SwitchProngAnalysis = struct {
             const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
             for (field_indices, dummy_captures) |field_idx, *dummy| {
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
-                const field_ptr_ty = try sema.ptrType(.{
+                const field_ptr_ty = try zcu.ptrTypeSema(.{
                     .child = field_ty.toIntern(),
                     .flags = .{
                         .is_const = operand_ptr_info.flags.is_const,
@@ -11186,7 +11129,7 @@ const SwitchProngAnalysis = struct {
 
             if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
                 if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty);
-                const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema);
+                const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu);
                 return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
             }
 
@@ -11399,7 +11342,7 @@ fn switchCond(
         },
 
         .Union => {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
             const enum_ty = operand_ty.unionTagType(mod) orelse {
                 const msg = msg: {
                     const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
@@ -13691,7 +13634,7 @@ fn maybeErrorUnwrap(
                     return true;
                 }
 
-                const panic_fn = try sema.getBuiltin("panicUnwrapError");
+                const panic_fn = try mod.getBuiltin("panicUnwrapError");
                 const err_return_trace = try sema.getErrorReturnTrace(block);
                 const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
                 try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13701,7 +13644,7 @@ fn maybeErrorUnwrap(
                 const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                 const msg_inst = try sema.resolveInst(inst_data.operand);
 
-                const panic_fn = try sema.getBuiltin("panic");
+                const panic_fn = try mod.getBuiltin("panic");
                 const err_return_trace = try sema.getErrorReturnTrace(block);
                 const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
                 try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13766,7 +13709,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{
         .needed_comptime_reason = "field name must be comptime-known",
     });
-    try sema.resolveTypeFields(ty);
+    try ty.resolveFields(mod);
     const ip = &mod.intern_pool;
 
     const has_field = hf: {
@@ -13946,7 +13889,7 @@ fn zirShl(
                 return mod.undefRef(sema.typeOf(lhs));
             }
             // If rhs is 0, return lhs without doing any calculations.
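+            // (A shift by zero is an identity operation for every shift variant,
+            // including `shl_sat`, so the operand is returned unchanged.)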
-            if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+            if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                 return lhs;
             }
             if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
@@ -14111,7 +14054,7 @@ fn zirShr(
                 return mod.undefRef(lhs_ty);
             }
             // If rhs is 0, return lhs without doing any calculations.
-            if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+            if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                 return lhs;
             }
             if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
@@ -14158,7 +14101,7 @@ fn zirShr(
                 if (air_tag == .shr_exact) {
                     // Detect if any ones would be shifted out.
                     const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
-                    if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!(try truncated.compareAllWithZeroSema(.eq, mod))) {
                         return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                     }
                 }
@@ -14582,12 +14525,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = resolved_elem_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
@@ -14670,7 +14613,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
                     .none => null,
                     else => Value.fromInterned(ptr_info.sentinel),
                 },
-                .len = try val.sliceLen(sema),
+                .len = try val.sliceLen(mod),
             };
         },
         .One => {
@@ -14912,12 +14855,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
 
     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = lhs_info.elem_type.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
@@ -15105,7 +15048,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         .Int, .ComptimeInt, .ComptimeFloat => {
             if (maybe_lhs_val) |lhs_val| {
                 if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                         const scalar_zero = switch (scalar_tag) {
                             .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                             .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15120,7 +15063,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                 if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                     return sema.failWithDivideByZero(block, rhs_src);
                 }
                 // TODO: if the RHS is one, return the LHS directly
@@ -15241,7 +15184,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             } else {
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                     const scalar_zero = switch (scalar_tag) {
                         .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                         .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15256,7 +15199,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             // TODO: if the RHS is one, return the LHS directly
@@ -15408,7 +15351,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     // If the lhs is undefined, result is undefined.
     if (maybe_lhs_val) |lhs_val| {
         if (!lhs_val.isUndef(mod)) {
-            if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+            if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                 const scalar_zero = switch (scalar_tag) {
                     .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                     .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15423,7 +15366,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         if (rhs_val.isUndef(mod)) {
            return sema.failWithUseOfUndef(block, rhs_src);
         }
-        if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+        if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
            return sema.failWithDivideByZero(block, rhs_src);
         }
         // TODO: if the RHS is one, return the LHS directly
@@ -15518,7 +15461,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     // If the lhs is undefined, result is undefined.
     if (maybe_lhs_val) |lhs_val| {
         if (!lhs_val.isUndef(mod)) {
-            if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+            if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                 const scalar_zero = switch (scalar_tag) {
                     .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                     .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15533,7 +15476,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
-        if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+        if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
             return sema.failWithDivideByZero(block, rhs_src);
         }
     }
@@ -15758,7 +15701,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, lhs_src);
             }
-            if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+            if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                 const scalar_zero = switch (scalar_tag) {
                     .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                     .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15777,18 +15720,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_lhs_val) |lhs_val| {
                 const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
                 // If this answer could possibly be different by doing `intMod`,
                 // we must emit a compile error. Otherwise, it's OK.
-                if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
-                    !(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
+                if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and
+                    !(try rem_result.compareAllWithZeroSema(.eq, mod)))
                 {
                     return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                 }
@@ -15806,14 +15749,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                 return sema.failWithDivideByZero(block, rhs_src);
            }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) {
                     return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                 }
                 return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
@@ -15864,8 +15807,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
     // resorting to BigInt first.
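+    // `Value.BigIntSpace` provides stack storage for the limbs, so values that
+    // fit in 64 bits are converted without heap allocation.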
    var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
     );
@@ -15941,7 +15884,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             if (maybe_lhs_val) |lhs_val| {
@@ -15957,7 +15900,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16036,7 +15979,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             if (maybe_lhs_val) |lhs_val| {
@@ -16052,7 +15995,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16139,12 +16082,12 @@ fn zirOverflowArithmetic(
             // to the result, even if it is undefined..
             // Otherwise, if either of the argument is undefined, undefined is returned.
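+            // e.g. `@addWithOverflow(x, 0)` yields `.{ x, 0 }` even when `x` is undef.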
            if (maybe_lhs_val) |lhs_val| {
-                if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                     break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                 }
             }
 
             if (maybe_rhs_val) |rhs_val| {
-                if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                     break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                 }
             }
@@ -16165,7 +16108,7 @@ fn zirOverflowArithmetic(
             if (maybe_rhs_val) |rhs_val| {
                 if (rhs_val.isUndef(mod)) {
                     break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
-                } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                     break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                 } else if (maybe_lhs_val) |lhs_val| {
                     if (lhs_val.isUndef(mod)) {
@@ -16184,7 +16127,7 @@ fn zirOverflowArithmetic(
             const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
             if (maybe_lhs_val) |lhs_val| {
                 if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     }
                 }
             }
 
             if (maybe_rhs_val) |rhs_val| {
                 if (!rhs_val.isUndef(mod)) {
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
             }
@@ -16218,12 +16161,12 @@ fn zirOverflowArithmetic(
             // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
             // Oterhwise if either of the arguments is undefined, both results are undefined.
             if (maybe_lhs_val) |lhs_val| {
-                if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                     break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                 }
             }
 
             if (maybe_rhs_val) |rhs_val| {
-                if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                     break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                 }
             }
@@ -16374,7 +16317,7 @@ fn analyzeArithmetic(
                 // overflow (max_int), causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
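+                // e.g. with `i32` operands, `undef + 1` could represent
+                // `maxInt(i32) + 1`, which would be illegal behavior.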
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16386,7 +16329,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16418,7 +16361,7 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16426,7 +16369,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16439,7 +16382,7 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16447,7 +16390,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16488,7 +16431,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16523,7 +16466,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16544,7 +16487,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16591,7 +16534,7 @@ fn analyzeArithmetic( if (lhs_val.isNan(mod)) { return Air.internedToRef(lhs_val.toIntern()); } - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); @@ -16622,7 +16565,7 @@ fn analyzeArithmetic( if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isInf(mod)) { return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern()); @@ -16674,7 +16617,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, 
scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16687,7 +16630,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16719,7 +16662,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16732,7 +16675,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16828,7 +16771,7 @@ fn analyzePtrArithmetic( const new_ptr_ty = t: { // Calculate the new pointer alignment. - // This code is duplicated in `elemPtrType`. + // This code is duplicated in `Type.elemPtrType`. if (ptr_info.flags.alignment == .none) { // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness. break :t ptr_ty; @@ -16837,7 +16780,7 @@ fn analyzePtrArithmetic( // it being a multiple of the type size. const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema)); + const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod)); break :a elem_size * off_int; } else elem_size; @@ -16850,7 +16793,7 @@ fn analyzePtrArithmetic( )); assert(new_align != .none); - break :t try sema.ptrType(.{ + break :t try mod.ptrTypeSema(.{ .child = ptr_info.child, .sentinel = ptr_info.sentinel, .flags = .{ @@ -16869,14 +16812,14 @@ fn analyzePtrArithmetic( if (opt_off_val) |offset_val| { if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema)); + const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod)); if (offset_int == 0) return ptr; if (air_tag == .ptr_sub) { const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } else { - const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty); + const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } } else break :rs offset_src; @@ -16975,7 +16918,6 @@ fn zirAsm( // Indicate the output is the asm instruction return value. 
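+                // (`.none` marks this slot as carrying no input operand; the
+                // value is produced by the inline assembly itself.)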
                arg.* = .none;
                 const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
-                try sema.queueFullTypeResolution(out_ty);
                 expr_ty = Air.internedToRef(out_ty.toIntern());
             } else {
                 arg.* = try sema.resolveInst(output.data.operand);
@@ -17010,7 +16952,6 @@ fn zirAsm(
             .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
             else => {
                 arg.* = uncasted_arg;
-                try sema.queueFullTypeResolution(uncasted_arg_ty);
             },
         }
 
@@ -17169,7 +17110,7 @@ fn analyzeCmpUnionTag(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const union_ty = sema.typeOf(un);
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(mod);
     const union_tag_ty = union_ty.unionTagType(mod) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
@@ -17385,9 +17326,6 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         => {},
     }
     const val = try ty.lazyAbiSize(mod);
-    if (val.isLazySize(mod)) {
-        try sema.queueFullTypeResolution(ty);
-    }
     return Air.internedToRef(val.toIntern());
 }
 
@@ -17427,7 +17365,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .AnyFrame,
         => {},
     }
-    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
+    const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema);
     return mod.intRef(Type.comptime_int, bit_size);
 }
 
@@ -17613,7 +17551,7 @@ fn zirBuiltinSrc(
         } });
     };
 
-    const src_loc_ty = try sema.getBuiltinType("SourceLocation");
+    const src_loc_ty = try mod.getBuiltinType("SourceLocation");
     const fields = .{
         // file: [:0]const u8,
         file_name_val,
@@ -17637,7 +17575,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty = try sema.resolveType(block, src, inst_data.operand);
-    const type_info_ty = try sema.getBuiltinType("Type");
+    const type_info_ty = try mod.getBuiltinType("Type");
     const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
 
     if (ty.typeDeclInst(mod)) |type_decl_inst| {
@@ -17718,7 +17656,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 .ty = new_decl_ty.toIntern(),
                 .storage = .{ .elems = param_vals },
             } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                 .child = param_info_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -17748,7 +17686,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 func_ty_info.return_type,
             } });
 
-            const callconv_ty = try sema.getBuiltinType("CallingConvention");
+            const callconv_ty = try mod.getBuiltinType("CallingConvention");
 
             const field_values = .{
                 // calling_convention: CallingConvention,
@@ -17782,7 +17720,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const int_info_decl = mod.declPtr(int_info_decl_index);
             const int_info_ty = int_info_decl.val.toType();
 
-            const signedness_ty = try sema.getBuiltinType("Signedness");
+            const signedness_ty = try mod.getBuiltinType("Signedness");
             const info = ty.intInfo(mod);
             const field_values = .{
                 // signedness: Signedness,
@@ -17830,12 +17768,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             else
                 try Type.fromInterned(info.child).lazyAbiAlignment(mod);
 
-            const addrspace_ty = try sema.getBuiltinType("AddressSpace");
+            const addrspace_ty = try mod.getBuiltinType("AddressSpace");
             const pointer_ty = t: {
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -17984,8 +17922,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t set_field_ty_decl.val.toType();
             };
 
-            try sema.queueFullTypeResolution(error_field_ty);
-
             // Build our list of Error values
             // Optional value is only null if anyerror
             // Value can be zero-length slice otherwise
@@ -18036,7 +17972,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             // Build our ?[]const Error value
-            const slice_errors_ty = try sema.ptrType(.{
+            const slice_errors_ty = try mod.ptrTypeSema(.{
                 .child = error_field_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -18182,7 +18118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 .ty = fields_array_ty.toIntern(),
                 .storage = .{ .elems = enum_field_vals },
             } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                 .child = enum_field_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -18262,7 +18198,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t union_field_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
+            try ty.resolveLayout(mod); // Getting alignment requires type layout
             const union_obj = mod.typeToUnion(ty).?;
             const tag_type = union_obj.loadTagType(ip);
             const layout = union_obj.getLayout(ip);
@@ -18298,7 +18234,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
 
                 const alignment = switch (layout) {
-                    .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(field_index)),
+                    .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema),
                     .@"packed" => .none,
                 };
 
@@ -18326,7 +18262,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 .ty = array_fields_ty.toIntern(),
                 .storage = .{ .elems = union_field_vals },
             } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                 .child = union_field_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -18359,7 +18295,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -18412,7 +18348,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t struct_field_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
+            try ty.resolveLayout(mod); // Getting alignment requires type layout
 
             var struct_field_vals: []InternPool.Index = &.{};
             defer gpa.free(struct_field_vals);
@@ -18452,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         } });
                     };
 
-                    try sema.resolveTypeLayout(Type.fromInterned(field_ty));
+                    try Type.fromInterned(field_ty).resolveLayout(mod);
 
                     const is_comptime = field_val != .none;
                     const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null;
@@ -18481,7 +18417,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
-            try sema.resolveStructFieldInits(ty);
+            try ty.resolveStructFieldInits(mod);
 
            for (struct_field_vals, 0..) |*field_val, field_index| {
                 const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name|
@@ -18520,10 +18456,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const default_val_ptr = try sema.optRefValue(opt_default_val);
                 const alignment = switch (struct_type.layout) {
                     .@"packed" => .none,
-                    else => try sema.structFieldAlignment(
+                    else => try mod.structFieldAlignmentAdvanced(
                         struct_type.fieldAlign(ip, field_index),
                         field_ty,
                         struct_type.layout,
+                        .sema,
                     ),
                 };
 
@@ -18555,7 +18492,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 .ty = array_fields_ty.toIntern(),
                 .storage = .{ .elems = struct_field_vals },
             } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                 .child = struct_field_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -18591,7 +18528,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -18635,7 +18572,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t type_opaque_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeFields(ty);
+            try ty.resolveFields(mod);
             const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
 
             const field_values = .{
@@ -18677,7 +18614,6 @@ fn typeInfoDecls(
         const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
         break :t declaration_ty_decl.val.toType();
     };
-    try sema.queueFullTypeResolution(declaration_ty);
 
     var decl_vals = std.ArrayList(InternPool.Index).init(gpa);
     defer decl_vals.deinit();
@@ -18695,7 +18631,7 @@ fn typeInfoDecls(
         .ty = array_decl_ty.toIntern(),
         .storage = .{ .elems = decl_vals.items },
     } });
-    const slice_ty = (try sema.ptrType(.{
+    const slice_ty = (try mod.ptrTypeSema(.{
         .child = declaration_ty.toIntern(),
         .flags = .{
            .size = .Slice,
@@ -19295,7 +19231,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
     const operand_ty = sema.typeOf(operand);
     const ptr_info = operand_ty.ptrInfo(mod);
-    const res_ty = try sema.ptrType(.{
+    const res_ty = try mod.ptrTypeSema(.{
         .child = err_union_ty.errorUnionPayload(mod).toIntern(),
         .flags = .{
             .is_const = ptr_info.flags.is_const,
@@ -19528,11 +19464,11 @@ fn retWithErrTracing(
         else => true,
     };
     const gpa = sema.gpa;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
-    const return_err_fn = try sema.getBuiltin("returnError");
+    const return_err_fn = try mod.getBuiltin("returnError");
     const args: [1]Air.Inst.Ref = .{err_return_trace};
 
     if (!need_check) {
@@ -19735,7 +19671,7 @@ fn analyzeRet(
         return sema.failWithOwnedErrorMsg(block, msg);
     }
 
-    try sema.resolveTypeLayout(sema.fn_ret_ty);
+
try sema.fn_ret_ty.resolveLayout(mod); try sema.validateRuntimeValue(block, operand_src, operand); @@ -19817,7 +19753,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?; + const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; @@ -19851,7 +19787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size, }); } - const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema); + const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, @@ -19892,7 +19828,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ @@ -19983,7 +19919,7 @@ fn structInitEmpty( const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); @@ -20054,7 +19990,6 @@ fn unionInit( try sema.requireRuntimeBlock(block, init_src, null); _ = union_ty_src; - try sema.queueFullTypeResolution(union_ty); return block.addUnionInit(union_ty, field_index, init); } @@ -20083,7 +20018,7 @@ fn zirStructInit( else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); - try sema.resolveTypeLayout(resolved_ty); + try resolved_ty.resolveLayout(mod); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. 
@@ -20124,7 +20059,7 @@ fn zirStructInit( const field_ty = resolved_ty.structFieldType(field_index, mod); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { - try sema.resolveStructFieldInits(resolved_ty); + try resolved_ty.resolveStructFieldInits(mod); if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ @@ -20197,7 +20132,7 @@ fn zirStructInit( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20211,7 +20146,6 @@ fn zirStructInit( } try sema.requireRuntimeBlock(block, src, null); - try sema.queueFullTypeResolution(resolved_ty); const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst); return sema.coerce(block, result_ty, union_val, src); } @@ -20288,7 +20222,7 @@ fn finishStructInit( continue; } - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { @@ -20358,9 +20292,9 @@ fn finishStructInit( } if (is_ref) { - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20380,8 +20314,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); - try sema.resolveStructFieldInits(struct_ty); - try sema.queueFullTypeResolution(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const struct_val = try block.addAggregateInit(struct_ty, field_inits); return sema.coerce(block, result_ty, struct_val, init_src); } @@ -20490,7 +20423,7 @@ fn structInitAnon( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20504,7 +20437,7 @@ fn structInitAnon( }; extra_index = item.end; - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = field_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20597,7 +20530,7 @@ fn zirArrayInit( dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); if (is_tuple) { if (array_ty.structFieldIsComptime(i, mod)) - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); if (try array_ty.structFieldValueComptime(mod, i)) |field_val| { const init_val = try sema.resolveValue(dest.*) orelse { return sema.failWithNeededComptime(block, elem_src, .{ @@ -20641,11 +20574,10 @@ fn zirArrayInit( .init_node_offset = src.offset.node_offset.x, .elem_index = runtime_index, } })); - try sema.queueFullTypeResolution(array_ty); if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20654,7 +20586,7 @@ fn zirArrayInit( if (is_tuple) { for 
(resolved_args, 0..) |arg, i| { - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.structFieldType(i, mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20667,7 +20599,7 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.elemType2(mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20755,14 +20687,14 @@ fn arrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) |operand, i_usize| { const i: u32 = @intCast(i_usize); - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20832,7 +20764,7 @@ fn fieldType( const ip = &mod.intern_pool; var cur_ty = aggregate_ty; while (true) { - try sema.resolveTypeFields(cur_ty); + try cur_ty.resolveFields(mod); switch (cur_ty.zigTypeTag(mod)) { .Struct => switch (ip.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { @@ -20883,8 +20815,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); @@ -20918,9 +20850,6 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } const val = try ty.lazyAbiAlignment(mod); - if (val.isLazyAlign(mod)) { - try sema.queueFullTypeResolution(ty); - } return Air.internedToRef(val.toIntern()); } @@ -21095,7 +21024,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(operand_ty); + try operand_ty.resolveLayout(mod); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); @@ -21171,7 +21100,7 @@ fn zirReify( }, }, }; - const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_ty = try mod.getBuiltinType("Type"); const uncasted_operand = try sema.resolveInst(extra.operand); const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{ @@ -21205,7 +21134,7 @@ fn zirReify( ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = try mod.intType(signedness, bits); return Air.internedToRef(ty.toIntern()); }, @@ 
-21220,7 +21149,7 @@ fn zirReify( try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema)); + const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -21238,7 +21167,7 @@ fn zirReify( try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), ).?); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -21288,7 +21217,7 @@ fn zirReify( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?; + const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?; if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int}); } @@ -21296,7 +21225,7 @@ fn zirReify( const elem_ty = child_val.toType(); if (abi_align != .none) { - try sema.resolveTypeLayout(elem_ty); + try elem_ty.resolveLayout(mod); } const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); @@ -21340,7 +21269,7 @@ fn zirReify( } } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = actual_sentinel, .flags = .{ @@ -21369,7 +21298,7 @@ fn zirReify( try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), ).?); - const len = try len_val.toUnsignedIntAdvanced(sema); + const len = try len_val.toUnsignedIntSema(mod); const child_ty = child_val.toType(); const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try mod.singleMutPtrType(child_ty); @@ -21476,7 +21405,7 @@ fn zirReify( const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } @@ -21509,7 +21438,7 @@ fn zirReify( try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } @@ -21527,7 +21456,7 @@ fn zirReify( ).?); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified opaque must have no decls", .{}); } @@ -21575,7 +21504,7 @@ fn zirReify( try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21934,7 +21863,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -21979,7 +21908,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 
2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -22036,6 +21965,7 @@ fn reifyUnion( loaded_union.flagsPtr(ip).status = .have_field_types; try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -22109,7 +22039,7 @@ fn reifyStruct( if (field_is_comptime) any_comptime_fields = true; if (field_default_value != .none) any_default_inits = true; - switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) { + switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) { .eq => {}, .gt => any_aligned_fields = true, .lt => unreachable, @@ -22192,7 +22122,7 @@ fn reifyStruct( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema); + const byte_align = try field_alignment_val.toUnsignedIntSema(mod); if (byte_align == 0) { if (layout != .@"packed") { struct_type.field_aligns.get(ip)[field_idx] = .none; @@ -22278,7 +22208,7 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); - sema.resolveTypeLayout(field_ty) catch |err| switch (err) { + field_ty.resolveLayout(mod) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.errNote(src, msg, "while checking a field of this struct", .{}); @@ -22300,11 +22230,12 @@ fn reifyStruct( } try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); const inst = try sema.resolveInst(zir_ref); @@ -22343,7 +22274,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref); @@ -22363,7 +22294,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addInst(.{ .tag = .c_va_start, @@ -22497,7 +22428,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro _ = try sema.checkIntType(block, operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { - const result_val = try 
operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema); + const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, .{ @@ -22545,7 +22476,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkPtrType(block, src, ptr_ty, true); const elem_ty = ptr_ty.elemType2(mod); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { @@ -22644,7 +22575,7 @@ fn ptrFromIntVal( } return sema.failWithUseOfUndef(block, operand_src); } - const addr = try operand_val.toUnsignedIntAdvanced(sema); + const addr = try operand_val.toUnsignedIntSema(zcu); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)}); if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) @@ -22842,8 +22773,8 @@ fn ptrCastFull( const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - try sema.resolveTypeLayout(Type.fromInterned(src_info.child)); - try sema.resolveTypeLayout(Type.fromInterned(dest_info.child)); + try Type.fromInterned(src_info.child).resolveLayout(mod); + try Type.fromInterned(dest_info.child).resolveLayout(mod); const src_slice_like = src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array); @@ -23091,7 +23022,7 @@ fn ptrCastFull( // Only convert to a many-pointer at first var info = dest_info; info.flags.size = .Many; - const ty = try sema.ptrType(info); + const ty = try mod.ptrTypeSema(info); if (dest_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(ty.toIntern()); } else { @@ -23109,7 +23040,7 @@ fn ptrCastFull( return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_align.compare(.gt, src_align)) { - if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { + if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| { if (!dest_align.check(addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, @@ -23176,7 +23107,7 @@ fn ptrCastFull( // We can't change address spaces with a bitcast, so this requires two instructions var intermediate_info = src_info; intermediate_info.flags.address_space = dest_info.flags.address_space; - const intermediate_ptr_ty = try sema.ptrType(intermediate_info); + const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info); const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); } else intermediate_ptr_ty; @@ -23233,7 +23164,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = blk: { - const dest_ty = try sema.ptrType(ptr_info); + const dest_ty = try mod.ptrTypeSema(ptr_info); if (operand_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(dest_ty.toIntern()); } @@ -23523,7 +23454,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(ty); + try 
ty.resolveLayout(mod); switch (ty.zigTypeTag(mod)) { .Struct => {}, else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}), @@ -23766,7 +23697,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -23776,7 +23707,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero; wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile; - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -23953,7 +23884,7 @@ fn resolveExportOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const export_options_ty = try sema.getBuiltinType("ExportOptions"); + const export_options_ty = try mod.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -24017,7 +23948,7 @@ fn resolveBuiltinEnum( reason: NeededComptimeReason, ) CompileError!@field(std.builtin, name) { const mod = sema.mod; - const ty = try sema.getBuiltinType(name); + const ty = try mod.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); @@ -24777,7 +24708,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data; const func = try sema.resolveInst(extra.callee); - const modifier_ty = try sema.getBuiltinType("CallModifier"); + const modifier_ty = try mod.getBuiltinType("CallModifier"); const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{ @@ -24881,7 +24812,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins .Struct, .Union => {}, else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}), } - try sema.resolveTypeLayout(parent_ty); + try parent_ty.resolveLayout(zcu); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", @@ -24912,7 +24843,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_parent_ptr_info: InternPool.Key.PtrType = .{ .child = parent_ty.toIntern(), .flags = .{ - .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24924,7 +24855,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ - .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try 
field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24935,12 +24866,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment( + if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced( struct_obj.fieldAlign(ip, field_index), field_ty, struct_obj.layout, + .sema, ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try sema.unionFieldAlignment(union_obj, field_index) + try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) else actual_field_ptr_info.flags.alignment, ); @@ -24970,9 +24902,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, } - const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info); + const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src); - const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info); + const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info); const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: { switch (parent_ty.zigTypeTag(zcu)) { @@ -25032,7 +24964,6 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src); } else result: { try sema.requireRuntimeBlock(block, inst_src, field_ptr_src); - try sema.queueFullTypeResolution(parent_ty); break :result try block.addInst(.{ .tag = .field_parent_ptr, .data = .{ .ty_pl = .{ @@ -25345,7 +25276,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A // Already an array pointer. return ptr; } - const new_ty = try sema.ptrType(.{ + const new_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = len, .sentinel = info.sentinel, @@ -25444,7 +25375,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try mod.intRef(Type.usize, i); @@ -25503,7 +25434,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = try val.toUnsignedIntAdvanced(sema); + const len = try val.toUnsignedIntSema(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
return; @@ -25550,7 +25481,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(dest_manyptr_ty_key.flags.size == .One); dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); dest_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; const new_src_ptr_ty = sema.typeOf(new_src_ptr); @@ -25561,7 +25492,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(src_manyptr_ty_key.flags.size == .One); src_manyptr_ty_key.child = src_elem_ty.toIntern(); src_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; // ok1: dest >= src + len @@ -25628,7 +25559,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
@@ -25808,7 +25739,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { @@ -25828,7 +25759,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => break :blk null, else => |e| return e, }; - const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; @@ -25904,7 +25835,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; - const cc_ty = try sema.getBuiltinType("CallingConvention"); + const cc_ty = try mod.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{ .needed_comptime_reason = "calling convention must be comptime-known", }); @@ -26117,7 +26048,7 @@ fn resolvePrefetchOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const options_ty = try sema.getBuiltinType("PrefetchOptions"); + const options_ty = try mod.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26141,7 +26072,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)), + .locality = @intCast(try locality_val.toUnsignedIntSema(mod)), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -26189,7 +26120,7 @@ fn resolveExternOptions( const gpa = sema.gpa; const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); - const extern_options_ty = try sema.getBuiltinType("ExternOptions"); + const extern_options_ty = try mod.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26440,7 +26371,7 @@ fn explainWhyTypeIsComptime( var type_set = TypeSet{}; defer type_set.deinit(sema.gpa); - try sema.resolveTypeFully(ty); + try ty.resolveFully(sema.mod); return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set); } @@ -26567,7 +26498,7 @@ const ExternPosition = enum { /// Returns true if `ty` is allowed in extern types. /// Does *NOT* require `ty` to be resolved in any way. -/// Calls `resolveTypeLayout` for packed containers. +/// Calls `resolveLayout` for packed containers. 
fn validateExternType( sema: *Sema, ty: Type, @@ -26618,7 +26549,7 @@ fn validateExternType( .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { - const bit_size = try ty.bitSizeAdvanced(mod, sema); + const bit_size = try ty.bitSizeAdvanced(mod, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, @@ -26796,11 +26727,11 @@ fn explainWhyTypeIsNotPacked( } } -fn prepareSimplePanic(sema: *Sema, block: *Block) !void { +fn prepareSimplePanic(sema: *Sema) !void { const mod = sema.mod; if (mod.panic_func_index == .none) { - const decl_index = (try sema.getBuiltinDecl(block, "panic")); + const decl_index = (try mod.getBuiltinDecl("panic")); // decl_index may be an alias; we must find the decl that actually // owns the function. try sema.ensureDeclAnalyzed(decl_index); @@ -26813,10 +26744,10 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void { } if (mod.null_stack_trace == .none) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const target = mod.getTarget(); - const ptr_stack_trace_ty = try sema.ptrType(.{ + const ptr_stack_trace_ty = try mod.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), @@ -26838,9 +26769,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); - const panic_messages_ty = try sema.getBuiltinType("panic_messages"); + const panic_messages_ty = try mod.getBuiltinType("panic_messages"); const msg_decl_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, @@ -26946,7 +26877,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst. 
return; } - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); const panic_func = mod.funcInfo(mod.panic_func_index); const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); @@ -26992,7 +26923,7 @@ fn panicUnwrapError( if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try sema.mod.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; @@ -27051,7 +26982,7 @@ fn panicSentinelMismatch( const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { - const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); + const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; @@ -27069,7 +27000,7 @@ fn panicSentinelMismatch( } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { - const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); + const panic_fn = try mod.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; @@ -27108,7 +27039,7 @@ fn safetyCheckFormatted( if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin(func); + const panic_fn = try sema.mod.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27170,7 +27101,7 @@ fn fieldVal( return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27267,7 +27198,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); @@ -27361,7 +27292,7 @@ fn fieldPtr( return anonDeclRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const new_ptr_ty = try sema.ptrType(.{ + const new_ptr_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27376,7 +27307,7 @@ fn fieldPtr( .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), 
.sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27410,7 +27341,7 @@ fn fieldPtr( if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27420,7 +27351,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27428,7 +27359,7 @@ fn fieldPtr( try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27438,7 +27369,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27506,7 +27437,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); @@ -27601,7 +27532,7 @@ fn fieldCallBind( find_field: { switch (concrete_ty.zigTypeTag(mod)) { .Struct => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); if (mod.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; @@ -27627,7 +27558,7 @@ fn fieldCallBind( } }, .Union => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); const union_obj = mod.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); @@ -27737,7 +27668,7 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { const mod = sema.mod; - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), @@ -27748,14 +27679,14 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { - try sema.resolveStructFieldInits(container_ty); + try container_ty.resolveStructFieldInits(mod); const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?; return .{ .direct = Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const ptr_val = try struct_ptr_val.ptrField(field_index, sema); + const ptr_val = try struct_ptr_val.ptrField(field_index, mod); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try 
sema.analyzeLoad(block, src, pointer, src) }; } @@ -27831,8 +27762,8 @@ fn structFieldPtr( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveLayout(mod); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { @@ -27871,7 +27802,7 @@ fn structFieldPtrByIndex( } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, sema); + const val = try struct_ptr_val.ptrField(field_index, mod); return Air.internedToRef(val.toIntern()); } @@ -27915,10 +27846,11 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, + .sema, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -27926,10 +27858,10 @@ fn structFieldPtrByIndex( field_align.min(parent_align); } - const ptr_field_ty = try sema.ptrType(ptr_ty_data); + const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, @@ -27955,7 +27887,7 @@ fn structFieldVal( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -27966,7 +27898,7 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } @@ -27983,7 +27915,7 @@ fn structFieldVal( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { @@ -28050,7 +27982,7 @@ fn tupleFieldValByIndex( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } @@ -28071,7 +28003,7 @@ fn tupleFieldValByIndex( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } @@ -28092,11 +28024,11 @@ fn unionFieldPtr( const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try 
sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, @@ -28107,7 +28039,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); - const field_align = try sema.unionFieldAlignment(union_obj, field_index); + const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28163,7 +28095,7 @@ fn unionFieldPtr( }, .@"packed", .@"extern" => {}, } - const field_ptr_val = try union_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try union_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28198,7 +28130,7 @@ fn unionFieldVal( const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); @@ -28237,7 +28169,7 @@ fn unionFieldVal( .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. return Air.internedToRef(un.val); - } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| { + } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } @@ -28256,7 +28188,7 @@ fn unionFieldVal( _ = try block.addNoOp(.unreach); return .unreachable_value; } - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(zcu); return block.addStructFieldVal(union_byval, field_index, field_ty); } @@ -28287,7 +28219,7 @@ fn elemPtr( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -28325,11 +28257,11 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); - const elem_ptr = try ptr_val.ptrElem(index, sema); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const elem_ptr = try ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); }; - const result_ty = try sema.elemPtrType(indexable_ty, null); + const result_ty = try indexable_ty.elemPtrType(null, mod); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28343,7 +28275,7 @@ fn elemPtrOneLayerOnly( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); 
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -28383,12 +28315,12 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); - const elem_ptr_val = try many_ptr_val.ptrElem(index, sema); + const elem_ptr_val = try many_ptr_val.ptrElem(index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern()); } @@ -28404,7 +28336,7 @@ fn elemVal( if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; - const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } @@ -28422,7 +28354,7 @@ fn elemVal( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -28467,7 +28399,7 @@ fn tupleFieldPtr( const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28481,7 +28413,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index, mod); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), @@ -28491,7 +28423,7 @@ fn tupleFieldPtr( }); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return Air.internedToRef((try mod.intern(.{ .ptr = .{ @@ -28502,7 +28434,7 @@ fn tupleFieldPtr( } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { - const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28524,7 +28456,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = sema.typeOf(tuple); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if 
(field_count == 0) { @@ -28540,7 +28472,7 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } @@ -28553,7 +28485,7 @@ fn tupleField( try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple, field_index, field_ty); } @@ -28583,7 +28515,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); @@ -28599,7 +28531,7 @@ fn elemValArray( return mod.undefRef(elem_ty); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_val = try array_val.elemValue(mod, index); return Air.internedToRef(elem_val.toIntern()); } @@ -28621,7 +28553,6 @@ fn elemValArray( return Air.internedToRef(elem_val.toIntern()); try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(array_ty); return block.addBinOp(.array_elem_val, array, elem_index); } @@ -28650,7 +28581,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -28658,14 +28589,14 @@ fn elemPtrArray( break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); + const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.ptrElem(index, sema); + const elem_ptr = try array_ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); } } @@ -28710,19 +28641,19 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_ty = try slice_ty.elemPtrType(index, mod); + const elem_ptr_val = try slice_val.ptrElem(index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } @@ -28735,13 +28666,12 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try mod.intRef(Type.usize, try slice_val.sliceLen(sema)) + try mod.intRef(Type.usize, try slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } - try sema.queueFullTypeResolution(sema.typeOf(slice)); return block.addBinOp(.slice_elem_val, slice, elem_index); } @@ -28762,17 +28692,17 @@ fn elemPtrSlice( const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); + const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -28782,7 +28712,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_val = try slice_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr_val.toIntern()); } } @@ -28795,7 +28725,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) - break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28860,9 +28790,9 @@ fn coerceExtra( if (dest_ty.isGenericPoison()) return inst; const zcu = sema.mod; const dest_ty_src = inst_src; // TODO better source location - try sema.resolveTypeFields(dest_ty); + try dest_ty.resolveFields(zcu); const inst_ty = sema.typeOf(inst); - try sema.resolveTypeFields(inst_ty); + try inst_ty.resolveFields(zcu); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) @@ -28876,7 +28806,6 @@ fn coerceExtra( return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_ty); const new_val = try block.addBitCast(dest_ty, inst); try sema.checkKnownAllocPtr(block, inst, new_val); return new_val; @@ -29172,7 +29101,7 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. 
if (inst_child_ty.structFieldCount(zcu) == 0) { - const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema); + const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema); return Air.internedToRef(try zcu.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try zcu.intern(.{ .ptr = .{ @@ -29317,7 +29246,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema); + const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { @@ -30649,7 +30578,6 @@ fn storePtr2( } try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(elem_ty); if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; @@ -30871,10 +30799,10 @@ fn bitCast( operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { const zcu = sema.mod; - try sema.resolveTypeLayout(dest_ty); + try dest_ty.resolveLayout(zcu); const old_ty = sema.typeOf(inst); - try sema.resolveTypeLayout(old_ty); + try old_ty.resolveLayout(zcu); const dest_bits = dest_ty.bitSize(zcu); const old_bits = old_ty.bitSize(zcu); @@ -31056,7 +30984,7 @@ fn coerceEnumToUnion( const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFields(field_ty); + try field_ty.resolveFields(mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); @@ -31469,8 +31397,8 @@ fn coerceTupleToStruct( ) !Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveStructFieldInits(mod); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); @@ -31817,7 +31745,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl }); // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); - const ptr_ty = try sema.ptrType(.{ + const ptr_ty = try mod.ptrTypeSema(.{ .child = decl_val.typeOf(mod).toIntern(), .flags = .{ .alignment = owner_decl.alignment, @@ -31864,14 +31792,14 @@ fn analyzeRef( try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); - const mut_ptr_type = try sema.ptrType(.{ + const mut_ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); @@ -31979,7 +31907,7 @@ fn analyzeSliceLen( if (slice_val.isUndef(mod)) { return mod.undefRef(Type.usize); } - return mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + return mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); @@ -32347,7 +32275,7 @@ fn analyzeSlice( assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; - break :ptr try 
sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); @@ -32416,7 +32344,7 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const len_plus_sent = slice_len + @intFromBool(has_sentinel); const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { @@ -32431,7 +32359,7 @@ fn analyzeSlice( "end index {} out of bounds for slice of length {d}{s}", .{ end_val.fmtValue(mod, sema), - try slice_val.sliceLen(sema), + try slice_val.sliceLen(mod), sentinel_label, }, ); @@ -32504,7 +32432,7 @@ fn analyzeSlice( const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); - const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema); + const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -32567,9 +32495,9 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema); + const new_len_int = try new_len_val.toUnsignedIntSema(mod); - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, @@ -32631,7 +32559,7 @@ fn analyzeSlice( return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ @@ -32659,7 +32587,7 @@ fn analyzeSlice( if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel - break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); @@ -32751,7 +32679,7 @@ fn cmpNumeric( if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } - return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) + return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema)) .bool_true else .bool_false; @@ -32820,11 +32748,11 @@ fn cmpNumeric( // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. 
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| - !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try lhs_val.compareAllWithZeroSema(.gte, mod)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| - !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try rhs_val.compareAllWithZeroSema(.gte, mod)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; @@ -32972,7 +32900,7 @@ fn compareIntsOnlyPossibleResult( ) Allocator.Error!?bool { const mod = sema.mod; const rhs_info = rhs_ty.intInfo(mod); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -33136,7 +33064,6 @@ fn wrapErrorUnionPayload( } }))); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_payload_ty); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } @@ -33939,7 +33866,7 @@ fn resolvePeerTypesInner( opt_ptr_info = ptr_info; } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { @@ -34249,7 +34176,7 @@ fn resolvePeerTypesInner( }, } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .func => { @@ -34606,7 +34533,7 @@ fn resolvePeerTypesInner( var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse { comptime_val = null; @@ -34742,181 +34669,22 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type)); + try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. - _ = try sema.getBuiltinType("StackTrace"); + _ = try mod.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i])); + try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod); } } -/// Make it so that calling hash() and eql() on `val` will not assert due -/// to a type not having its layout resolved. 
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => return val, - .lazy_align, .lazy_size => return mod.intValue( - Type.fromInterned(int.ty), - (try val.getUnsignedIntAdvanced(mod, sema)).?, - ), - }, - .slice => |slice| { - const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr)); - const len = try sema.resolveLazyValue(Value.fromInterned(slice.len)); - if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; - return Value.fromInterned(try mod.intern(.{ .slice = .{ - .ty = slice.ty, - .ptr = ptr.toIntern(), - .len = len.toIntern(), - } })); - }, - .ptr => |ptr| { - switch (ptr.base_addr) { - .decl, .comptime_alloc, .anon_decl, .int => return val, - .comptime_field => |field_val| { - const resolved_field_val = - (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern(); - return if (resolved_field_val == field_val) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = .{ .comptime_field = resolved_field_val }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .eu_payload, .opt_payload => |base| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern(); - return if (resolved_base == base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .eu_payload => .{ .eu_payload = resolved_base }, - .opt_payload => .{ .opt_payload = resolved_base }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .arr_elem, .field => |base_index| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern(); - return if (resolved_base == base_index.base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .arr_elem => .{ .arr_elem = .{ - .base = resolved_base, - .index = base_index.index, - } }, - .field => .{ .field = .{ - .base = resolved_base, - .index = base_index.index, - } }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - } - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => return val, - .elems => |elems| { - var resolved_elems: []InternPool.Index = &.{}; - for (elems, 0..) 
|elem, i| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - if (resolved_elems.len == 0 and resolved_elem != elem) { - resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); - @memcpy(resolved_elems[0..i], elems[0..i]); - } - if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; - } - return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .elems = resolved_elems }, - } }))); - }, - .repeated_elem => |elem| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .repeated_elem = resolved_elem }, - } }))); - }, - }, - .un => |un| { - const resolved_tag = if (un.tag == .none) - .none - else - (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern(); - const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern(); - return if (resolved_tag == un.tag and resolved_val == un.val) - val - else - Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = un.ty, - .tag = resolved_tag, - .val = resolved_val, - } }))); - }, - else => return val, - } -} - -pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return sema.resolveSimpleType(simple_type), - else => {}, - } - switch (ty.zigTypeTag(mod)) { - .Struct => return sema.resolveStructLayout(ty), - .Union => return sema.resolveUnionLayout(ty), - .Array => { - if (ty.arrayLenIncludingSentinel(mod) == 0) return; - const elem_ty = ty.childType(mod); - return sema.resolveTypeLayout(elem_ty); - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - // In case of querying the ABI alignment of this optional, we will ask - // for hasRuntimeBits() of the payload type, so we need "requires comptime" - // to be known already before this function returns. - _ = try sema.typeRequiresComptime(payload_ty); - return sema.resolveTypeLayout(payload_ty); - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - return sema.resolveTypeLayout(payload_ty); - }, - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. - return; - } - const ip = &mod.intern_pool; - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeLayout(Type.fromInterned(param_ty)); - } - try sema.resolveTypeLayout(Type.fromInterned(info.return_type)); - }, - else => {}, - } + return val.resolveLazy(sema.arena, sema.mod); } /// Resolve a struct's alignment only without triggering resolution of its layout. 
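The hunk above reduces `resolveLazyValue` to a thin wrapper around the new `Value.resolveLazy`. A minimal sketch of the call pattern, assuming `val`, `arena`, and `zcu` are in scope (the surrounding names are illustrative; only the `resolveLazy(arena, zcu)` call itself comes from this patch):

    // Force any `lazy_align`/`lazy_size` storage nested inside `val`, so that
    // later calls to hash() and eql() cannot assert on an unresolved layout.
    const resolved = try val.resolveLazy(arena, zcu);
    // If nothing in `val` was lazy, `resolved` is simply `val` again.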
@@ -34925,11 +34693,13 @@ pub fn resolveStructAlignment( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(struct_type.flagsPtr(ip).alignment == .none); assert(struct_type.layout != .@"packed"); @@ -34940,7 +34710,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsStruct(ty, struct_type); @@ -34952,7 +34722,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } defer struct_type.clearAlignmentWip(ip); @@ -34962,30 +34732,35 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); result = result.maxStrict(field_align); } struct_type.flagsPtr(ip).alignment = result; - return result; } -fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.haveLayout(ip)) return; - try sema.resolveTypeFields(ty); + try ty.resolveFields(zcu); if (struct_type.layout == .@"packed") { - try semaBackingIntType(zcu, struct_type); + semaBackingIntType(zcu, struct_type) catch |err| switch (err) { + error.OutOfMemory, error.AnalysisFail => |e| return e, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, + }; return; } @@ -35021,10 +34796,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }, else => return err, }; - field_align.* = try sema.structFieldAlignment( + field_align.* = try zcu.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); big_align = big_align.maxStrict(field_align.*); } @@ -35160,7 +34936,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(mod, &sema); + accumulator += try field_ty.bitSizeAdvanced(mod, .sema); } break :blk accumulator; }; @@ -35270,11 +35046,13 @@ pub fn resolveUnionAlignment( sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(!union_type.haveLayout(ip)); if (union_type.flagsPtr(ip).status == .field_types_wip) { @@ -35284,7 +35062,7 @@ pub fn resolveUnionAlignment( union_type.flagsPtr(ip).assumed_pointer_aligned = true; const 
result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); union_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsUnion(ty, union_type); @@ -35304,11 +35082,10 @@ pub fn resolveUnionAlignment( } union_type.flagsPtr(ip).alignment = max_align; - return max_align; } /// This logic must be kept in sync with `Module.getUnionLayout`. -fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; @@ -35317,6 +35094,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (union_type.flagsPtr(ip).status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -35425,53 +35204,15 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. -pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { - .Pointer => { - return sema.resolveTypeFully(ty.childType(mod)); - }, - .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => try sema.resolveStructFully(ty), - .anon_struct_type => |tuple| { - for (tuple.types.get(ip)) |field_ty| { - try sema.resolveTypeFully(Type.fromInterned(field_ty)); - } - }, - .simple_type => |simple_type| try sema.resolveSimpleType(simple_type), - else => {}, - }, - .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType(mod)), - .Optional => { - return sema.resolveTypeFully(ty.optionalChild(mod)); - }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. 
- return; - } - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeFully(Type.fromInterned(param_ty)); - } - try sema.resolveTypeFully(Type.fromInterned(info.return_type)); - }, - else => {}, - } -} - -fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -35481,16 +35222,19 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } } -fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveUnionLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; + + assert(sema.ownerUnit().unwrap().decl == union_obj.decl); + switch (union_obj.flagsPtr(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -35506,7 +35250,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { union_obj.flagsPtr(ip).status = .fully_resolved_wip; for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } union_obj.flagsPtr(ip).status = .fully_resolved; } @@ -35515,135 +35259,18 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { _ = try sema.typeRequiresComptime(ty); } -pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - const ty_ip = ty.toIntern(); - - switch (ty_ip) { - .none => unreachable, - - .u0_type, - .i0_type, - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u80_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .adhoc_inferred_error_set_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .anyframe_type, - .null_type, - .undefined_type, - .enum_literal_type, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, - .slice_const_u8_type, - .slice_const_u8_sentinel_0_type, - .optional_noreturn_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .empty_struct_type, - => {}, - - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .zero_u8 => unreachable, - .one => unreachable, - .one_usize => unreachable, - .one_u8 => unreachable, - .four_u8 => unreachable, - .negative_one => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => 
unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { - .type_struct, - .type_struct_packed, - .type_struct_packed_inits, - => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)), - - .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)), - .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type), - else => {}, - }, - } -} - -/// Fully resolves a simple type. This is usually a nop, but for builtin types with -/// special InternPool indices (such as std.builtin.Type) it will analyze and fully -/// resolve the container type. -fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void { - const builtin_type_name: []const u8 = switch (simple_type) { - .atomic_order => "AtomicOrder", - .atomic_rmw_op => "AtomicRmwOp", - .calling_convention => "CallingConvention", - .address_space => "AddressSpace", - .float_mode => "FloatMode", - .reduce_op => "ReduceOp", - .call_modifier => "CallModifer", - .prefetch_options => "PrefetchOptions", - .export_options => "ExportOptions", - .extern_options => "ExternOptions", - .type_info => "Type", - else => return, - }; - // This will fully resolve the type. - _ = try sema.getBuiltinType(builtin_type_name); -} - pub fn resolveTypeFieldsStruct( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!void { +) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; // If there is no owner decl it means the struct has no fields. const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + switch (zcu.declPtr(owner_decl).analysis) { .file_failure, .dependency_failure, @@ -35674,16 +35301,19 @@ pub fn resolveTypeFieldsStruct( } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; } -pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -35706,15 +35336,19 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; struct_type.setHaveFieldInits(ip); } -pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void { +pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const owner_decl = zcu.declPtr(union_type.decl); + + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (owner_decl.analysis) { .file_failure, .dependency_failure, @@ -35752,7 +35386,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load } return 
error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; union_type.flagsPtr(ip).status = .have_field_types; } @@ -36801,106 +36436,6 @@ fn generateUnionTagTypeSimple( return enum_ty; } -fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - const zcu = sema.mod; - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const decl_index = try getBuiltinDecl(sema, &block, name); - return sema.analyzeDeclVal(&block, src, decl_index); -} - -fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex { - const gpa = sema.gpa; - - const src = block.nodeOffset(0); - - const mod = sema.mod; - const ip = &mod.intern_pool; - const std_mod = mod.std_mod; - const std_file = (mod.importPkg(std_mod) catch unreachable).file; - const opt_builtin_inst = (try sema.namespaceLookupRef( - block, - src, - mod.declPtr(std_file.root_decl.unwrap().?).src_namespace.toOptional(), - try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls), - )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); - const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src); - const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}), - else => |e| return e, - }; - const decl_index = (try sema.namespaceLookup( - block, - src, - builtin_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, name, .no_embedded_nulls), - )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); - return decl_index; -} - -fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { - const zcu = sema.mod; - const ty_inst = try sema.getBuiltin(name); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| 
switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}), - else => |e| return e, - }; - try sema.resolveTypeFully(result_ty); // Should not fail - return result_ty; -} - /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts @@ -37104,8 +36639,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .struct_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const struct_type = ip.loadStructType(ty.toIntern()); - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and @@ -37122,20 +36660,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(zcu); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "struct '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; @@ -37163,8 +36692,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .union_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const union_obj = ip.loadUnionType(ty.toIntern()); - try sema.resolveTypeFieldsUnion(ty, union_obj); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse return null; if (union_obj.field_types.len == 0) { @@ -37172,15 +36704,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); - if (only_field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "union '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try zcu.intern(.{ .un = .{ @@ -37298,7 +36821,7 @@ fn analyzeComptimeAlloc( // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = var_type.toIntern(), .flags = .{ .alignment = alignment, @@ -37485,64 +37008,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
-pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return ty.comptimeOnlyAdvanced(sema.mod, sema); +pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { + return ty.comptimeOnlyAdvanced(sema.mod, .sema); } -pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const mod = sema.mod; - return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { +pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { + return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } -pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 { - try sema.resolveTypeLayout(ty); +pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { + try ty.resolveLayout(sema.mod); return ty.abiSize(sema.mod); } -pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment { - return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; -} - -/// Not valid to call for packed unions. -/// Keep implementation in sync with `Module.unionFieldNormalAlignment`. -pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment { - const mod = sema.mod; - const ip = &mod.intern_pool; - const field_align = u.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]); - if (field_ty.isNoReturn(sema.mod)) return .none; - return sema.typeAbiAlignment(field_ty); -} - -/// Keep implementation in sync with `Module.structFieldAlignment`. -pub fn structFieldAlignment( - sema: *Sema, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) !Alignment { - if (explicit_alignment != .none) - return explicit_alignment; - const mod = sema.mod; - switch (layout) { - .@"packed" => return .none, - .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty), - .@"extern" => {}, - } - // extern - const ty_abi_align = try sema.typeAbiAlignment(field_ty); - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - return ty_abi_align.maxStrict(.@"16"); - } - return ty_abi_align; +pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { + return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.fnHasRuntimeBitsAdvanced(sema.mod, sema); + return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema); } fn unionFieldIndex( @@ -37554,7 +37041,7 @@ fn unionFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); @@ -37570,7 +37057,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { @@ -37601,10 +37088,6 @@ fn anonStructFieldIndex( }); } -fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { - try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); -} - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting 
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { @@ -37662,8 +37145,8 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37752,8 +37235,8 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37836,8 +37319,8 @@ fn intSubWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38024,7 +37507,7 @@ fn intFitsInType( fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const mod = sema.mod; - if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; + if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false; const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; @@ -38094,8 +37577,8 @@ fn intAddWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38149,7 +37632,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), - else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema), } } @@ -38185,80 +37668,6 @@ fn compareVector( } }))); } -/// Returns the type of a pointer to an element. -/// Asserts that the type is a pointer, and that the element type is indexable. -/// If the element index is comptime-known, it must be passed in `offset`. 
-/// For *@Vector(n, T), return *align(a:b:h:v) T -/// For *[N]T, return *T -/// For [*]T, returns *T -/// For []T, returns *T -/// Handles const-ness and address spaces in particular. -/// This code is duplicated in `analyzePtrArithmetic`. -pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { - const mod = sema.mod; - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_ty.elemType2(mod); - const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0; - const parent_ty = ptr_ty.childType(mod); - - const VI = InternPool.Key.PtrType.VectorIndex; - - const vector_info: struct { - host_size: u16 = 0, - alignment: Alignment = .none, - vector_index: VI = .none, - } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: { - const elem_bits = elem_ty.bitSize(mod); - if (elem_bits == 0) break :blk .{}; - const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); - if (!is_packed) break :blk .{}; - - break :blk .{ - .host_size = @intCast(parent_ty.arrayLen(mod)), - .alignment = parent_ty.abiAlignment(mod), - .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, - }; - } else .{}; - - const alignment: Alignment = a: { - // Calculate the new pointer alignment. - if (ptr_info.flags.alignment == .none) { - // In case of an ABI-aligned pointer, any pointer arithmetic - // maintains the same ABI-alignedness. - break :a vector_info.alignment; - } - // If the addend is not a comptime-known value we can still count on - // it being a multiple of the type size. - const elem_size = try sema.typeAbiSize(elem_ty); - const addend = if (offset) |off| elem_size * off else elem_size; - - // The resulting pointer is aligned to the lcd between the offset (an - // arbitrary number) and the alignment factor (always a power of two, - // non zero). - const new_align: Alignment = @enumFromInt(@min( - @ctz(addend), - ptr_info.flags.alignment.toLog2Units(), - )); - assert(new_align != .none); - break :a new_align; - }; - return sema.ptrType(.{ - .child = elem_ty.toIntern(), - .flags = .{ - .alignment = alignment, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = vector_info.vector_index, - }, - .packed_offset = .{ - .host_size = vector_info.host_size, - .bit_offset = 0, - }, - }); -} - /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. 
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { @@ -38299,13 +37708,6 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; } -pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type { - if (info.flags.alignment != .none) { - _ = try sema.typeAbiAlignment(Type.fromInterned(info.child)); - } - return sema.mod.ptrType(info); -} - pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { if (!sema.mod.comp.debug_incremental) return; @@ -38425,12 +37827,12 @@ fn maybeDerefSliceAsArray( else => unreachable, }; const elem_ty = Type.fromInterned(slice.ty).childType(zcu); - const len = try Value.fromInterned(slice.len).toUnsignedIntAdvanced(sema); + const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu); const array_ty = try zcu.arrayType(.{ .child = elem_ty.toIntern(), .len = len, }); - const ptr_ty = try sema.ptrType(p: { + const ptr_ty = try zcu.ptrTypeSema(p: { var p = Type.fromInterned(slice.ty).ptrInfo(zcu); p.flags.size = .One; p.child = array_ty.toIntern(); diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 9536ee33cd53..3c3ccdbfaaa4 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -78,8 +78,8 @@ fn bitCastInner( const val_ty = val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(dest_ty); + try val_ty.resolveLayout(zcu); + try dest_ty.resolveLayout(zcu); assert(val_ty.hasWellDefinedLayout(zcu)); @@ -136,8 +136,8 @@ fn bitCastSpliceInner( const val_ty = val.typeOf(zcu); const splice_val_ty = splice_val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(splice_val_ty); + try val_ty.resolveLayout(zcu); + try splice_val_ty.resolveLayout(zcu); const splice_bits = splice_val_ty.bitSize(zcu); diff --git a/src/Type.zig b/src/Type.zig index 96c3e055fdb6..9f11a70bf360 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -5,6 +5,7 @@ const std = @import("std"); const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; const Value = @import("Value.zig"); const assert = std.debug.assert; const Target = std.Target; @@ -18,6 +19,7 @@ const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; const Zir = std.zig.Zir; const Type = @This(); +const SemaError = Zcu.SemaError; ip_index: InternPool.Index, @@ -458,7 +460,7 @@ pub fn toValue(self: Type) Value { return Value.fromInterned(self.toIntern()); } -const RuntimeBitsError = Module.CompileError || error{NeedLazy}; +const RuntimeBitsError = SemaError || error{NeedLazy}; /// true if and only if the type takes up space in memory at runtime. /// There are two reasons a type will return false: @@ -475,7 +477,7 @@ pub fn hasRuntimeBitsAdvanced( ty: Type, mod: *Module, ignore_comptime_only: bool, - strat: AbiAlignmentAdvancedStrat, + strat: ResolveStratLazy, ) RuntimeBitsError!bool { const ip = &mod.intern_pool; return switch (ty.toIntern()) { @@ -488,8 +490,8 @@ pub fn hasRuntimeBitsAdvanced( // to comptime-only types do not, with the exception of function pointers. 
if (ignore_comptime_only) return true; return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(ty)), - .eager => !comptimeOnly(ty, mod), + .sema => !try ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !ty.comptimeOnly(mod), .lazy => error.NeedLazy, }; }, @@ -506,8 +508,8 @@ pub fn hasRuntimeBitsAdvanced( } if (ignore_comptime_only) return true; return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), - .eager => !comptimeOnly(child_ty, mod), + .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !child_ty.comptimeOnly(mod), .lazy => error.NeedLazy, }; }, @@ -578,7 +580,7 @@ pub fn hasRuntimeBitsAdvanced( return true; } switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), + .sema => try ty.resolveFields(mod), .eager => assert(struct_type.haveFieldTypes(ip)), .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, } @@ -622,7 +624,7 @@ pub fn hasRuntimeBitsAdvanced( }, } switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), + .sema => try ty.resolveFields(mod), .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) return error.NeedLazy, @@ -784,19 +786,18 @@ pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { } pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; + return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable; } /// Determines whether a function type has runtime bits, i.e. whether a /// function with this type can exist at runtime. /// Asserts that `ty` is a function type. -/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. -pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { +pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); + return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat); } pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { @@ -820,23 +821,23 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool { /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. 
 pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
-    return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
+    return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
+pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
     return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
                 return ptr_type.flags.alignment;
 
-            if (opt_sema) |sema| {
-                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema });
+            if (strat == .sema) {
+                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
                 return res.scalar;
             }
 
             return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
         },
-        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema),
+        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
         else => unreachable,
     };
 }
@@ -868,10 +869,34 @@ pub const AbiAlignmentAdvanced = union(enum) {
     val: Value,
 };
 
-pub const AbiAlignmentAdvancedStrat = union(enum) {
-    eager,
+pub const ResolveStratLazy = enum {
+    /// Return a `lazy_size` or `lazy_align` value if necessary.
+    /// This value can be resolved later using `Value.resolveLazy`.
     lazy,
-    sema: *Sema,
+    /// Return a scalar result, expecting all necessary type resolution to be completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    eager,
+    /// Return a scalar result, performing type resolution as necessary.
+    /// This should typically be used from semantic analysis.
+    sema,
+};
+
+/// The chosen strategy can be easily optimized away in release builds.
+/// However, in debug builds, it helps to avoid accidentally resolving types in backends.
+pub const ResolveStrat = enum {
+    /// Assert that all necessary resolution is completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    normal,
+    /// Perform type resolution as necessary using `Zcu`.
+    /// This should typically be used from semantic analysis.
+    sema,
+
+    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
+        return switch (strat) {
+            .normal => .eager,
+            .sema => .sema,
+        };
+    }
 };
 
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
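To make the two strategy enums above concrete, here is a minimal usage sketch (illustrative only, not part of this patch; it assumes a `ty: Type` and a `zcu: *Zcu` are in scope):

    // Backends must not resolve types; `.eager` asserts resolution already happened.
    const backend_align = (ty.abiAlignmentAdvanced(zcu, .eager) catch unreachable).scalar;
    // Semantic analysis resolves on demand and may therefore fail.
    const sema_align = (try ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
    // `ResolveStrat.toLazy` bridges into the lazy-capable enum:
    // `.normal` maps to `.eager`; `.sema` stays `.sema`.
    const lazy_strat = Type.ResolveStrat.normal.toLazy(); // == .eager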
@@ -883,17 +908,12 @@ pub const AbiAlignmentAdvancedStrat = union(enum) { pub fn abiAlignmentAdvanced( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiAlignmentAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; - switch (ty.toIntern()) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, else => switch (ip.indexToKey(ty.toIntern())) { @@ -911,7 +931,7 @@ pub fn abiAlignmentAdvanced( if (vector_type.len == 0) return .{ .scalar = .@"1" }; switch (mod.comp.getZigBackend()) { else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema)); if (elem_bits == 0) return .{ .scalar = .@"1" }; const bytes = ((elem_bits * vector_type.len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); @@ -1024,7 +1044,7 @@ pub fn abiAlignmentAdvanced( const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") { switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, @@ -1036,19 +1056,16 @@ pub fn abiAlignmentAdvanced( return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; } - const flags = struct_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - return switch (strat) { + if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // struct alignment not resolved - .sema => |sema| .{ - .scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type), - }, - .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .sema => try ty.resolveStructAlignment(mod), + .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, }; + + return .{ .scalar = struct_type.flagsPtr(ip).alignment }; }, .anon_struct_type => |tuple| { var big_align: Alignment = .@"1"; @@ -1070,12 +1087,10 @@ pub fn abiAlignmentAdvanced( }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - const flags = union_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - if (!union_type.haveLayout(ip)) switch (strat) { + if (union_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // union layout not resolved - .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, + .sema => try ty.resolveUnionAlignment(mod), .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, @@ -1117,9 +1132,9 @@ pub fn abiAlignmentAdvanced( fn abiAlignmentAdvancedErrorUnion( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, + strat: ResolveStratLazy, payload_ty: Type, -) Module.CompileError!AbiAlignmentAdvanced { +) SemaError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. 
const code_align = abiAlignment(Type.anyerror, mod); @@ -1154,8 +1169,8 @@ fn abiAlignmentAdvancedErrorUnion( fn abiAlignmentAdvancedOptional( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiAlignmentAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { const target = mod.getTarget(); const child_type = ty.optionalChild(mod); @@ -1217,8 +1232,8 @@ const AbiSizeAdvanced = union(enum) { pub fn abiSizeAdvanced( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiSizeAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; @@ -1252,9 +1267,9 @@ pub fn abiSizeAdvanced( } }, .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, + const sub_strat: ResolveStrat = switch (strat) { + .sema => .sema, + .eager => .normal, .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, @@ -1269,7 +1284,7 @@ pub fn abiSizeAdvanced( }; const total_bytes = switch (mod.comp.getZigBackend()) { else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat); const total_bits = elem_bits * vector_type.len; break :total_bytes (total_bits + 7) / 8; }, @@ -1403,7 +1418,7 @@ pub fn abiSizeAdvanced( .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => switch (struct_type.layout) { .@"packed" => { if (struct_type.backingIntType(ip).* == .none) return .{ @@ -1436,7 +1451,7 @@ pub fn abiSizeAdvanced( }, .anon_struct_type => |tuple| { switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy, .eager => {}, } const field_count = tuple.types.len; @@ -1449,7 +1464,7 @@ pub fn abiSizeAdvanced( .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, @@ -1493,8 +1508,8 @@ pub fn abiSizeAdvanced( fn abiSizeAdvancedOptional( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiSizeAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn(mod)) { @@ -1661,21 +1676,18 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { } pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, null) catch unreachable; + return bitSizeAdvanced(ty, mod, .normal) catch unreachable; } -/// If you pass `opt_sema`, any recursive type resolutions will happen if -/// necessary, possibly returning a CompileError. Passing `null` instead asserts -/// the type is fully resolved, and there will be no error, guaranteed. 
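+/// If you pass `.sema`, any recursive type resolutions will happen as
+/// necessary, possibly returning an error. Passing `.normal` instead asserts
+/// the type is fully resolved, and there will be no error, guaranteed.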
pub fn bitSizeAdvanced( ty: Type, mod: *Module, - opt_sema: ?*Sema, -) Module.CompileError!u64 { + strat: ResolveStrat, +) SemaError!u64 { const target = mod.getTarget(); const ip = &mod.intern_pool; - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + const strat_lazy: ResolveStratLazy = strat.toLazy(); switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type.bits, @@ -1690,22 +1702,22 @@ pub fn bitSizeAdvanced( if (len == 0) return 0; const elem_ty = Type.fromInterned(array_type.child); const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, + (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar, ); if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat); return (len - 1) * 8 * elem_size + elem_bit_size; }, .vector_type => |vector_type| { const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat); return elem_bit_size * vector_type.len; }, .opt_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; }, .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), @@ -1713,7 +1725,7 @@ pub fn bitSizeAdvanced( .error_union_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. 
- return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { @@ -1770,43 +1782,43 @@ pub fn bitSizeAdvanced( .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); const is_packed = struct_type.layout == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); } if (is_packed) { - return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); + return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat); } - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; }, .anon_struct_type => { - if (opt_sema) |sema| try sema.resolveTypeFields(ty); - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + if (strat == .sema) try ty.resolveFields(mod); + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); const is_packed = ty.containerLayout(mod) == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); } if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; } assert(union_type.flagsPtr(ip).status.haveFieldTypes()); var size: u64 = 0; for (0..union_type.field_types.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); + size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat)); } return size; }, .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), + .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat), // values, not types .undef, @@ -2722,13 +2734,12 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { /// During semantic analysis, instead call `Sema.typeRequiresComptime` which /// resolves field types rather than asserting they are already resolved. pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; + return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable; } /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. -/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
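+/// If `strat` is `.normal`, asserts that the type is sufficiently resolved.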
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { +pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { const ip = &mod.intern_pool; return switch (ty.toIntern()) { .empty_struct_type => false, @@ -2738,19 +2749,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .ptr_type => |ptr_type| { const child_ty = Type.fromInterned(ptr_type.child); switch (child_ty.zigTypeTag(mod)) { - .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat), .Opaque => return false, - else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), + else => return child_ty.comptimeOnlyAdvanced(mod, strat), } }, .anyframe_type => |child| { if (child == .none) return false; - return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); + return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat); }, - .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), - .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat), .error_set_type, .inferred_error_set_type, @@ -2817,8 +2828,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .no, .wip => false, .yes => true, .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; + assert(strat == .sema); if (struct_type.flagsPtr(ip).field_types_wip) return false; @@ -2826,13 +2836,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com struct_type.flagsPtr(ip).requires_comptime = .wip; errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); + try ty.resolveFields(mod); for (0..struct_type.field_types.len) |i_usize| { const i: u32 = @intCast(i_usize); if (struct_type.fieldIsComptime(ip, i)) continue; const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { // Note that this does not cause the layout to // be considered resolved. 
Comptime-only types // still maintain a layout of their @@ -2851,7 +2861,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .anon_struct_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { const have_comptime_val = val != .none; - if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true; } return false; }, @@ -2862,8 +2872,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .no, .wip => return false, .yes => return true, .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; + assert(strat == .sema); if (union_type.flagsPtr(ip).status == .field_types_wip) return false; @@ -2871,11 +2880,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com union_type.flagsPtr(ip).requires_comptime = .wip; errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; - try sema.resolveTypeFieldsUnion(ty, union_type); + try ty.resolveFields(mod); for (0..union_type.field_types.len) |field_idx| { const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { union_type.flagsPtr(ip).requires_comptime = .yes; return true; } @@ -2889,7 +2898,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .opaque_type => false, - .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat), // values, not types .undef, @@ -3180,10 +3189,10 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { } pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; + return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable; } -pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { +pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment { const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3191,22 +3200,14 @@ pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*S assert(struct_type.layout != .@"packed"); const explicit_align = struct_type.fieldAlign(ip, index); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - if (opt_sema) |sema| { - return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } else { - return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } + return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat); }, .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); - if (opt_sema) |sema| { - return sema.unionFieldAlignment(union_obj, @intCast(index)); - } else 
{ - return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); - } + return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat); }, else => unreachable, } @@ -3546,6 +3547,397 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: } }; } +pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + switch (ty.zigTypeTag(zcu)) { + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveLayout(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .layout), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .layout), + .Array => { + if (ty.arrayLenIncludingSentinel(zcu) == 0) return; + const elem_ty = ty.childType(zcu); + return elem_ty.resolveLayout(zcu); + }, + .Optional => { + const payload_ty = ty.optionalChild(zcu); + return payload_ty.resolveLayout(zcu); + }, + .ErrorUnion => { + const payload_ty = ty.errorUnionPayload(zcu); + return payload_ty.resolveLayout(zcu); + }, + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) { + // Resolving of generic function types is deferred to when + // the function is instantiated. + return; + } + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveLayout(zcu); + } + try Type.fromInterned(info.return_type).resolveLayout(zcu); + }, + else => {}, + } +} + +pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + const ty_ip = ty.toIntern(); + + switch (ty_ip) { + .none => unreachable, + + .u0_type, + .i0_type, + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .adhoc_inferred_error_set_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .optional_noreturn_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => {}, + + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { + .type_struct, 
+ .type_struct_packed, + .type_struct_packed_inits, + => return ty.resolveStructInner(zcu, .fields), + + .type_union => return ty.resolveUnionInner(zcu, .fields), + + .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu), + + else => {}, + }, + } +} + +pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + + switch (ty.zigTypeTag(zcu)) { + .Type, + .Void, + .Bool, + .NoReturn, + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .ErrorSet, + .Enum, + .Opaque, + .Frame, + .AnyFrame, + .Vector, + .EnumLiteral, + => {}, + + .Pointer => return ty.childType(zcu).resolveFully(zcu), + .Array => return ty.childType(zcu).resolveFully(zcu), + .Optional => return ty.optionalChild(zcu).resolveFully(zcu), + .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu), + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) return; + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveFully(zcu); + } + try Type.fromInterned(info.return_type).resolveFully(zcu); + }, + + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveFully(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .full), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .full), + } +} + +pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void { + // TODO: stop calling this for tuples! + _ = zcu.typeToStruct(ty) orelse return; + return ty.resolveStructInner(zcu, .inits); +} + +pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveStructInner(zcu, .alignment); +} + +pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveUnionInner(zcu, .alignment); +} + +/// `ty` must be a struct. +fn resolveStructInner( + ty: Type, + zcu: *Zcu, + resolution: enum { fields, inits, alignment, layout, full }, +) SemaError!void { + const gpa = zcu.gpa; + + const struct_obj = zcu.typeToStruct(ty).?; + const owner_decl_index = struct_obj.decl.unwrap() orelse return; + + var analysis_arena = std.heap.ArenaAllocator.init(gpa); + defer analysis_arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .mod = zcu, + .gpa = gpa, + .arena = analysis_arena.allocator(), + .code = undefined, // This ZIR will not be used. + .owner_decl = zcu.declPtr(owner_decl_index), + .owner_decl_index = owner_decl_index, + .func_index = .none, + .func_is_naked = false, + .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, + .owner_func_index = .none, + .comptime_err_ret_trace = &comptime_err_ret_trace, + }; + defer sema.deinit(); + + switch (resolution) { + .fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj), + .inits => return sema.resolveStructFieldInits(ty), + .alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj), + .layout => return sema.resolveStructLayout(ty), + .full => return sema.resolveStructFully(ty), + } +} + +/// `ty` must be a union. 
+fn resolveUnionInner(
+    ty: Type,
+    zcu: *Zcu,
+    resolution: enum { fields, alignment, layout, full },
+) SemaError!void {
+    const gpa = zcu.gpa;
+
+    const union_obj = zcu.typeToUnion(ty).?;
+    const owner_decl_index = union_obj.decl;
+
+    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+    defer analysis_arena.deinit();
+
+    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+    defer comptime_err_ret_trace.deinit();
+
+    var sema: Sema = .{
+        .mod = zcu,
+        .gpa = gpa,
+        .arena = analysis_arena.allocator(),
+        .code = undefined, // This ZIR will not be used.
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsUnion(ty, union_obj),
+        .alignment => return sema.resolveUnionAlignment(ty, union_obj),
+        .layout => return sema.resolveUnionLayout(ty),
+        .full => return sema.resolveUnionFully(ty),
+    }
+}
+
+/// Fully resolves a simple type. This is usually a nop, but for builtin types with
+/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
+/// resolve the type.
+fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+    const builtin_type_name: []const u8 = switch (simple_type) {
+        .atomic_order => "AtomicOrder",
+        .atomic_rmw_op => "AtomicRmwOp",
+        .calling_convention => "CallingConvention",
+        .address_space => "AddressSpace",
+        .float_mode => "FloatMode",
+        .reduce_op => "ReduceOp",
+        .call_modifier => "CallModifier",
+        .prefetch_options => "PrefetchOptions",
+        .export_options => "ExportOptions",
+        .extern_options => "ExternOptions",
+        .type_info => "Type",
+        else => return,
+    };
+    // This will fully resolve the type.
+    _ = try zcu.getBuiltinType(builtin_type_name);
+}
+
+/// Returns the type of a pointer to an element.
+/// Asserts that the type is a pointer, and that the element type is indexable.
+/// If the element index is comptime-known, it must be passed in `offset`.
+/// For *@Vector(n, T), returns *align(a:b:h:v) T
+/// For *[N]T, returns *T
+/// For [*]T, returns *T
+/// For []T, returns *T
+/// Handles const-ness and address spaces in particular.
+/// This code is duplicated in `Sema.analyzePtrArithmetic`.
+/// May perform type resolution and return a transitive `error.AnalysisFail`.
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+    const ptr_info = ptr_ty.ptrInfo(zcu);
+    const elem_ty = ptr_ty.elemType2(zcu);
+    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
+    const parent_ty = ptr_ty.childType(zcu);
+
+    const VI = InternPool.Key.PtrType.VectorIndex;
+
+    const vector_info: struct {
+        host_size: u16 = 0,
+        alignment: Alignment = .none,
+        vector_index: VI = .none,
+    } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
+        const elem_bits = elem_ty.bitSize(zcu);
+        if (elem_bits == 0) break :blk .{};
+        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
+        if (!is_packed) break :blk .{};
+
+        break :blk .{
+            .host_size = @intCast(parent_ty.arrayLen(zcu)),
+            .alignment = parent_ty.abiAlignment(zcu),
+            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+        };
+    } else .{};
+
+    const alignment: Alignment = a: {
+        // Calculate the new pointer alignment.
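+        // Worked example (illustrative, not from the original change): for a
+        // pointer with an explicit align(8) to elements of ABI size 12,
+        // element 1 has addend 12; @ctz(12) == 2 and log2(8) == 3, so the
+        // element pointer below comes out as align(4), i.e. 1 << @min(2, 3).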
+        if (ptr_info.flags.alignment == .none) {
+            // In the case of an ABI-aligned pointer, any pointer arithmetic
+            // maintains the same ABI alignment.
+            break :a vector_info.alignment;
+        }
+
+        // If the addend is not a comptime-known value, we can still count on
+        // it being a multiple of the type size.
+        const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
+        const addend = if (offset) |off| elem_size * off else elem_size;
+
+        // The resulting pointer is aligned to the greatest common divisor of
+        // the addend (an arbitrary number) and the alignment factor (always a
+        // power of two, non-zero), i.e. the largest power of two dividing both.
+        const new_align: Alignment = @enumFromInt(@min(
+            @ctz(addend),
+            ptr_info.flags.alignment.toLog2Units(),
+        ));
+        assert(new_align != .none);
+        break :a new_align;
+    };
+    return zcu.ptrTypeSema(.{
+        .child = elem_ty.toIntern(),
+        .flags = .{
+            .alignment = alignment,
+            .is_const = ptr_info.flags.is_const,
+            .is_volatile = ptr_info.flags.is_volatile,
+            .is_allowzero = is_allowzero,
+            .address_space = ptr_info.flags.address_space,
+            .vector_index = vector_info.vector_index,
+        },
+        .packed_offset = .{
+            .host_size = vector_info.host_size,
+            .bit_offset = 0,
+        },
+    });
+}
+
 pub const @"u1": Type = .{ .ip_index = .u1_type };
 pub const @"u8": Type = .{ .ip_index = .u8_type };
 pub const @"u16": Type = .{ .ip_index = .u16_type };
diff --git a/src/Value.zig b/src/Value.zig
index 20b24510ef68..34a0472c169b 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -161,9 +161,11 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
     };
 }
 
+pub const ResolveStrat = Type.ResolveStrat;
+
 /// Asserts the value is an integer.
 pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst {
-    return val.toBigIntAdvanced(space, mod, null) catch unreachable;
+    return val.toBigIntAdvanced(space, mod, .normal) catch unreachable;
 }
 
 /// Asserts the value is an integer.
@@ -171,7 +173,7 @@ pub fn toBigIntAdvanced(
     val: Value,
     space: *BigIntSpace,
     mod: *Module,
-    opt_sema: ?*Sema,
+    strat: ResolveStrat,
 ) Module.CompileError!BigIntConst {
     return switch (val.toIntern()) {
         .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
@@ -181,7 +183,7 @@ pub fn toBigIntAdvanced(
         .int => |int| switch (int.storage) {
             .u64, .i64, .big_int => int.storage.toBigInt(space),
             .lazy_align, .lazy_size => |ty| {
-                if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
+                if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod);
                 const x = switch (int.storage) {
                     else => unreachable,
                     .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
@@ -190,10 +192,10 @@ pub fn toBigIntAdvanced(
                 return BigIntMutable.init(&space.limbs, x).toConst();
             },
         },
-        .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema),
+        .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat),
         .opt, .ptr => BigIntMutable.init(
             &space.limbs,
-            (try val.getUnsignedIntAdvanced(mod, opt_sema)).?,
+            (try val.getUnsignedIntAdvanced(mod, strat)).?,
         ).toConst(),
         else => unreachable,
     },
@@ -228,12 +230,12 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
 
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
 pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
-    return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
+    return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable;
 }
 
 /// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined. -pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { +pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 { return switch (val.toIntern()) { .undef => unreachable, .bool_false => 0, @@ -244,28 +246,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), - .lazy_align => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0 - else - Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, - .lazy_size => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar - else - Type.fromInterned(ty).abiSize(mod), + .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, + .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, }, .ptr => |ptr| switch (ptr.base_addr) { .int => ptr.byte_offset, .field => |field| { - const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null; const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod); - if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + if (strat == .sema) try struct_ty.resolveLayout(mod); return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset; }, else => null, }, .opt => |opt| switch (opt.val) { .none => 0, - else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), + else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat), }, else => null, }, @@ -273,13 +269,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedInt(val: Value, mod: *Module) u64 { - return getUnsignedInt(val, mod).?; +pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 { + return getUnsignedInt(val, zcu).?; } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { - return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; +pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 { + return (try getUnsignedIntAdvanced(val, zcu, .sema)).?; } /// Asserts the value is an integer and it fits in a i64 @@ -1028,13 +1024,13 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool { } pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { - return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; + return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!std.math.Order { return switch (lhs.toIntern()) { .bool_false => .eq, @@ -1052,13 +1048,13 @@ pub fn orderAgainstZeroAdvanced( .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( mod, false, - if (opt_sema) |sema| .{ .sema = sema } else .eager, + strat.toLazy(), ) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }) .gt else .eq, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), + .enum_tag 
=> |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1069,14 +1065,13 @@ pub fn orderAgainstZeroAdvanced( /// Asserts the value is comparable. pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { - return orderAdvanced(lhs, rhs, mod, null) catch unreachable; + return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable; } /// Asserts the value is comparable. -/// If opt_sema is null then this function asserts things are resolved and cannot fail. -pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); +pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order { + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat); switch (lhs_against_zero) { .lt => if (rhs_against_zero != .lt) return .lt, .eq => return rhs_against_zero.invert(), @@ -1096,15 +1091,15 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !st var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat); return lhs_bigint.order(rhs_bigint); } /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { - return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; + return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable; } pub fn compareHeteroAdvanced( @@ -1112,7 +1107,7 @@ pub fn compareHeteroAdvanced( op: std.math.CompareOperator, rhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) !bool { if (lhs.pointerDecl(mod)) |lhs_decl| { if (rhs.pointerDecl(mod)) |rhs_decl| { @@ -1135,7 +1130,7 @@ pub fn compareHeteroAdvanced( else => {}, } } - return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); + return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op); } /// Asserts the values are comparable. Both operands have type `ty`. @@ -1176,22 +1171,22 @@ pub fn compareScalar( /// /// Note that `!compareAllWithZero(.eq, ...) 
!= compareAllWithZero(.neq, ...)` pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { - return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; + return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable; } -pub fn compareAllWithZeroAdvanced( +pub fn compareAllWithZeroSema( lhs: Value, op: std.math.CompareOperator, - sema: *Sema, + zcu: *Zcu, ) Module.CompileError!bool { - return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); + return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema); } pub fn compareAllWithZeroAdvancedExtra( lhs: Value, op: std.math.CompareOperator, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!bool { if (lhs.isInf(mod)) { switch (op) { @@ -1211,14 +1206,14 @@ pub fn compareAllWithZeroAdvancedExtra( if (!std.math.order(byte, 0).compare(op)) break false; } else true, .elems => |elems| for (elems) |elem| { - if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false; } else true, - .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat), }, .undef => return false, else => {}, } - return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); + return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op); } pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { @@ -1279,9 +1274,9 @@ pub fn slicePtr(val: Value, mod: *Module) Value { } /// Gets the `len` field of a slice value as a `u64`. -/// Resolves the length using the provided `Sema` if necessary. -pub fn sliceLen(val: Value, sema: *Sema) !u64 { - return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema); +/// Resolves the length using `Sema` if necessary. +pub fn sliceLen(val: Value, zcu: *Zcu) !u64 { + return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu); } /// Asserts the value is an aggregate, and returns the element value at the given index. @@ -1482,29 +1477,29 @@ pub fn isFloat(self: Value, mod: *const Module) bool { } pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { - return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { + return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, }; } -pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern(); + scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern(); } return Value.fromInterned((try mod.intern(.{ .aggregate = .{ .ty = float_ty.toIntern(), .storage = .{ .elems = result_data }, } }))); } - return floatFromIntScalar(val, float_ty, mod, opt_sema); + return floatFromIntScalar(val, float_ty, mod, strat); } -pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => try mod.undefValue(float_ty), .int => |int| switch (int.storage) { @@ -1513,16 +1508,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?* return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod); - }, + .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod), + .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod), }, else => unreachable, }; @@ -3616,17 +3603,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex; /// `parent_ptr` must be a single-pointer to some optional. /// Returns a pointer to the payload of the optional. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const opt_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(opt_ty.zigTypeTag(zcu) == .Optional); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an optional has the same // natural alignment as its child type. @@ -3651,17 +3636,15 @@ pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to some error union. /// Returns a pointer to the payload of the error union. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. 
+pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const eu_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an error union has a // natural alignment greater than or equal to that of its payload type. @@ -3682,10 +3665,8 @@ pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to a struct, union, or slice. /// Returns a pointer to the aggregate field at the specified index. /// For slices, uses `slice_ptr_index` and `slice_len_index`. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const aggregate_ty = parent_ptr_ty.childType(zcu); @@ -3698,17 +3679,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .Struct => field: { const field_ty = aggregate_ty.structFieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); const field_align = a: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { - break :pa try sema.typeAbiAlignment(aggregate_ty); + break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } else parent_ptr_info.flags.alignment; break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off))); }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = field_align; @@ -3723,14 +3704,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { new.packed_offset = packed_offset; new.child = field_ty.toIntern(); if (new.flags.alignment == .none) { - new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty); + new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } break :info new; }); return zcu.getCoerced(parent_ptr, result_ty); }, .byte_ptr => |ptr_info| { - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.packed_offset = .{ @@ -3749,10 +3730,10 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Point to the same address. 
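+                    // Every field of an extern union lives at byte offset 0,
+                    // so only the pointer's child type needs to change.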
- const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); break :info new; @@ -3762,28 +3743,28 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .@"packed" => { // If the field has an ABI size matching its bit size, then we can continue to use a // non-bit pointer if the parent pointer is also a non-bit pointer. - if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) { + if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) { // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely. const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) { .little => 0, - .big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty), + .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar, }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = InternPool.Alignment.fromLog2Units( - @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?), + @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?), ); break :info new; }); return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); } else { // The result must be a bit-pointer if it is not already. - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); if (new.packed_offset.host_size == 0) { - new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8); + new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8); assert(new.packed_offset.bit_offset == 0); } break :info new; @@ -3805,14 +3786,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { }; const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: { - const ty_align = try sema.typeAbiAlignment(field_ty); + const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; const true_field_align = if (field_align == .none) ty_align else field_align; const new_align = true_field_align.min(parent_ptr_info.flags.alignment); if (new_align == ty_align) break :a .none; break :a new_align; } else field_align; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = new_align; @@ -3834,10 +3815,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice. /// Returns a pointer to the element at the specified index. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. 
+pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) { .One, .Many, .C => orig_parent_ptr, .Slice => orig_parent_ptr.slicePtr(zcu), @@ -3845,7 +3824,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const elem_ty = parent_ptr_ty.childType(zcu); - const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx)); + const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu); if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); @@ -3862,21 +3841,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) { .One => switch (elem_ty.zigTypeTag(zcu)) { - .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) }, + .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) }, .Array => strat: { const arr_elem_ty = elem_ty.childType(zcu); - if (try sema.typeRequiresComptime(arr_elem_ty)) { + if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) { break :strat .{ .elem_ptr = arr_elem_ty }; } - break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) }; + break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }; }, else => unreachable, }, - .Many, .C => if (try sema.typeRequiresComptime(elem_ty)) + .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema)) .{ .elem_ptr = elem_ty } else - .{ .offset = field_idx * try sema.typeAbiSize(elem_ty) }, + .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }, .Slice => unreachable, }; @@ -4014,11 +3993,7 @@ pub const PointerDeriveStep = union(enum) { pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep { return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) { error.OutOfMemory => |e| return e, - error.AnalysisFail, - error.GenericPoison, - error.ComptimeReturn, - error.ComptimeBreak, - => unreachable, + error.AnalysisFail => unreachable, }; } @@ -4087,8 +4062,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, + .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, + .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, .Pointer => .{ switch (field.index) { Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu), Value.slice_len_index => Type.usize, @@ -4269,3 +4244,118 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .new_ptr_ty = Type.fromInterned(ptr.ty), } }; } + +pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value { + switch (zcu.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, 
.i64, .big_int => return val, + .lazy_align, .lazy_size => return zcu.intValue( + Type.fromInterned(int.ty), + (try val.getUnsignedIntAdvanced(zcu, .sema)).?, + ), + }, + .slice => |slice| { + const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu); + const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu); + if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; + return Value.fromInterned(try zcu.intern(.{ .slice = .{ + .ty = slice.ty, + .ptr = ptr.toIntern(), + .len = len.toIntern(), + } })); + }, + .ptr => |ptr| { + switch (ptr.base_addr) { + .decl, .comptime_alloc, .anon_decl, .int => return val, + .comptime_field => |field_val| { + const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_field_val == field_val) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = .{ .comptime_field = resolved_field_val }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .arr_elem, .field => |base_index| { + const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base_index.base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .arr_elem => .{ .arr_elem = .{ + .base = resolved_base, + .index = base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + } + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) 
|elem, i| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } }))); + }, + .repeated_elem => |elem| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } }))); + }, + }, + .un => |un| { + const resolved_tag = if (un.tag == .none) + .none + else + (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern(); + const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + Value.fromInterned((try zcu.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } }))); + }, + else => return val, + } +} diff --git a/src/Zcu.zig b/src/Zcu.zig index 27e9347268ea..e3b85e957dee 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3593,7 +3593,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }, error.OutOfMemory => return error.OutOfMemory, }; - defer air.deinit(gpa); + errdefer air.deinit(gpa); const invalidate_ies_deps = i: { if (!was_outdated) break :i false; @@ -3615,13 +3615,36 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { + air.deinit(gpa); return; } + try comp.work_queue.writeItem(.{ .codegen_func = .{ + .func = func_index, + .air = air, + } }); +} + +/// Takes ownership of `air`, even on error. +/// If any types referenced by `air` are unresolved, marks the codegen as failed. 
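+/// A sketch of the expected call site (assumed wiring, for illustration): the
+/// `codegen_func` job enqueued above by `ensureFuncBodyAnalyzed` carries both
+/// fields, so the job processor would do roughly:
+///
+///     .codegen_func => |cf| try zcu.linkerUpdateFunc(cf.func, cf.air),
+///
+/// After this call returns, `air` has been deinitialized and must not be reused.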
+pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const comp = zcu.comp; + + defer { + var air_mut = air; + air_mut.deinit(gpa); + } + + const func = zcu.funcInfo(func_index); + const decl_index = func.owner_decl; + const decl = zcu.declPtr(decl_index); + var liveness = try Liveness.analyze(gpa, air, ip); defer liveness.deinit(gpa); - if (dump_air) { + if (build_options.enable_debug_extensions and comp.verbose_air) { const fqn = try decl.fullyQualifiedName(zcu); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("print_air.zig").dump(zcu, air, liveness); @@ -3629,7 +3652,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In } if (std.debug.runtime_safety) { - var verify = Liveness.Verify{ + var verify: Liveness.Verify = .{ .gpa = gpa, .air = air, .liveness = liveness, @@ -3642,7 +3665,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In else => { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); zcu.failed_analysis.putAssumeCapacityNoClobber( - AnalUnit.wrap(.{ .decl = decl_index }), + AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu), @@ -3659,7 +3682,13 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); defer codegen_prog_node.end(); - if (comp.bin_file) |lf| { + if (!air.typesFullyResolved(zcu)) { + // A type we depend on failed to resolve. This is a transitive failure. + // Correcting this failure will involve changing a type this function + // depends on, hence triggering re-analysis of this function, so this + // interacts correctly with incremental compilation. 
+ func.analysis(ip).state = .codegen_failure; + } else if (comp.bin_file) |lf| { lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { @@ -3667,7 +3696,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }, else => { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu), "unable to codegen: {s}", @@ -3735,7 +3764,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); + try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -3812,7 +3841,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa decl.analysis = .complete; try zcu.scanNamespace(namespace_index, decls, decl); - + try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -4103,7 +4132,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { // Note this resolves the type of the Decl, not the value; if this Decl // is a struct, for example, this resolves `type` (which needs no resolution), // not the struct itself. - try sema.resolveTypeLayout(decl_ty); + try decl_ty.resolveLayout(mod); if (decl.kind == .@"usingnamespace") { if (!decl_ty.eql(Type.type, mod)) { @@ -4220,7 +4249,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { if (has_runtime_bits) { // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. - try sema.resolveTypeFully(decl_ty); + try decl_ty.resolveFully(mod); try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); @@ -5212,23 +5241,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato else => |e| return e, }; - // Similarly, resolve any queued up types that were requested to be resolved for - // the backends. - for (sema.types_to_resolve.keys()) |ty| { - sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) { - error.GenericPoison => unreachable, - error.ComptimeReturn => unreachable, - error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. - // We should not try to lower this function. - decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => |e| return e, - }; - } - try sema.flushExports(); return .{ @@ -5793,6 +5805,16 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info }))); } +/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer +/// child type's alignment is resolved so that an invalid alignment is not used. +/// In general, prefer this function during semantic analysis. 
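+/// Hypothetical usage during Sema (illustrative sketch; `elem_ty` is assumed):
+///
+///     // Resolves `elem_ty`'s ABI alignment before the `align(4)` is validated:
+///     const ptr_ty = try zcu.ptrTypeSema(.{
+///         .child = elem_ty.toIntern(),
+///         .flags = .{ .alignment = .@"4" },
+///     });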
+pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type { + if (info.flags.alignment != .none) { + _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema); + } + return zcu.ptrType(info); +} + pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .child = child_type.toIntern() }); } @@ -6368,15 +6390,21 @@ pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) return max_align; } -/// Returns the field alignment, assuming the union is not packed. -/// Keep implementation in sync with `Sema.unionFieldAlignment`. -/// Prefer to call that function instead of this one during Sema. -pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment { - const ip = &mod.intern_pool; +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment { + return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. +pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment { + const ip = &zcu.intern_pool; + assert(loaded_union.flagsPtr(ip).layout != .@"packed"); const field_align = loaded_union.fieldAlign(ip, field_index); if (field_align != .none) return field_align; const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - return field_ty.abiAlignment(mod); + if (field_ty.isNoReturn(zcu)) return .none; + return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; } /// Returns the index of the active field, given the current tag value @@ -6387,41 +6415,37 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern()); } -/// Returns the field alignment of a non-packed struct in byte units. -/// Keep implementation in sync with `Sema.structFieldAlignment`. -/// asserts the layout is not packed. +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. pub fn structFieldAlignment( - mod: *Module, + zcu: *Zcu, explicit_alignment: InternPool.Alignment, field_ty: Type, layout: std.builtin.Type.ContainerLayout, ) Alignment { + return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. 
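+/// Illustrative call sites (hypothetical): use `.sema` while `field_ty` may still
+/// require resolution; once resolution is complete, the thin wrapper above suffices:
+///
+///     const a = try zcu.structFieldAlignmentAdvanced(.none, field_ty, .@"extern", .sema);
+///     const b = zcu.structFieldAlignment(.none, field_ty, .@"extern");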
+pub fn structFieldAlignmentAdvanced( + zcu: *Zcu, + explicit_alignment: InternPool.Alignment, + field_ty: Type, + layout: std.builtin.Type.ContainerLayout, + strat: Type.ResolveStrat, +) SemaError!Alignment { assert(layout != .@"packed"); if (explicit_alignment != .none) return explicit_alignment; + const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; switch (layout) { .@"packed" => unreachable, - .auto => { - if (mod.getTarget().ofmt == .c) { - return structFieldAlignmentExtern(mod, field_ty); - } else { - return field_ty.abiAlignment(mod); - } - }, - .@"extern" => return structFieldAlignmentExtern(mod, field_ty), + .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align, + .@"extern" => {}, } -} - -/// Returns the field alignment of an extern struct in byte units. -/// This logic is duplicated in Type.abiAlignmentAdvanced. -pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment { - const ty_abi_align = field_ty.abiAlignment(mod); - - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - return ty_abi_align.max(.@"16"); + // extern + if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) { + return ty_abi_align.maxStrict(.@"16"); } - return ty_abi_align; } @@ -6480,3 +6504,29 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved return result; } + +pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref { + const decl_index = try zcu.getBuiltinDecl(name); + zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt"); + return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern()); +} + +pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file; + const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?; + const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); + zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); + const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); + const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); +} + +pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type { + const ty_inst = try zcu.getBuiltin(name); + const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt")); + ty.resolveFully(zcu) catch @panic("std.builtin is corrupt"); + return ty; +} diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b3718db5b118..02933929c872 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2603,7 +2603,10 @@ pub const Object = struct { if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; const field_size = Type.fromInterned(field_ty).abiSize(mod); - const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index)); + const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) 
{ + .@"packed" => .none, + .auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)), + }; const field_name = tag_type.names.get(ip)[field_index]; fields.appendAssumeCapacity(try o.builder.debugMemberType( diff --git a/src/print_value.zig b/src/print_value.zig index d2952c3d8eb5..394f021049ac 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -81,12 +81,12 @@ pub fn print( }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), - .lazy_align => |ty| if (opt_sema) |sema| { - const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_align => |ty| if (opt_sema != null) { + const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar; try writer.print("{}", .{a.toByteUnits() orelse 0}); } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}), - .lazy_size => |ty| if (opt_sema) |sema| { - const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_size => |ty| if (opt_sema != null) { + const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar; try writer.print("{}", .{s}); } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}), }, diff --git a/test/cases/compile_errors/direct_struct_loop.zig b/test/cases/compile_errors/direct_struct_loop.zig index 9fdda1bdc706..1eed8aad53bf 100644 --- a/test/cases/compile_errors/direct_struct_loop.zig +++ b/test/cases/compile_errors/direct_struct_loop.zig @@ -10,4 +10,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/indirect_struct_loop.zig b/test/cases/compile_errors/indirect_struct_loop.zig index ef5526830e98..02ec65f5ab11 100644 --- a/test/cases/compile_errors/indirect_struct_loop.zig +++ b/test/cases/compile_errors/indirect_struct_loop.zig @@ -16,6 +16,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :8:5: note: while checking this field -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig index 74cafabe7c40..11dd93d01e8c 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: struct 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig index 6030ca4d3e7f..8e499ab7e2ea 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: union 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git 
a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig index 02ea7e27104a..98c622462600 100644 --- a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig +++ b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig @@ -16,4 +16,3 @@ comptime { // target=native // // :6:21: error: struct layout depends on it having runtime bits -// :4:13: note: while checking this field diff --git a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig index cad779e3d783..6a4cba82a6cd 100644 --- a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig +++ b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig @@ -15,5 +15,3 @@ export fn entry() void { // target=native // // :1:17: error: struct 'tmp.LhsExpr' depends on itself -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig index f5647625ddbb..a0a6d370421f 100644 --- a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig +++ b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig @@ -1,5 +1,5 @@ pub export fn entry(param: usize) usize { - return struct { param }; + return struct { @TypeOf(param) }; } // error diff --git a/test/src/Cases.zig b/test/src/Cases.zig index b8a3260ad6c6..dbf409f53bf5 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -395,10 +395,7 @@ fn addFromDirInner( if (entry.kind != .file) continue; // Ignore stuff such as .swp files - switch (Compilation.classifyFileExt(entry.basename)) { - .unknown => continue, - else => {}, - } + if (!knownFileExtension(entry.basename)) continue; try filenames.append(try ctx.arena.dupe(u8, entry.path)); } @@ -623,8 +620,6 @@ pub fn lowerToBuildSteps( b: *std.Build, parent_step: *std.Build.Step, test_filters: []const []const u8, - cases_dir_path: []const u8, - incremental_exe: *std.Build.Step.Compile, ) void { const host = std.zig.system.resolveTargetQuery(.{}) catch |err| std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)}); @@ -637,20 +632,11 @@ pub fn lowerToBuildSteps( // compilation is in a happier state. continue; } - for (test_filters) |test_filter| { - if (std.mem.indexOf(u8, incr_case.base_path, test_filter)) |_| break; - } else if (test_filters.len > 0) continue; - const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{ - cases_dir_path, incr_case.base_path, - }) catch @panic("OOM"); - const run = b.addRunArtifact(incremental_exe); - run.setName(incr_case.base_path); - run.addArgs(&.{ - case_base_path_with_dir, - b.graph.zig_exe, - }); - run.expectStdOutEqual(""); - parent_step.dependOn(&run.step); + // TODO: the logic for running these was bad, so I've ripped it out. Rewrite this + // in a way that actually spawns the compiler, communicating with it over the + // compiler server protocol. 
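+        // (Sketch of the intended replacement, not implemented here: spawn the
+        // compiler per case with something like `zig build-obj --listen=-` and
+        // drive updates over the std.zig.Server protocol on stdin/stdout.)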
+ _ = incr_case; + @panic("TODO implement incremental test case executor"); } for (self.cases.items) |case| { @@ -1236,192 +1222,6 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const getExternalExecutor = std.zig.system.getExternalExecutor; -const Compilation = @import("../../src/Compilation.zig"); -const zig_h = @import("../../src/link.zig").File.C.zig_h; -const introspect = @import("../../src/introspect.zig"); -const ThreadPool = std.Thread.Pool; -const WaitGroup = std.Thread.WaitGroup; -const build_options = @import("build_options"); -const Package = @import("../../src/Package.zig"); - -pub const std_options = .{ - .log_level = .err, -}; - -var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{ - .stack_trace_frames = build_options.mem_leak_frames, -}){}; - -// TODO: instead of embedding the compiler in this process, spawn the compiler -// as a sub-process and communicate the updates using the compiler protocol. -pub fn main() !void { - const use_gpa = build_options.force_gpa or !builtin.link_libc; - const gpa = gpa: { - if (use_gpa) { - break :gpa general_purpose_allocator.allocator(); - } - // We would prefer to use raw libc allocator here, but cannot - // use it if it won't support the alignment we need. - if (@alignOf(std.c.max_align_t) < @alignOf(i128)) { - break :gpa std.heap.c_allocator; - } - break :gpa std.heap.raw_c_allocator; - }; - - var single_threaded_arena = std.heap.ArenaAllocator.init(gpa); - defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; - const arena = thread_safe_arena.allocator(); - - const args = try std.process.argsAlloc(arena); - const case_file_path = args[1]; - const zig_exe_path = args[2]; - - var filenames = std.ArrayList([]const u8).init(arena); - - const case_dirname = std.fs.path.dirname(case_file_path).?; - var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true }); - defer iterable_dir.close(); - - if (std.mem.endsWith(u8, case_file_path, ".0.zig")) { - const stem = case_file_path[case_dirname.len + 1 .. 
case_file_path.len - "0.zig".len]; - var it = iterable_dir.iterate(); - while (try it.next()) |entry| { - if (entry.kind != .file) continue; - if (!std.mem.startsWith(u8, entry.name, stem)) continue; - try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name })); - } - } else { - try filenames.append(case_file_path); - } - - if (filenames.items.len == 0) { - std.debug.print("failed to find the input source file(s) from '{s}'\n", .{ - case_file_path, - }); - std.process.exit(1); - } - - // Sort filenames, so that incremental tests are contiguous and in-order - sortTestFilenames(filenames.items); - - var ctx = Cases.init(gpa, arena); - - var test_it = TestIterator{ .filenames = filenames.items }; - while (try test_it.next()) |batch| { - const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent; - var cases = std.ArrayList(usize).init(arena); - - for (batch) |filename| { - const max_file_size = 10 * 1024 * 1024; - const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0); - - // Parse the manifest - var manifest = try TestManifest.parse(arena, src); - - if (cases.items.len == 0) { - const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend); - const targets = try manifest.getConfigForKeyAlloc(arena, "target", std.Target.Query); - const c_frontends = try manifest.getConfigForKeyAlloc(ctx.arena, "c_frontend", CFrontend); - const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool); - const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool); - const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode); - - if (manifest.type == .translate_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingLinesSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .input = src, - .kind = .{ .translate = output }, - }); - } - } - continue; - } - if (manifest.type == .run_translated_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .output = output, - .input = src, - .kind = .{ .run = output }, - }); - } - } - continue; - } - - // Cross-product to get all possible test combinations - for (backends) |backend| { - for (targets) |target| { - const next = ctx.cases.items.len; - try ctx.cases.append(.{ - .name = std.fs.path.stem(filename), - .target = target, - .backend = backend, - .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator), - .is_test = is_test, - .output_mode = output_mode, - .link_libc = backend == .llvm, - .deps = std.ArrayList(DepModule).init(ctx.cases.allocator), - }); - try cases.append(next); - } - } - } - - for (cases.items) |case_index| { - const case = &ctx.cases.items[case_index]; - if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) { - // https://github.com/ziglang/zig/issues/15174 - continue; - } - - switch (manifest.type) { - .compile => { - case.addCompile(src); - }, - .@"error" => { - const errors = try 
manifest.trailingLines(arena); - switch (strategy) { - .independent => { - case.addError(src, errors); - }, - .incremental => { - case.addErrorNamed("update", src, errors); - }, - } - }, - .run => { - const output = try manifest.trailingSplit(ctx.arena); - case.addCompareOutput(src, output); - }, - .translate_c => @panic("c_frontend specified for compile case"), - .run_translated_c => @panic("c_frontend specified for compile case"), - .cli => @panic("TODO cli tests"), - } - } - } - } - - return runCases(&ctx, zig_exe_path); -} - fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { return .{ .query = query, @@ -1430,470 +1230,33 @@ fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { }; } -fn runCases(self: *Cases, zig_exe_path: []const u8) !void { - const host = try std.zig.system.resolveTargetQuery(.{}); - - var progress = std.Progress{}; - const root_node = progress.start("compiler", self.cases.items.len); - progress.terminal = null; - defer root_node.end(); - - var zig_lib_directory = try introspect.findZigLibDirFromSelfExe(self.gpa, zig_exe_path); - defer zig_lib_directory.handle.close(); - defer self.gpa.free(zig_lib_directory.path.?); - - var aux_thread_pool: ThreadPool = undefined; - try aux_thread_pool.init(.{ .allocator = self.gpa }); - defer aux_thread_pool.deinit(); - - // Use the same global cache dir for all the tests, such that we for example don't have to - // rebuild musl libc for every case (when LLVM backend is enabled). - var global_tmp = std.testing.tmpDir(.{}); - defer global_tmp.cleanup(); - - var cache_dir = try global_tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", ".zig-cache", "tmp", &global_tmp.sub_path }); - defer self.gpa.free(tmp_dir_path); - - const global_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, ".zig-cache" }), - }; - defer self.gpa.free(global_cache_directory.path.?); - - { - for (self.cases.items) |*case| { - if (build_options.skip_non_native) { - if (case.target.getCpuArch() != builtin.cpu.arch) - continue; - if (case.target.getObjectFormat() != builtin.object_format) - continue; - } - - // Skip tests that require LLVM backend when it is not available - if (!build_options.have_llvm and case.backend == .llvm) - continue; - - assert(case.backend != .stage1); - - for (build_options.test_filters) |test_filter| { - if (std.mem.indexOf(u8, case.name, test_filter)) |_| break; - } else if (build_options.test_filters.len > 0) continue; - - var prg_node = root_node.start(case.name, case.updates.items.len); - prg_node.activate(); - defer prg_node.end(); - - try runOneCase( - self.gpa, - &prg_node, - case.*, - zig_lib_directory, - zig_exe_path, - &aux_thread_pool, - global_cache_directory, - host, - ); - } - - for (self.translate.items) |*case| { - _ = case; - @panic("TODO is this even used?"); - } - } -} - -fn runOneCase( - allocator: Allocator, - root_node: *std.Progress.Node, - case: Case, - zig_lib_directory: Compilation.Directory, - zig_exe_path: []const u8, - thread_pool: *ThreadPool, - global_cache_directory: Compilation.Directory, - host: std.Target, -) !void { - const tmp_src_path = "tmp.zig"; - const enable_rosetta = build_options.enable_rosetta; - const enable_qemu = build_options.enable_qemu; - const enable_wine = build_options.enable_wine; - const enable_wasmtime = build_options.enable_wasmtime; - const enable_darling = 
build_options.enable_darling; - const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir; - - const target = try std.zig.system.resolveTargetQuery(case.target); - - var arena_allocator = std.heap.ArenaAllocator.init(allocator); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - var tmp = std.testing.tmpDir(.{}); - defer tmp.cleanup(); - - var cache_dir = try tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - - const tmp_dir_path = try std.fs.path.join( - arena, - &[_][]const u8{ ".", ".zig-cache", "tmp", &tmp.sub_path }, - ); - const local_cache_path = try std.fs.path.join( - arena, - &[_][]const u8{ tmp_dir_path, ".zig-cache" }, - ); - - const zig_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = local_cache_path, - }; - - var main_pkg: Package = .{ - .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir }, - .root_src_path = tmp_src_path, - }; - defer { - var it = main_pkg.table.iterator(); - while (it.next()) |kv| { - allocator.free(kv.key_ptr.*); - kv.value_ptr.*.destroy(allocator); - } - main_pkg.table.deinit(allocator); - } - - for (case.deps.items) |dep| { - var pkg = try Package.create( - allocator, - tmp_dir_path, - dep.path, - ); - errdefer pkg.destroy(allocator); - try main_pkg.add(allocator, dep.name, pkg); +fn knownFileExtension(filename: []const u8) bool { + // List taken from `Compilation.classifyFileExt` in the compiler. + for ([_][]const u8{ + ".c", ".C", ".cc", ".cpp", + ".cxx", ".stub", ".m", ".mm", + ".ll", ".bc", ".s", ".S", + ".h", ".zig", ".so", ".dll", + ".dylib", ".tbd", ".a", ".lib", + ".o", ".obj", ".cu", ".def", + ".rc", ".res", ".manifest", + }) |ext| { + if (std.mem.endsWith(u8, filename, ext)) return true; } - - const bin_name = try std.zig.binNameAlloc(arena, .{ - .root_name = "test_case", - .target = target, - .output_mode = case.output_mode, - }); - - const emit_directory: Compilation.Directory = .{ - .path = tmp_dir_path, - .handle = tmp.dir, - }; - const emit_bin: Compilation.EmitLoc = .{ - .directory = emit_directory, - .basename = bin_name, - }; - const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{ - .directory = emit_directory, - .basename = "test_case.h", - } else null; - const use_llvm: bool = switch (case.backend) { - .llvm => true, - else => false, - }; - const comp = try Compilation.create(allocator, .{ - .local_cache_directory = zig_cache_directory, - .global_cache_directory = global_cache_directory, - .zig_lib_directory = zig_lib_directory, - .thread_pool = thread_pool, - .root_name = "test_case", - .target = target, - // TODO: support tests for object file building, and library builds - // and linking. This will require a rework to support multi-file - // tests. 
- .output_mode = case.output_mode, - .is_test = case.is_test, - .optimize_mode = case.optimize_mode, - .emit_bin = emit_bin, - .emit_h = emit_h, - .main_pkg = &main_pkg, - .keep_source_files_loaded = true, - .is_native_os = case.target.isNativeOs(), - .is_native_abi = case.target.isNativeAbi(), - .dynamic_linker = target.dynamic_linker.get(), - .link_libc = case.link_libc, - .use_llvm = use_llvm, - .self_exe_path = zig_exe_path, - // TODO instead of turning off color, pass in a std.Progress.Node - .color = .off, - .reference_trace = 0, - // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in - // until the auto-select mechanism deems them worthy - .use_lld = switch (case.backend) { - .stage2 => false, - else => null, - }, - }); - defer comp.destroy(); - - update: for (case.updates.items, 0..) |update, update_index| { - var update_node = root_node.start(update.name, 3); - update_node.activate(); - defer update_node.end(); - - var sync_node = update_node.start("write", 0); - sync_node.activate(); - for (update.files.items) |file| { - try tmp.dir.writeFile(.{ .sub_path = file.path, .data = file.src }); - } - sync_node.end(); - - var module_node = update_node.start("parse/analysis/codegen", 0); - module_node.activate(); - try comp.makeBinFileWritable(); - try comp.update(&module_node); - module_node.end(); - - if (update.case != .Error) { - var all_errors = try comp.getAllErrorsAlloc(); - defer all_errors.deinit(allocator); - if (all_errors.errorMessageCount() > 0) { - all_errors.renderToStdErr(.{ - .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()), - }); - // TODO print generated C code - return error.UnexpectedCompileErrors; - } - } - - switch (update.case) { - .Header => |expected_output| { - var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .CompareObjectFile => |expected_output| { - var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .Compile => {}, - .Error => |expected_errors| { - var test_node = update_node.start("assert", 0); - test_node.activate(); - defer test_node.end(); - - var error_bundle = try comp.getAllErrorsAlloc(); - defer error_bundle.deinit(allocator); - - if (error_bundle.errorMessageCount() == 0) { - return error.ExpectedCompilationErrors; - } - - var actual_stderr = std.ArrayList(u8).init(arena); - try error_bundle.renderToWriter(.{ - .ttyconf = .no_color, - .include_reference_trace = false, - .include_source_line = false, - }, actual_stderr.writer()); - - // Render the expected lines into a string that we can compare verbatim. 
- var expected_generated = std.ArrayList(u8).init(arena); - - var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n'); - for (expected_errors) |expect_line| { - const actual_line = actual_line_it.next() orelse { - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - continue; - }; - if (std.mem.endsWith(u8, actual_line, expect_line)) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - if (std.mem.startsWith(u8, expect_line, ":?:?: ")) { - if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - } - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - } - - try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items); - }, - .Execution => |expected_stdout| { - if (!std.process.can_spawn) { - std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)}); - continue :update; // Pass test. - } - - update_node.setEstimatedTotalItems(4); - - var argv = std.ArrayList([]const u8).init(allocator); - defer argv.deinit(); - - const exec_result = x: { - var exec_node = update_node.start("execute", 0); - exec_node.activate(); - defer exec_node.end(); - - // We go out of our way here to use the unique temporary directory name in - // the exe_path so that it makes its way into the cache hash, avoiding - // cache collisions from multiple threads doing `zig run` at the same time - // on the same test_case.c input filename. - const ss = std.fs.path.sep_str; - const exe_path = try std.fmt.allocPrint( - arena, - ".." ++ ss ++ "{s}" ++ ss ++ "{s}", - .{ &tmp.sub_path, bin_name }, - ); - if (case.target.ofmt != null and case.target.ofmt.? == .c) { - if (getExternalExecutor(host, &target, .{ .link_libc = true }) != .native) { - // We wouldn't be able to run the compiled C code. - continue :update; // Pass test. - } - try argv.appendSlice(&[_][]const u8{ - zig_exe_path, - "run", - "-cflags", - "-std=c99", - "-pedantic", - "-Werror", - "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875 - "--", - "-lc", - exe_path, - }); - if (zig_lib_directory.path) |p| { - try argv.appendSlice(&.{ "-I", p }); - } - } else switch (getExternalExecutor(host, &target, .{ .link_libc = case.link_libc })) { - .native => { - if (case.backend == .stage2 and case.target.getCpuArch().isArmOrThumb()) { - // https://github.com/ziglang/zig/issues/13623 - continue :update; // Pass test. - } - try argv.append(exe_path); - }, - .bad_dl, .bad_os_or_cpu => continue :update, // Pass test. - - .rosetta => if (enable_rosetta) { - try argv.append(exe_path); - } else { - continue :update; // Rosetta not available, pass test. - }, - - .qemu => |qemu_bin_name| if (enable_qemu) { - const need_cross_glibc = target.isGnuLibC() and case.link_libc; - const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc) - glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test - else - null; - try argv.append(qemu_bin_name); - if (glibc_dir_arg) |dir| { - const linux_triple = try target.linuxTriple(arena); - const full_dir = try std.fs.path.join(arena, &[_][]const u8{ - dir, - linux_triple, - }); - - try argv.append("-L"); - try argv.append(full_dir); - } - try argv.append(exe_path); - } else { - continue :update; // QEMU not available; pass test. 
- }, - - .wine => |wine_bin_name| if (enable_wine) { - try argv.append(wine_bin_name); - try argv.append(exe_path); - } else { - continue :update; // Wine not available; pass test. - }, - - .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) { - try argv.append(wasmtime_bin_name); - try argv.append("--dir=."); - try argv.append(exe_path); - } else { - continue :update; // wasmtime not available; pass test. - }, - - .darling => |darling_bin_name| if (enable_darling) { - try argv.append(darling_bin_name); - // Since we use relative to cwd here, we invoke darling with - // "shell" subcommand. - try argv.append("shell"); - try argv.append(exe_path); - } else { - continue :update; // Darling not available; pass test. - }, - } - - try comp.makeBinFileExecutable(); - - while (true) { - break :x std.process.Child.run(.{ - .allocator = allocator, - .argv = argv.items, - .cwd_dir = tmp.dir, - .cwd = tmp_dir_path, - }) catch |err| switch (err) { - error.FileBusy => { - // There is a fundamental design flaw in Unix systems with how - // ETXTBSY interacts with fork+exec. - // https://github.com/golang/go/issues/22315 - // https://bugs.openjdk.org/browse/JDK-8068370 - // Unfortunately, this could be a real error, but we can't - // tell the difference here. - continue; - }, - else => { - std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{ - case.name, update_index, @errorName(err), - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - }; - } - }; - var test_node = update_node.start("test", 0); - test_node.activate(); - defer test_node.end(); - defer allocator.free(exec_result.stdout); - defer allocator.free(exec_result.stderr); - switch (exec_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{ - exec_result.stderr, case.name, code, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - } - }, - else => { - std.debug.print("\n{s}\n{s}: execution crashed:\n", .{ - exec_result.stderr, case.name, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - } - try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout); - // We allow stderr to have garbage in it because wasmtime prints a - // warning about --invoke even though we don't pass it. - //std.testing.expectEqualStrings("", exec_result.stderr); - }, - } - } -} - -fn dumpArgs(argv: []const []const u8) void { - for (argv) |arg| { - std.debug.print("{s} ", .{arg}); + // Final check for .so.X, .so.X.Y, .so.X.Y.Z. + // From `Compilation.hasSharedLibraryExt`. 
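+    // e.g. "libfoo.so.1", "libfoo.so.1.2", and "libfoo.so.1.2.3" are recognized,
+    // while a non-numeric version component such as "libfoo.so.x" is not.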
+    var it = std.mem.splitScalar(u8, filename, '.');
+    _ = it.first();
+    var so_txt = it.next() orelse return false;
+    while (!std.mem.eql(u8, so_txt, "so")) {
+        so_txt = it.next() orelse return false;
     }
-    std.debug.print("\n", .{});
+    const n1 = it.next() orelse return false;
+    const n2 = it.next();
+    const n3 = it.next();
+    _ = std.fmt.parseInt(u32, n1, 10) catch return false;
+    if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (it.next() != null) return false;
+    return true;
 }
diff --git a/test/tests.zig b/test/tests.zig
index 2202936d5965..95a86c68f66d 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1250,7 +1250,6 @@ pub fn addCases(
     b: *std.Build,
     parent_step: *Step,
     test_filters: []const []const u8,
-    check_case_exe: *std.Build.Step.Compile,
     target: std.Build.ResolvedTarget,
     translate_c_options: @import("src/Cases.zig").TranslateCOptions,
     build_options: @import("cases.zig").BuildOptions,
@@ -1268,12 +1267,9 @@ pub fn addCases(

     cases.lowerToTranslateCSteps(b, parent_step, test_filters, target, translate_c_options);

-    const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
     cases.lowerToBuildSteps(
         b,
         parent_step,
         test_filters,
-        cases_dir_path,
-        check_case_exe,
     );
 }

From 00da182e6875845d5727c399b3738a13b262832e Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 30 Jun 2024 00:11:51 -0400
Subject: [PATCH 041/152] cbe: fix for export changes

---
 lib/zig.h           |   8 +-
 src/Compilation.zig |   3 +
 src/Zcu.zig         |  14 ++
 src/codegen/c.zig   | 378 +++++++++++++++++---------------------------
 src/link.zig        |   1 -
 src/link/C.zig      | 158 +++++++++++++-----
 6 files changed, 286 insertions(+), 276 deletions(-)

diff --git a/lib/zig.h b/lib/zig.h
index 1171c7efacf5..f3b389718672 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -207,16 +207,16 @@ typedef char bool;
     __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol))
 #endif

+#define zig_mangled_tentative zig_mangled
+#define zig_mangled_final zig_mangled
 #if _MSC_VER
-#define zig_mangled_tentative(mangled, unmangled)
-#define zig_mangled_final(mangled, unmangled) ; \
+#define zig_mangled(mangled, unmangled) ; \
     zig_export(#mangled, unmangled)
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_export(unmangled, #mangled) \
     zig_export(symbol, unmangled)
 #else /* _MSC_VER */
-#define zig_mangled_tentative(mangled, unmangled) __asm(zig_mangle_c(unmangled))
-#define zig_mangled_final(mangled, unmangled) zig_mangled_tentative(mangled, unmangled)
+#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled))
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_mangled_final(mangled, unmangled) \
     zig_export(symbol, unmangled)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7447d589fd90..185a9a6260d0 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3466,6 +3466,9 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             };
         },
         .emit_h_decl => |decl_index| {
+            if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
+                "not decl analysis, which is too early to know about @export calls");
+
             const module = comp.module.?;
             const decl = module.declPtr(decl_index);

diff --git a/src/Zcu.zig b/src/Zcu.zig
index e3b85e957dee..adfe60e67875 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -268,6 +268,20 @@ pub const Exported = union(enum) {
     decl_index: Decl.Index,
     /// Constant value being exported.
value: InternPool.Index, + + pub fn getValue(exported: Exported, zcu: *Zcu) Value { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).val, + .value => |value| Value.fromInterned(value), + }; + } + + pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).alignment, + .value => .none, + }; + } }; pub const Export = struct { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 13d9e6751967..92e9edb43389 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -731,8 +731,6 @@ pub const DeclGen = struct { if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index) return dg.renderDeclValue(writer, extern_func.decl, location); - if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative); - // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug @@ -748,7 +746,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); } try writer.writeByte('&'); - try dg.renderDeclName(writer, decl_index, 0); + try dg.renderDeclName(writer, decl_index); if (need_cast) try writer.writeByte(')'); } @@ -1765,19 +1763,22 @@ pub const DeclGen = struct { fn renderFunctionSignature( dg: *DeclGen, w: anytype, - fn_decl_index: InternPool.DeclIndex, + fn_val: Value, + fn_align: InternPool.Alignment, kind: CType.Kind, name: union(enum) { - export_index: u32, - ident: []const u8, + decl: InternPool.DeclIndex, fmt_ctype_pool_string: std.fmt.Formatter(formatCTypePoolString), + @"export": struct { + main_name: InternPool.NullTerminatedString, + extern_name: InternPool.NullTerminatedString, + }, }, ) !void { const zcu = dg.zcu; const ip = &zcu.intern_pool; - const fn_decl = zcu.declPtr(fn_decl_index); - const fn_ty = fn_decl.typeOf(zcu); + const fn_ty = fn_val.typeOf(zcu); const fn_ctype = try dg.ctypeFromType(fn_ty, kind); const fn_info = zcu.typeToFunc(fn_ty).?; @@ -1788,7 +1789,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold) + if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); @@ -1799,22 +1800,11 @@ pub const DeclGen = struct { trailing = .maybe_space; } - switch (kind) { - .forward => {}, - .complete => if (fn_decl.alignment.toByteUnits()) |a| { - try w.print("{}zig_align_fn({})", .{ trailing, a }); - trailing = .maybe_space; - }, - else => unreachable, - } - + try w.print("{}", .{trailing}); switch (name) { - .export_index => |export_index| { - try w.print("{}", .{trailing}); - try dg.renderDeclName(w, fn_decl_index, export_index); - }, - .ident => |ident| try w.print("{}{ }", .{ trailing, fmtIdent(ident) }), - .fmt_ctype_pool_string => |fmt| try w.print("{}{ }", .{ trailing, fmt }), + .decl => |decl_index| try dg.renderDeclName(w, decl_index), + .fmt_ctype_pool_string => |fmt| try w.print("{ }", .{fmt}), + .@"export" => |@"export"| try w.print("{ }", .{fmtIdent(@"export".extern_name.toSlice(ip))}), } try renderTypeSuffix( @@ -1833,44 +1823,30 @@ pub const DeclGen = struct { switch (kind) { .forward => { - if (fn_decl.alignment.toByteUnits()) |a| { - try w.print(" zig_align_fn({})", .{a}); - } + if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a}); switch (name) { - 
.export_index => |export_index| mangled: { - const maybe_exports = zcu.decl_exports.get(fn_decl_index); - const external_name = (if (maybe_exports) |exports| - exports.items[export_index].opts.name - else if (fn_decl.isExtern(zcu)) - fn_decl.name - else - break :mangled).toSlice(ip); - const is_mangled = isMangledIdent(external_name, true); - const is_export = export_index > 0; + .decl, .fmt_ctype_pool_string => {}, + .@"export" => |@"export"| { + const extern_name = @"export".extern_name.toSlice(ip); + const is_mangled = isMangledIdent(extern_name, true); + const is_export = @"export".extern_name != @"export".main_name; if (is_mangled and is_export) { try w.print(" zig_mangled_export({ }, {s}, {s})", .{ - fmtIdent(external_name), - fmtStringLiteral(external_name, null), - fmtStringLiteral( - maybe_exports.?.items[0].opts.name.toSlice(ip), - null, - ), + fmtIdent(extern_name), + fmtStringLiteral(extern_name, null), + fmtStringLiteral(@"export".main_name.toSlice(ip), null), }); } else if (is_mangled) { - try w.print(" zig_mangled_final({ }, {s})", .{ - fmtIdent(external_name), fmtStringLiteral(external_name, null), + try w.print(" zig_mangled({ }, {s})", .{ + fmtIdent(extern_name), fmtStringLiteral(extern_name, null), }); } else if (is_export) { try w.print(" zig_export({s}, {s})", .{ - fmtStringLiteral( - maybe_exports.?.items[0].opts.name.toSlice(ip), - null, - ), - fmtStringLiteral(external_name, null), + fmtStringLiteral(@"export".main_name.toSlice(ip), null), + fmtStringLiteral(extern_name, null), }); } }, - .ident, .fmt_ctype_pool_string => {}, } }, .complete => {}, @@ -2085,21 +2061,11 @@ pub const DeclGen = struct { try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); } - fn declIsGlobal(dg: *DeclGen, val: Value) bool { - const zcu = dg.zcu; - return switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| zcu.decl_exports.contains(variable.decl), - .extern_func => true, - .func => |func| zcu.decl_exports.contains(func.owner_decl), - else => unreachable, - }; - } - fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .new_local, .local => |i| try w.print("t{d}", .{i}), .constant => |val| try renderAnonDeclName(w, val), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), else => unreachable, } @@ -2111,10 +2077,10 @@ pub const DeclGen = struct { .constant => |val| try renderAnonDeclName(w, val), .arg, .arg_array => unreachable, .field => |i| try w.print("f{d}", .{i}), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .decl_ref => |decl| { try w.writeByte('&'); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); }, .undef => |ty| try dg.renderUndefValue(w, ty, .Other), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), @@ -2142,10 +2108,10 @@ pub const DeclGen = struct { .field => |i| try w.print("f{d}", .{i}), .decl => |decl| { try w.writeAll("(*"); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); try w.writeByte(')'); }, - .decl_ref => |decl| try dg.renderDeclName(w, decl, 0), + .decl_ref => |decl| try dg.renderDeclName(w, decl), .undef => unreachable, .identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}), .payload_identifier => |ident| try w.print("(*{ }.{ })", .{ @@ -2195,19 +2161,12 @@ pub const DeclGen = struct { dg: *DeclGen, decl_index: InternPool.DeclIndex, variable: 
InternPool.Key.Variable, - fwd_kind: enum { tentative, final }, ) !void { const zcu = dg.zcu; const decl = zcu.declPtr(decl_index); const fwd = dg.fwdDeclWriter(); - const is_global = variable.is_extern or dg.declIsGlobal(decl.val); - try fwd.writeAll(if (is_global) "zig_extern " else "static "); - const maybe_exports = zcu.decl_exports.get(decl_index); - const export_weak_linkage = if (maybe_exports) |exports| - exports.items[0].opts.linkage == .weak - else - false; - if (variable.is_weak_linkage or export_weak_linkage) try fwd.writeAll("zig_weak_linkage "); + try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); + if (variable.is_weak_linkage) try fwd.writeAll("zig_weak_linkage "); if (variable.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); try dg.renderTypeAndName( fwd, @@ -2217,38 +2176,17 @@ pub const DeclGen = struct { decl.alignment, .complete, ); - mangled: { - const external_name = (if (maybe_exports) |exports| - exports.items[0].opts.name - else if (variable.is_extern) - decl.name - else - break :mangled).toSlice(&zcu.intern_pool); - if (isMangledIdent(external_name, true)) { - try fwd.print(" zig_mangled_{s}({ }, {s})", .{ - @tagName(fwd_kind), - fmtIdent(external_name), - fmtStringLiteral(external_name, null), - }); - } - } try fwd.writeAll(";\n"); } - fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void { + fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void { const zcu = dg.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(decl_index); - if (zcu.decl_exports.get(decl_index)) |exports| { - try writer.print("{ }", .{ - fmtIdent(exports.items[export_index].opts.name.toSlice(ip)), - }); - } else if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| { - try writer.print("{ }", .{ - fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)), - }); - } else { + if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| try writer.print("{ }", .{ + fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)), + }) else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. var name: [100]u8 = undefined; @@ -2761,69 +2699,6 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("};\n"); } -fn genExports(o: *Object) !void { - const tracy = trace(@src()); - defer tracy.end(); - - const zcu = o.dg.zcu; - const ip = &zcu.intern_pool; - const decl_index = switch (o.dg.pass) { - .decl => |decl| decl, - .anon, .flush => return, - }; - const decl = zcu.declPtr(decl_index); - const fwd = o.dg.fwdDeclWriter(); - - const exports = zcu.decl_exports.get(decl_index) orelse return; - if (exports.items.len < 2) return; - - const is_variable_const = switch (ip.indexToKey(decl.val.toIntern())) { - .func => return for (exports.items[1..], 1..) 
|@"export", i| { - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); - try o.dg.renderFunctionSignature( - fwd, - decl_index, - .forward, - .{ .export_index = @intCast(i) }, - ); - try fwd.writeAll(";\n"); - }, - .extern_func => { - // TODO: when sema allows re-exporting extern decls - unreachable; - }, - .variable => |variable| variable.is_const, - else => true, - }; - for (exports.items[1..]) |@"export"| { - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); - const export_name = @"export".opts.name.toSlice(ip); - try o.dg.renderTypeAndName( - fwd, - decl.typeOf(zcu), - .{ .identifier = export_name }, - CQualifiers.init(.{ .@"const" = is_variable_const }), - decl.alignment, - .complete, - ); - if (isMangledIdent(export_name, true)) { - try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{ - fmtIdent(export_name), - fmtStringLiteral(export_name, null), - fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null), - }); - } else { - try fwd.print(" zig_export({s}, {s})", .{ - fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null), - fmtStringLiteral(export_name, null), - }); - } - try fwd.writeAll(";\n"); - } -} - pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void { const zcu = o.dg.zcu; const ip = &zcu.intern_pool; @@ -2885,19 +2760,19 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn const fn_info = fn_ctype.info(ctype_pool).function; const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool); - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ + const fwd = o.dg.fwdDeclWriter(); + try fwd.print("static zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(fwd, fn_decl.val, fn_decl.alignment, .forward, .{ .fmt_ctype_pool_string = fn_name, }); - try fwd_decl_writer.writeAll(";\n"); + try fwd.writeAll(";\n"); - try w.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ + try w.print("zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(w, fn_decl.val, .none, .complete, .{ .fmt_ctype_pool_string = fn_name, }); try w.writeAll(" {\n return "); - try o.dg.renderDeclName(w, fn_decl_index, 0); + try o.dg.renderDeclName(w, fn_decl_index); try w.writeByte('('); for (0..fn_info.param_ctypes.len) |arg| { if (arg > 0) try w.writeAll(", "); @@ -2921,21 +2796,26 @@ pub fn genFunc(f: *Function) !void { o.code_header = std.ArrayList(u8).init(gpa); defer o.code_header.deinit(); - const is_global = o.dg.declIsGlobal(decl.val); - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - - if (zcu.decl_exports.get(decl_index)) |exports| - if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn "); - try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 }); - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("static "); + try o.dg.renderFunctionSignature( + fwd, + decl.val, + decl.alignment, + .forward, + .{ .decl = decl_index }, + ); + try fwd.writeAll(";\n"); - try o.indent_writer.insertNewline(); - if (!is_global) try o.writer().writeAll("static "); if 
(decl.@"linksection".toSlice(&zcu.intern_pool)) |s| try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)}); - try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 }); + try o.dg.renderFunctionSignature( + o.writer(), + decl.val, + .none, + .complete, + .{ .decl = decl_index }, + ); try o.writer().writeByte(' '); // In case we need to use the header, populate it with a copy of the function @@ -2949,7 +2829,6 @@ pub fn genFunc(f: *Function) !void { const main_body = f.air.getMainBody(); try genBodyResolveState(f, undefined, &.{}, main_body, false); - try o.indent_writer.insertNewline(); // Take advantage of the free_locals map to bucket locals per type. All @@ -3007,20 +2886,25 @@ pub fn genDecl(o: *Object) !void { if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; if (decl.val.getExternFunc(zcu)) |_| { - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.writeAll("zig_extern "); - try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 }); - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("zig_extern "); + try o.dg.renderFunctionSignature( + fwd, + decl.val, + decl.alignment, + .forward, + .{ .@"export" = .{ + .main_name = decl.name, + .extern_name = decl.name, + } }, + ); + try fwd.writeAll(";\n"); } else if (decl.val.getVariable(zcu)) |variable| { - try o.dg.renderFwdDecl(decl_index, variable, .final); - try genExports(o); + try o.dg.renderFwdDecl(decl_index, variable); if (variable.is_extern) return; - const is_global = variable.is_extern or o.dg.declIsGlobal(decl.val); const w = o.writer(); - if (!is_global) try w.writeAll("static "); if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal "); if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s| @@ -3032,46 +2916,27 @@ pub fn genDecl(o: *Object) !void { try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { - const is_global = o.dg.zcu.decl_exports.contains(decl_index); const decl_c_value = .{ .decl = decl_index }; - try genDeclValue(o, decl.val, is_global, decl_c_value, decl.alignment, decl.@"linksection"); + try genDeclValue(o, decl.val, decl_c_value, decl.alignment, decl.@"linksection"); } } pub fn genDeclValue( o: *Object, val: Value, - is_global: bool, decl_c_value: CValue, alignment: Alignment, @"linksection": InternPool.OptionalNullTerminatedString, ) !void { const zcu = o.dg.zcu; - const fwd_decl_writer = o.dg.fwdDeclWriter(); - const ty = val.typeOf(zcu); - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, ty, decl_c_value, Const, alignment, .complete); - switch (o.dg.pass) { - .decl => |decl_index| { - if (zcu.decl_exports.get(decl_index)) |exports| { - const export_name = exports.items[0].opts.name.toSlice(&zcu.intern_pool); - if (isMangledIdent(export_name, true)) { - try fwd_decl_writer.print(" zig_mangled_final({ }, {s})", .{ - fmtIdent(export_name), fmtStringLiteral(export_name, null), - }); - } - } - }, - .anon => {}, - .flush => unreachable, - } - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("static "); + try o.dg.renderTypeAndName(fwd, ty, decl_c_value, Const, alignment, .complete); + try fwd.writeAll(";\n"); const w = o.writer(); - if (!is_global) try w.writeAll("static "); if 
(@"linksection".toSlice(&zcu.intern_pool)) |s| try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)}); try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete); @@ -3080,24 +2945,73 @@ pub fn genDeclValue( try w.writeAll(";\n"); } -pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { - if (true) @panic("TODO jacobly"); - - const tracy = trace(@src()); - defer tracy.end(); - +pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void { const zcu = dg.zcu; - const decl_index = dg.pass.decl; - const decl = zcu.declPtr(decl_index); - const writer = dg.fwdDeclWriter(); + const ip = &zcu.intern_pool; + const fwd = dg.fwdDeclWriter(); - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => if (dg.declIsGlobal(decl.val)) { - try writer.writeAll("zig_extern "); - try dg.renderFunctionSignature(writer, dg.pass.decl, .complete, .{ .export_index = 0 }); - try dg.fwd_decl.appendSlice(";\n"); + const main_name = zcu.all_exports.items[export_indices[0]].opts.name; + try fwd.writeAll("#define "); + switch (exported) { + .decl_index => |decl_index| try dg.renderDeclName(fwd, decl_index), + .value => |value| try DeclGen.renderAnonDeclName(fwd, Value.fromInterned(value)), + } + try fwd.writeByte(' '); + try fwd.print("{ }", .{fmtIdent(main_name.toSlice(ip))}); + try fwd.writeByte('\n'); + + const is_const = switch (ip.indexToKey(exported.getValue(zcu).toIntern())) { + .func, .extern_func => return for (export_indices) |export_index| { + const @"export" = &zcu.all_exports.items[export_index]; + try fwd.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); + try dg.renderFunctionSignature( + fwd, + exported.getValue(zcu), + exported.getAlign(zcu), + .forward, + .{ .@"export" = .{ + .main_name = main_name, + .extern_name = @"export".opts.name, + } }, + ); + try fwd.writeAll(";\n"); }, - else => {}, + .variable => |variable| variable.is_const, + else => true, + }; + for (export_indices) |export_index| { + const @"export" = &zcu.all_exports.items[export_index]; + try fwd.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); + const extern_name = @"export".opts.name.toSlice(ip); + const is_mangled = isMangledIdent(extern_name, true); + const is_export = @"export".opts.name != main_name; + try dg.renderTypeAndName( + fwd, + exported.getValue(zcu).typeOf(zcu), + .{ .identifier = extern_name }, + CQualifiers.init(.{ .@"const" = is_const }), + exported.getAlign(zcu), + .complete, + ); + if (is_mangled and is_export) { + try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{ + fmtIdent(extern_name), + fmtStringLiteral(extern_name, null), + fmtStringLiteral(main_name.toSlice(ip), null), + }); + } else if (is_mangled) { + try fwd.print(" zig_mangled({ }, {s})", .{ + fmtIdent(extern_name), fmtStringLiteral(extern_name, null), + }); + } else if (is_export) { + try fwd.print(" zig_export({s}, {s})", .{ + fmtStringLiteral(main_name.toSlice(ip), null), + fmtStringLiteral(extern_name, null), + }); + } + try fwd.writeAll(";\n"); } } @@ -4554,7 +4468,7 @@ fn airCall( }; }; switch (modifier) { - .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), + .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl), inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( @unionInit(LazyFnKey, @tagName(m), fn_decl), @unionInit(LazyFnValue.Data, @tagName(m), {}), diff --git a/src/link.zig 
b/src/link.zig index 009b38a681af..298d81d80c43 100644 --- a/src/link.zig +++ b/src/link.zig @@ -679,7 +679,6 @@ pub const File = struct { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .plan9, - .c, .spirv, .nvptx, => {}, diff --git a/src/link/C.zig b/src/link/C.zig index 8372029d2d5e..be8397e196c1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -39,6 +39,9 @@ anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, DeclBlock) = .{}, /// the keys of `anon_decls`. aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{}, +exported_decls: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, ExportedBlock) = .{}, +exported_values: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{}, + /// Optimization, `updateDecl` reuses this buffer rather than creating a new /// one with every call. fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{}, @@ -80,6 +83,11 @@ pub const DeclBlock = struct { } }; +/// Per-exported-symbol data. +pub const ExportedBlock = struct { + fwd_decl: String = String.empty, +}; + pub fn getString(this: C, s: String) []const u8 { return this.string_bytes.items[s.start..][0..s.len]; } @@ -183,8 +191,6 @@ pub fn updateFunc( air: Air, liveness: Liveness, ) !void { - if (true) @panic("TODO jacobly"); - const gpa = self.base.comp.gpa; const func = zcu.funcInfo(func_index); @@ -240,9 +246,13 @@ pub fn updateFunc( function.deinit(); } + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - try zcu.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?); + zcu.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + function.object.dg.error_msg.?, + ); return; }, else => |e| return e, @@ -252,8 +262,6 @@ pub fn updateFunc( } fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { - if (true) @panic("TODO jacobly"); - const gpa = self.base.comp.gpa; const anon_decl = self.anon_decls.keys()[i]; @@ -292,7 +300,7 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) }; const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none; - codegen.genDeclValue(&object, c_value.constant, false, c_value, alignment, .none) catch |err| switch (err) { + codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) { error.AnalysisFail => { @panic("TODO: C backend AnalysisFail on anonymous decl"); //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); @@ -310,8 +318,6 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { } pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { - if (true) @panic("TODO jacobly"); - const tracy = trace(@src()); defer tracy.end(); @@ -357,9 +363,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { code.* = object.code.moveToUnmanaged(); } + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); codegen.genDecl(&object) catch |err| switch (err) { error.AnalysisFail => { - try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); + zcu.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + object.dg.error_msg.?, + ); return; }, else => |e| return e, @@ -396,8 +406,6 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { } pub fn flushModule(self: *C, arena: Allocator, prog_node: 
std.Progress.Node) !void { - if (true) @panic("TODO jacobly"); - _ = arena; // Has the same lifetime as the call to Compilation.update. const tracy = trace(@src()); @@ -460,26 +468,39 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); - for (zcu.single_exports.values()) |export_idx| { - export_names.putAssumeCapacity(gpa, zcu.all_exports.items[export_idx].opts.name, {}); + for (zcu.single_exports.values()) |export_index| { + export_names.putAssumeCapacity(zcu.all_exports.items[export_index].opts.name, {}); } for (zcu.multi_exports.values()) |info| { - try export_names.ensureUnusedCapacity(info.len); - for (zcu.all_exports.items[info.index..][0..info.len]) |export_idx| { - export_names.putAssumeCapacity(gpa, zcu.all_exports.items[export_idx].opts.name, {}); + try export_names.ensureUnusedCapacity(gpa, info.len); + for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| { + export_names.putAssumeCapacity(@"export".opts.name, {}); } } - for (self.anon_decls.values()) |*decl_block| { - try self.flushDeclBlock(zcu, zcu.root_mod, &f, decl_block, export_names, .none); - } + for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock( + zcu, + zcu.root_mod, + &f, + decl_block, + self.exported_values.getPtr(value), + export_names, + .none, + ); for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| { const decl = zcu.declPtr(decl_index); - assert(decl.has_tv); - const extern_symbol_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; + const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; const mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod; - try self.flushDeclBlock(zcu, mod, &f, decl_block, export_names, extern_symbol_name); + try self.flushDeclBlock( + zcu, + mod, + &f, + decl_block, + self.exported_decls.getPtr(decl_index), + export_names, + extern_name, + ); } } @@ -512,12 +533,16 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo f.file_size += lazy_fwd_decl_len; // Now the code. 
- const anon_decl_values = self.anon_decls.values(); - const decl_values = self.decl_table.values(); - try f.all_buffers.ensureUnusedCapacity(gpa, 1 + anon_decl_values.len + decl_values.len); + try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.anon_decls.count() + self.decl_table.count()) * 2); f.appendBufAssumeCapacity(self.lazy_code_buf.items); - for (anon_decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code)); - for (decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code)); + for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| f.appendCodeAssumeCapacity( + self.exported_values.contains(anon_decl), + self.getString(decl_block.code), + ); + for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| f.appendCodeAssumeCapacity( + self.exported_decls.contains(decl_index), + self.getString(decl_block.code), + ); const file = self.base.file.?; try file.setEndPos(f.file_size); @@ -547,6 +572,12 @@ const Flush = struct { f.file_size += buf.len; } + fn appendCodeAssumeCapacity(f: *Flush, is_extern: bool, code: []const u8) void { + if (code.len == 0) return; + f.appendBufAssumeCapacity(if (is_extern) "\nzig_extern " else "\nstatic "); + f.appendBufAssumeCapacity(code); + } + fn deinit(f: *Flush, gpa: Allocator) void { f.all_buffers.deinit(gpa); f.asm_buf.deinit(gpa); @@ -734,19 +765,20 @@ fn flushDeclBlock( zcu: *Zcu, mod: *Module, f: *Flush, - decl_block: *DeclBlock, + decl_block: *const DeclBlock, + exported_block: ?*const ExportedBlock, export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - extern_symbol_name: InternPool.OptionalNullTerminatedString, + extern_name: InternPool.OptionalNullTerminatedString, ) FlushDeclError!void { const gpa = self.base.comp.gpa; try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - fwd_decl: { - if (extern_symbol_name.unwrap()) |name| { - if (export_names.contains(name)) break :fwd_decl; - } - f.appendBufAssumeCapacity(self.getString(decl_block.fwd_decl)); - } + // avoid emitting extern decls that are already exported + if (extern_name.unwrap()) |name| if (export_names.contains(name)) return; + f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported| + exported.fwd_decl + else + decl_block.fwd_decl)); } pub fn flushEmitH(zcu: *Zcu) !void { @@ -798,8 +830,56 @@ pub fn updateExports( exported: Zcu.Exported, export_indices: []const u32, ) !void { - _ = self; - _ = zcu; - _ = exported; - _ = export_indices; + const gpa = self.base.comp.gpa; + const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { + .decl_index => |decl_index| .{ + zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).file_scope.mod, + .{ .decl = decl_index }, + self.decl_table.getPtr(decl_index).?, + (try self.exported_decls.getOrPut(gpa, decl_index)).value_ptr, + }, + .value => |value| .{ + zcu.root_mod, + .{ .anon = value }, + self.anon_decls.getPtr(value).?, + (try self.exported_values.getOrPut(gpa, value)).value_ptr, + }, + }; + const ctype_pool = &decl_block.ctype_pool; + const fwd_decl = &self.fwd_decl_buf; + fwd_decl.clearRetainingCapacity(); + var dg: codegen.DeclGen = .{ + .gpa = gpa, + .zcu = zcu, + .mod = mod, + .error_msg = null, + .pass = pass, + .is_naked_fn = false, + .fwd_decl = fwd_decl.toManaged(gpa), + .ctype_pool = decl_block.ctype_pool, + .scratch = .{}, + .anon_decl_deps = .{}, + .aligned_anon_decls = .{}, + }; + defer { + 
assert(dg.anon_decl_deps.count() == 0); + assert(dg.aligned_anon_decls.count() == 0); + fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); + ctype_pool.* = dg.ctype_pool.move(); + ctype_pool.freeUnusedCapacity(gpa); + dg.scratch.deinit(gpa); + } + try codegen.genExports(&dg, exported, export_indices); + exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.items) }; +} + +pub fn deleteExport( + self: *C, + exported: Zcu.Exported, + _: InternPool.NullTerminatedString, +) void { + switch (exported) { + .decl_index => |decl_index| _ = self.exported_decls.swapRemove(decl_index), + .value => |value| _ = self.exported_values.swapRemove(value), + } } From eae9aa800e76efb835f81ee5b788890425ee95dd Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 07:00:56 +0100 Subject: [PATCH 042/152] std: avoid references that trigger compile errors Note that the `_ = Address` statements in tests previously were a nop, and now actually check that the type is valid. However, on WASI, the type is *not* valid. --- lib/std/dynamic_library.zig | 47 ++++++++++++++++++++++++------------- lib/std/http.zig | 12 +++++----- lib/std/net.zig | 10 ++++---- 3 files changed, 43 insertions(+), 26 deletions(-) diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index a1db48b470cc..f5ce0c8da8e2 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -17,12 +17,15 @@ pub const DynLib = struct { DlDynLib, .windows => WindowsDynLib, .macos, .tvos, .watchos, .ios, .visionos, .freebsd, .netbsd, .openbsd, .dragonfly, .solaris, .illumos => DlDynLib, - else => @compileError("unsupported platform"), + else => struct { + const open = @compileError("unsupported platform"); + const openZ = @compileError("unsupported platform"); + }, }; inner: InnerType, - pub const Error = ElfDynLib.Error || DlDynLib.Error || WindowsDynLib.Error; + pub const Error = ElfDynLibError || DlDynLibError || WindowsDynLibError; /// Trusts the file. Malicious file will be able to execute arbitrary code. pub fn open(path: []const u8) Error!DynLib { @@ -122,6 +125,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) error{InvalidExe}!LinkMap.Iterator { return .{ .current = link_map_ptr }; } +/// Separated to avoid referencing `ElfDynLib`, because its field types may not +/// be valid on other targets. +const ElfDynLibError = error{ + FileTooBig, + NotElfFile, + NotDynamicLibrary, + MissingDynamicLinkingInformation, + ElfStringSectionNotFound, + ElfSymSectionNotFound, + ElfHashTableNotFound, +} || posix.OpenError || posix.MMapError; + pub const ElfDynLib = struct { strings: [*:0]u8, syms: [*]elf.Sym, @@ -130,15 +145,7 @@ pub const ElfDynLib = struct { verdef: ?*elf.Verdef, memory: []align(mem.page_size) u8, - pub const Error = error{ - FileTooBig, - NotElfFile, - NotDynamicLibrary, - MissingDynamicLinkingInformation, - ElfStringSectionNotFound, - ElfSymSectionNotFound, - ElfHashTableNotFound, - } || posix.OpenError || posix.MMapError; + pub const Error = ElfDynLibError; /// Trusts the file. Malicious file will be able to execute arbitrary code. pub fn open(path: []const u8) Error!ElfDynLib { @@ -350,11 +357,15 @@ test "ElfDynLib" { try testing.expectError(error.FileNotFound, ElfDynLib.open("invalid_so.so")); } +/// Separated to avoid referencing `WindowsDynLib`, because its field types may not +/// be valid on other targets. 
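A minimal sketch of the language rule that both the `DynLib.Error` split above and the test changes below rely on (names taken from this patch, not new API): a bare `_ = Decl;` is now a real reference that forces the declaration to be analyzed, so a type whose fields cannot compile on the current target must only be referenced behind a target check.

    test {
        // `_ = Address` forces `Address` to be analyzed, surfacing a
        // compile error on targets (such as WASI) where its field types
        // are invalid. Hence the guard:
        if (builtin.os.tag != .wasi) {
            _ = Address;
        }
    }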
+const WindowsDynLibError = error{ + FileNotFound, + InvalidPath, +} || windows.LoadLibraryError; + pub const WindowsDynLib = struct { - pub const Error = error{ - FileNotFound, - InvalidPath, - } || windows.LoadLibraryError; + pub const Error = WindowsDynLibError; dll: windows.HMODULE, @@ -413,8 +424,12 @@ pub const WindowsDynLib = struct { } }; +/// Separated to avoid referencing `DlDynLib`, because its field types may not +/// be valid on other targets. +const DlDynLibError = error{ FileNotFound, NameTooLong }; + pub const DlDynLib = struct { - pub const Error = error{ FileNotFound, NameTooLong }; + pub const Error = DlDynLibError; handle: *anyopaque, diff --git a/lib/std/http.zig b/lib/std/http.zig index af966d89e75d..621c7a5f0d74 100644 --- a/lib/std/http.zig +++ b/lib/std/http.zig @@ -311,13 +311,13 @@ const builtin = @import("builtin"); const std = @import("std.zig"); test { - _ = Client; - _ = Method; - _ = Server; - _ = Status; - _ = HeadParser; - _ = ChunkParser; if (builtin.os.tag != .wasi) { + _ = Client; + _ = Method; + _ = Server; + _ = Status; + _ = HeadParser; + _ = ChunkParser; _ = @import("http/test.zig"); } } diff --git a/lib/std/net.zig b/lib/std/net.zig index 79ca71d0e28a..b46cc2aece57 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1930,8 +1930,10 @@ pub const Server = struct { }; test { - _ = @import("net/test.zig"); - _ = Server; - _ = Stream; - _ = Address; + if (builtin.os.tag != .wasi) { + _ = Server; + _ = Stream; + _ = Address; + _ = @import("net/test.zig"); + } } From a5d5c097f55d7a1b53055310e2d150b23e56eebb Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 10:21:30 +0100 Subject: [PATCH 043/152] Sema: add missing references --- src/Sema.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index 9dfbc724ebb4..0d4cf2687156 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -877,6 +877,7 @@ pub fn deinit(sema: *Sema) void { sema.maybe_comptime_allocs.deinit(gpa); sema.comptime_allocs.deinit(gpa); sema.exports.deinit(gpa); + sema.references.deinit(gpa); sema.* = undefined; } @@ -2824,6 +2825,7 @@ fn zirStructDecl( try mod.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3325,6 +3327,7 @@ fn zirUnionDecl( try mod.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -21966,6 +21969,7 @@ fn reifyUnion( try mod.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -22231,6 +22235,7 @@ fn reifyStruct( try mod.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } From cda6f552d5d4a996df69981dac7c9d9b3c066537 Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 10:31:59 +0100 Subject: [PATCH 044/152] cbe: don't mark exported values/Decls as extern --- src/link/C.zig | 23 
+++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/link/C.zig b/src/link/C.zig index be8397e196c1..21245c1e3009 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -536,11 +536,22 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.anon_decls.count() + self.decl_table.count()) * 2); f.appendBufAssumeCapacity(self.lazy_code_buf.items); for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| f.appendCodeAssumeCapacity( - self.exported_values.contains(anon_decl), + if (self.exported_values.contains(anon_decl)) + .default + else switch (zcu.intern_pool.indexToKey(anon_decl)) { + .extern_func => .zig_extern, + .variable => |variable| if (variable.is_extern) .zig_extern else .static, + else => .static, + }, self.getString(decl_block.code), ); for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| f.appendCodeAssumeCapacity( - self.exported_decls.contains(decl_index), + if (self.exported_decls.contains(decl_index)) + .default + else if (zcu.declPtr(decl_index).isExtern(zcu)) + .zig_extern + else + .static, self.getString(decl_block.code), ); @@ -572,9 +583,13 @@ const Flush = struct { f.file_size += buf.len; } - fn appendCodeAssumeCapacity(f: *Flush, is_extern: bool, code: []const u8) void { + fn appendCodeAssumeCapacity(f: *Flush, storage: enum { default, zig_extern, static }, code: []const u8) void { if (code.len == 0) return; - f.appendBufAssumeCapacity(if (is_extern) "\nzig_extern " else "\nstatic "); + f.appendBufAssumeCapacity(switch (storage) { + .default => "\n", + .zig_extern => "\nzig_extern ", + .static => "\nstatic ", + }); f.appendBufAssumeCapacity(code); } From e42e12dbbf8ebae65589ff08c3b5a454d2d67441 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 22:03:31 +0200 Subject: [PATCH 045/152] tsan: fix wording in comments --- src/libtsan.zig | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/libtsan.zig b/src/libtsan.zig index b7d3a9dda267..bc72f9d86c66 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -27,9 +27,8 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo const target = comp.getTarget(); const root_name = switch (target.os.tag) { - // On Apple platforms, we use the same name as LLVM and Apple so that we correctly - // mark the images as instrumented when traversing them when TSAN dylib is - // initialized. + // On Apple platforms, we use the same name as LLVM because the + // TSAN library implementation hard-codes a check for these names. .macos => "clang_rt.tsan_osx_dynamic", .ios => switch (target.abi) { .simulator => "clang_rt.tsan_iossim_dynamic", @@ -290,7 +289,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo try std.fmt.allocPrintZ(arena, "@rpath/{s}", .{basename}) else null; - // This is temp conditional on resolving https://github.com/llvm/llvm-project/issues/97627 upstream. 
+ // Workaround for https://github.com/llvm/llvm-project/issues/97627 const headerpad_size: ?u32 = if (target.isDarwin()) 32 else null; const sub_compilation = Compilation.create(comp.gpa, arena, .{ .local_cache_directory = comp.global_cache_directory, From d2cace58bd031e72b830ebd42f5946f4000e52a4 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 4 Jul 2024 22:09:57 +0200 Subject: [PATCH 046/152] Compilation: rename tsan_static_lib to tsan_lib --- src/Compilation.zig | 7 ++----- src/libtsan.zig | 9 ++------- src/link/Elf.zig | 6 +++--- src/link/MachO.zig | 6 +++--- src/link/MachO/load_commands.zig | 2 +- 5 files changed, 11 insertions(+), 19 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 412798e09a3d..b72a58f7fc96 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -185,12 +185,9 @@ libcxxabi_static_lib: ?CRTFile = null, /// Populated when we build the libunwind static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libunwind_static_lib: ?CRTFile = null, -/// Populated when we build the TSAN static library. A Job to build this is placed in the queue +/// Populated when we build the TSAN library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). -tsan_static_lib: ?CRTFile = null, -/// Populated when we build the TSAN dynamic library. A Job to build this is placed in the queue -/// and resolved before calling linker.flush(). -tsan_dynamic_lib: ?CRTFile = null, +tsan_lib: ?CRTFile = null, /// Populated when we build the libc static library. A Job to build this is placed in the queue /// and resolved before calling linker.flush(). libc_static_lib: ?CRTFile = null, diff --git a/src/libtsan.zig b/src/libtsan.zig index bc72f9d86c66..42b605bf3c2f 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -339,13 +339,8 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo }, }; - assert(comp.tsan_static_lib == null and comp.tsan_dynamic_lib == null); - - if (target.isDarwin()) { - comp.tsan_dynamic_lib = try sub_compilation.toCrtFile(); - } else { - comp.tsan_static_lib = try sub_compilation.toCrtFile(); - } + assert(comp.tsan_lib == null); + comp.tsan_lib = try sub_compilation.toCrtFile(); } const tsan_sources = [_][]const u8{ diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 5a14a544e85d..b1048dfe9da8 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1145,7 +1145,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) l // TSAN if (comp.config.any_sanitize_thread) { - try positionals.append(.{ .path = comp.tsan_static_lib.?.full_object_path }); + try positionals.append(.{ .path = comp.tsan_lib.?.full_object_path }); } // libc @@ -1603,7 +1603,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void { } if (comp.config.any_sanitize_thread) { - try argv.append(comp.tsan_static_lib.?.full_object_path); + try argv.append(comp.tsan_lib.?.full_object_path); } // libc @@ -2610,7 +2610,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void } if (comp.config.any_sanitize_thread) { - try argv.append(comp.tsan_static_lib.?.full_object_path); + try argv.append(comp.tsan_lib.?.full_object_path); } // libc diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 76bea8176630..4dcc11ef53f9 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -413,7 +413,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) // TSAN if 
(comp.config.any_sanitize_thread) { - try positionals.append(.{ .path = comp.tsan_dynamic_lib.?.full_object_path }); + try positionals.append(.{ .path = comp.tsan_lib.?.full_object_path }); } for (positionals.items) |obj| { @@ -831,7 +831,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void { } if (comp.config.any_sanitize_thread) { - const path = comp.tsan_dynamic_lib.?.full_object_path; + const path = comp.tsan_lib.?.full_object_path; try argv.append(path); try argv.appendSlice(&.{ "-rpath", std.fs.path.dirname(path) orelse "." }); } @@ -3023,7 +3023,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } { ncmds += 1; } if (comp.config.any_sanitize_thread) { - const path = comp.tsan_dynamic_lib.?.full_object_path; + const path = comp.tsan_lib.?.full_object_path; const rpath = std.fs.path.dirname(path) orelse "."; try load_commands.writeRpathLC(rpath, writer); ncmds += 1; diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 003612fa2f80..74d0c58cbd28 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -72,7 +72,7 @@ pub fn calcLoadCommandsSize(macho_file: *MachO, assume_max_path_len: bool) !u32 } if (comp.config.any_sanitize_thread) { - const path = comp.tsan_dynamic_lib.?.full_object_path; + const path = comp.tsan_lib.?.full_object_path; const rpath = std.fs.path.dirname(path) orelse "."; sizeofcmds += calcInstallNameLen( @sizeOf(macho.rpath_command), From cac7e5afc7624ef18fe7bc8c6acd743f8b5a7eaf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 Jul 2024 13:28:21 -0700 Subject: [PATCH 047/152] add std.debug.assertReadable Useful when trying to figure out whether a slice is valid memory. --- lib/std/debug.zig | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/std/debug.zig b/lib/std/debug.zig index f7dc46242180..fb6609b8b18c 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -398,20 +398,30 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void { } } -/// This function invokes undefined behavior when `ok` is `false`. +/// Invokes detectable illegal behavior when `ok` is `false`. +/// /// In Debug and ReleaseSafe modes, calls to this function are always /// generated, and the `unreachable` statement triggers a panic. -/// In ReleaseFast and ReleaseSmall modes, calls to this function are -/// optimized away, and in fact the optimizer is able to use the assertion -/// in its heuristics. -/// Inside a test block, it is best to use the `std.testing` module rather -/// than this function, because this function may not detect a test failure -/// in ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert +/// +/// In ReleaseFast and ReleaseSmall modes, calls to this function are optimized +/// away, and in fact the optimizer is able to use the assertion in its +/// heuristics. +/// +/// Inside a test block, it is best to use the `std.testing` module rather than +/// this function, because this function may not detect a test failure in +/// ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert /// function is the correct function to use. pub fn assert(ok: bool) void { if (!ok) unreachable; // assertion failure } +/// Invokes detectable illegal behavior when the provided slice is not mapped +/// or lacks read permissions. 
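A hedged usage sketch of the helper added just below; the call site and the `mapped_bytes` slice are hypothetical, not part of this patch:

    // In Debug/ReleaseSafe this touches every byte, so an unmapped or
    // write-only region faults here at the assertion rather than deep
    // inside whatever later consumes the slice; in ReleaseFast and
    // ReleaseSmall the loop compiles away entirely.
    std.debug.assertReadable(mapped_bytes);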
+pub fn assertReadable(slice: []const volatile u8) void { + if (!runtime_safety) return; + for (slice) |*byte| _ = byte.*; +} + pub fn panic(comptime format: []const u8, args: anytype) noreturn { @setCold(true); From 29512f0edd9134ffe1f645ad69b5c4a3e1a7c7c3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 Jul 2024 13:28:44 -0700 Subject: [PATCH 048/152] Compilation: don't give len=0 bufs to pwritev The OS returns EFAULT for undefined pointers, even when len=0. --- src/Compilation.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Compilation.zig b/src/Compilation.zig index 9d3a31e792ea..38651281b5f5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2827,6 +2827,9 @@ pub fn saveState(comp: *Compilation) !void { } fn addBuf(bufs_list: []std.posix.iovec_const, bufs_len: *usize, buf: []const u8) void { + // Even when len=0, the undefined pointer might cause EFAULT. + if (buf.len == 0) return; + const i = bufs_len.*; bufs_len.* = i + 1; bufs_list[i] = .{ From 7ed2fbd7559ceb69ab03a8985fd7e5b591e22ab7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 Jul 2024 14:15:15 -0700 Subject: [PATCH 049/152] std.Build.Cache: add binToHex function reduces need for API users to rely on formatted printing, even though that's how it is currently implemented. --- lib/std/Build/Cache.zig | 42 ++++++++++++++--------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 2977801cb52d..6d43361ae457 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -250,14 +250,7 @@ pub const HashHelper = struct { pub fn final(hh: *HashHelper) HexDigest { var bin_digest: BinDigest = undefined; hh.hasher.final(&bin_digest); - - var out_digest: HexDigest = undefined; - _ = fmt.bufPrint( - &out_digest, - "{s}", - .{fmt.fmtSliceHexLower(&bin_digest)}, - ) catch unreachable; - return out_digest; + return binToHex(bin_digest); } pub fn oneShot(bytes: []const u8) [hex_digest_len]u8 { @@ -265,16 +258,20 @@ pub const HashHelper = struct { hasher.update(bytes); var bin_digest: BinDigest = undefined; hasher.final(&bin_digest); - var out_digest: [hex_digest_len]u8 = undefined; - _ = fmt.bufPrint( - &out_digest, - "{s}", - .{fmt.fmtSliceHexLower(&bin_digest)}, - ) catch unreachable; - return out_digest; + return binToHex(bin_digest); } }; +pub fn binToHex(bin_digest: BinDigest) HexDigest { + var out_digest: HexDigest = undefined; + _ = fmt.bufPrint( + &out_digest, + "{s}", + .{fmt.fmtSliceHexLower(&bin_digest)}, + ) catch unreachable; + return out_digest; +} + pub const Lock = struct { manifest_file: fs.File, @@ -426,11 +423,7 @@ pub const Manifest = struct { var bin_digest: BinDigest = undefined; self.hash.hasher.final(&bin_digest); - _ = fmt.bufPrint( - &self.hex_digest, - "{s}", - .{fmt.fmtSliceHexLower(&bin_digest)}, - ) catch unreachable; + self.hex_digest = binToHex(bin_digest); self.hash.hasher = hasher_init; self.hash.hasher.update(&bin_digest); @@ -899,14 +892,7 @@ pub const Manifest = struct { var bin_digest: BinDigest = undefined; self.hash.hasher.final(&bin_digest); - var out_digest: HexDigest = undefined; - _ = fmt.bufPrint( - &out_digest, - "{s}", - .{fmt.fmtSliceHexLower(&bin_digest)}, - ) catch unreachable; - - return out_digest; + return binToHex(bin_digest); } /// If `want_shared_lock` is true, this function automatically downgrades the From 30ec43a6c78d9c8803becbea5a02edb8fae08af6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 Jul 2024 14:16:42 -0700 Subject: [PATCH 050/152] 
Zcu: extract permanent state from File Primarily, this commit removes 2 fields from File, relying on the data being stored in the `files` field, with the key as the path digest, and the value as the struct decl corresponding to the File. This table is serialized into the compiler state that survives between incremental updates. Meanwhile, the File struct remains ephemeral data that can be reconstructed the first time it is needed by the compiler process, as well as operated on by independent worker threads. A key outcome of this commit is that there is now a stable index that can be used to refer to a File. This will be needed when serializing error messages to survive incremental compilation updates. --- src/Compilation.zig | 234 ++++++++------- src/InternPool.zig | 9 +- src/Package/Module.zig | 8 +- src/Sema.zig | 220 +++++++------- src/Type.zig | 2 +- src/Zcu.zig | 555 ++++++++++++++++++++--------------- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 12 +- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen.zig | 12 +- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 291 +++++++++--------- src/codegen/spirv.zig | 19 +- src/link/C.zig | 12 +- src/link/Dwarf.zig | 2 +- src/link/Wasm/ZigObject.zig | 22 +- src/main.zig | 22 +- 20 files changed, 779 insertions(+), 655 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 38651281b5f5..cfe7a21041e7 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -116,7 +116,7 @@ win32_resource_work_queue: if (build_options.only_core_functionality) void else /// These jobs are to tokenize, parse, and astgen files, which may be outdated /// since the last compilation, as well as scan for `@import` and queue up /// additional jobs corresponding to those new files. -astgen_work_queue: std.fifo.LinearFifo(*Module.File, .Dynamic), +astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic), /// These jobs are to inspect the file system stat() and if the embedded file has changed /// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl` /// task for it. @@ -1433,7 +1433,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), - .astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa), + .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa), .c_source_files = options.c_source_files, .rc_source_files = options.rc_source_files, @@ -2095,13 +2095,13 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } - if (comp.module) |module| { - module.compile_log_text.shrinkAndFree(gpa, 0); + if (comp.module) |zcu| { + zcu.compile_log_text.shrinkAndFree(gpa, 0); // Make sure std.zig is inside the import_table. We unconditionally need // it for start.zig. 
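The access pattern this enables, condensed from the hunks in this patch (all four helpers appear below; this is a sketch, not additional patch content): a `Zcu.File.Index` is the stable handle, and the path digest and root decl come from the serialized tables rather than from the ephemeral `File`.

    const file_index: Zcu.File.Index = @enumFromInt(file_index_usize);
    const path_digest = zcu.filePathDigest(file_index); // survives incremental updates
    const root_decl = zcu.fileRootDecl(file_index); // struct decl backing the file
    const file = zcu.fileByIndex(file_index); // ephemeral; rebuilt on demand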
- const std_mod = module.std_mod; - _ = try module.importPkg(std_mod); + const std_mod = zcu.std_mod; + _ = try zcu.importPkg(std_mod); // Normally we rely on importing std to in turn import the root source file // in the start code, but when using the stage1 backend that won't happen, @@ -2110,64 +2110,65 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { // Likewise, in the case of `zig test`, the test runner is the root source file, // and so there is nothing to import the main file. if (comp.config.is_test) { - _ = try module.importPkg(module.main_mod); + _ = try zcu.importPkg(zcu.main_mod); } - if (module.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { - _ = try module.importPkg(compiler_rt_mod); + if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { + _ = try zcu.importPkg(compiler_rt_mod); } // Put a work item in for every known source file to detect if // it changed, and, if so, re-compute ZIR and then queue the job // to update it. - try comp.astgen_work_queue.ensureUnusedCapacity(module.import_table.count()); - for (module.import_table.values()) |file| { + try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count()); + for (zcu.import_table.values(), 0..) |file, file_index_usize| { + const file_index: Zcu.File.Index = @enumFromInt(file_index_usize); if (file.mod.isBuiltin()) continue; - comp.astgen_work_queue.writeItemAssumeCapacity(file); + comp.astgen_work_queue.writeItemAssumeCapacity(file_index); } // Put a work item in for checking if any files used with `@embedFile` changed. - try comp.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count()); - for (module.embed_table.values()) |embed_file| { + try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count()); + for (zcu.embed_table.values()) |embed_file| { comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file); } try comp.work_queue.writeItem(.{ .analyze_mod = std_mod }); if (comp.config.is_test) { - try comp.work_queue.writeItem(.{ .analyze_mod = module.main_mod }); + try comp.work_queue.writeItem(.{ .analyze_mod = zcu.main_mod }); } - if (module.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { + if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { try comp.work_queue.writeItem(.{ .analyze_mod = compiler_rt_mod }); } } try comp.performAllTheWork(main_progress_node); - if (comp.module) |module| { + if (comp.module) |zcu| { if (build_options.enable_debug_extensions and comp.verbose_intern_pool) { std.debug.print("intern pool stats for '{s}':\n", .{ comp.root_name, }); - module.intern_pool.dump(); + zcu.intern_pool.dump(); } if (build_options.enable_debug_extensions and comp.verbose_generic_instances) { std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, - @as(usize, @intFromPtr(module)), + @as(usize, @intFromPtr(zcu)), }); - module.intern_pool.dumpGenericInstances(gpa); + zcu.intern_pool.dumpGenericInstances(gpa); } if (comp.config.is_test and comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that // have been discovered and not filtered out. 
- try module.populateTestFunctions(main_progress_node); + try zcu.populateTestFunctions(main_progress_node); } - try module.processExports(); + try zcu.processExports(); } if (comp.totalErrorCount() != 0) { @@ -2615,7 +2616,9 @@ fn resolveEmitLoc( return slice.ptr; } -fn reportMultiModuleErrors(mod: *Module) !void { +fn reportMultiModuleErrors(zcu: *Zcu) !void { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; // Some cases can give you a whole bunch of multi-module errors, which it's not helpful to // print all of, so we'll cap the number of these to emit. var num_errors: u32 = 0; @@ -2623,37 +2626,39 @@ fn reportMultiModuleErrors(mod: *Module) !void { // Attach the "some omitted" note to the final error message var last_err: ?*Module.ErrorMsg = null; - for (mod.import_table.values()) |file| { + for (zcu.import_table.values(), 0..) |file, file_index_usize| { if (!file.multi_pkg) continue; num_errors += 1; if (num_errors > max_errors) continue; + const file_index: Zcu.File.Index = @enumFromInt(file_index_usize); + const err = err_blk: { // Like with errors, let's cap the number of notes to prevent a huge error spew. const max_notes = 5; const omitted = file.references.items.len -| max_notes; const num_notes = file.references.items.len - omitted; - const notes = try mod.gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); - errdefer mod.gpa.free(notes); + const notes = try gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); + errdefer gpa.free(notes); for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| { - errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa); + errdefer for (notes[0..i]) |*n| n.deinit(gpa); note.* = switch (ref) { .import => |import| try Module.ErrorMsg.init( - mod.gpa, + gpa, .{ - .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, import.file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(import.file), .main_struct_inst), .offset = .{ .token_abs = import.token }, }, "imported from module {s}", - .{import.file.mod.fully_qualified_name}, + .{zcu.fileByIndex(import.file).mod.fully_qualified_name}, ), .root => |pkg| try Module.ErrorMsg.init( - mod.gpa, + gpa, .{ - .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), .offset = .entire_file, }, "root of module {s}", @@ -2661,25 +2666,25 @@ fn reportMultiModuleErrors(mod: *Module) !void { ), }; } - errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa); + errdefer for (notes[0..num_notes]) |*n| n.deinit(gpa); if (omitted > 0) { notes[num_notes] = try Module.ErrorMsg.init( - mod.gpa, + gpa, .{ - .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), .offset = .entire_file, }, "{} more references omitted", .{omitted}, ); } - errdefer if (omitted > 0) notes[num_notes].deinit(mod.gpa); + errdefer if (omitted > 0) notes[num_notes].deinit(gpa); const err = try Module.ErrorMsg.create( - mod.gpa, + gpa, .{ - .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), .offset = .entire_file, }, "file exists in multiple modules", @@ -2688,8 +2693,8 @@ fn reportMultiModuleErrors(mod: *Module) !void { err.notes = notes; break :err_blk err; }; - errdefer 
err.destroy(mod.gpa); - try mod.failed_files.putNoClobber(mod.gpa, file, err); + errdefer err.destroy(gpa); + try zcu.failed_files.putNoClobber(gpa, file, err); last_err = err; } @@ -2700,15 +2705,15 @@ fn reportMultiModuleErrors(mod: *Module) !void { // There isn't really any meaningful place to put this note, so just attach it to the // last failed file var note = try Module.ErrorMsg.init( - mod.gpa, + gpa, err.src_loc, "{} more errors omitted", .{num_errors - max_errors}, ); - errdefer note.deinit(mod.gpa); + errdefer note.deinit(gpa); const i = err.notes.len; - err.notes = try mod.gpa.realloc(err.notes, i + 1); + err.notes = try gpa.realloc(err.notes, i + 1); err.notes[i] = note; } @@ -2719,8 +2724,8 @@ fn reportMultiModuleErrors(mod: *Module) !void { // to add this flag after reporting the errors however, as otherwise // we'd get an error for every single downstream file, which wouldn't be // very useful. - for (mod.import_table.values()) |file| { - if (file.multi_pkg) file.recursiveMarkMultiPkg(mod); + for (zcu.import_table.values()) |file| { + if (file.multi_pkg) file.recursiveMarkMultiPkg(zcu); } } @@ -2752,6 +2757,7 @@ const Header = extern struct { first_dependency_len: u32, dep_entries_len: u32, free_dep_entries_len: u32, + files_len: u32, }, }; @@ -2759,7 +2765,7 @@ const Header = extern struct { /// saved, such as the target and most CLI flags. A cache hit will only occur /// when subsequent compiler invocations use the same set of flags. pub fn saveState(comp: *Compilation) !void { - var bufs_list: [19]std.posix.iovec_const = undefined; + var bufs_list: [21]std.posix.iovec_const = undefined; var bufs_len: usize = 0; const lf = comp.bin_file orelse return; @@ -2780,6 +2786,7 @@ pub fn saveState(comp: *Compilation) !void { .first_dependency_len = @intCast(ip.first_dependency.count()), .dep_entries_len = @intCast(ip.dep_entries.items.len), .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len), + .files_len = @intCast(zcu.files.entries.len), }, }; addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); @@ -2804,8 +2811,10 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items)); + addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(zcu.files.keys())); + addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(zcu.files.values())); + // TODO: compilation errors - // TODO: files // TODO: namespaces // TODO: decls // TODO: linker state @@ -3353,16 +3362,31 @@ pub fn performAllTheWork( } } - while (comp.astgen_work_queue.readItem()) |file| { - comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ - comp, file, zir_prog_node, &comp.astgen_wait_group, .root, - }); - } + if (comp.module) |zcu| { + { + // Worker threads may append to zcu.files and zcu.import_table + // so we must hold the lock while spawning those tasks, since + // we access those tables in this loop. + comp.mutex.lock(); + defer comp.mutex.unlock(); - while (comp.embed_file_work_queue.readItem()) |embed_file| { - comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerCheckEmbedFile, .{ - comp, embed_file, - }); + while (comp.astgen_work_queue.readItem()) |file_index| { + // Pre-load these things from our single-threaded context since they + // will be needed by the worker threads. 
+ const path_digest = zcu.filePathDigest(file_index); + const root_decl = zcu.fileRootDecl(file_index); + const file = zcu.fileByIndex(file_index); + comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ + comp, file, file_index, path_digest, root_decl, zir_prog_node, &comp.astgen_wait_group, .root, + }); + } + } + + while (comp.embed_file_work_queue.readItem()) |embed_file| { + comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerCheckEmbedFile, .{ + comp, embed_file, + }); + } } while (comp.c_object_work_queue.readItem()) |c_object| { @@ -3426,8 +3450,8 @@ pub fn performAllTheWork( fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { switch (job) { .codegen_decl => |decl_index| { - const module = comp.module.?; - const decl = module.declPtr(decl_index); + const zcu = comp.module.?; + const decl = zcu.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3445,7 +3469,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo assert(decl.has_tv); - try module.linkerUpdateDecl(decl_index); + try zcu.linkerUpdateDecl(decl_index); return; }, } @@ -3454,16 +3478,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo const named_frame = tracy.namedFrame("codegen_func"); defer named_frame.end(); - const module = comp.module.?; + const zcu = comp.module.?; // This call takes ownership of `func.air`. - try module.linkerUpdateFunc(func.func, func.air); + try zcu.linkerUpdateFunc(func.func, func.air); }, .analyze_func => |func| { const named_frame = tracy.namedFrame("analyze_func"); defer named_frame.end(); - const module = comp.module.?; - module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { + const zcu = comp.module.?; + zcu.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; @@ -3472,8 +3496,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++ "not decl analysis, which is too early to know about @export calls"); - const module = comp.module.?; - const decl = module.declPtr(decl_index); + const zcu = comp.module.?; + const decl = zcu.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3491,7 +3515,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo defer named_frame.end(); const gpa = comp.gpa; - const emit_h = module.emit_h.?; + const emit_h = zcu.emit_h.?; _ = try emit_h.decl_table.getOrPut(gpa, decl_index); const decl_emit_h = emit_h.declPtr(decl_index); const fwd_decl = &decl_emit_h.fwd_decl; @@ -3499,10 +3523,12 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo var ctypes_arena = std.heap.ArenaAllocator.init(gpa); defer ctypes_arena.deinit(); + const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + var dg: c_codegen.DeclGen = .{ .gpa = gpa, - .zcu = module, - .mod = module.namespacePtr(decl.src_namespace).file_scope.mod, + .zcu = zcu, + .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, .is_naked_fn = false, @@ -3531,17 +3557,17 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo } }, .analyze_decl => |decl_index| { - const module = comp.module.?; - module.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { + const zcu = comp.module.?; + zcu.ensureDeclAnalyzed(decl_index) 
catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; - const decl = module.declPtr(decl_index); + const decl = zcu.declPtr(decl_index); if (decl.kind == .@"test" and comp.config.is_test) { // Tests are always emitted in test binaries. The decl_refs are created by - // Module.populateTestFunctions, but this will not queue body analysis, so do + // Zcu.populateTestFunctions, but this will not queue body analysis, so do // that now. - try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); + try zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); } }, .resolve_type_fully => |ty| { @@ -3559,30 +3585,30 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo defer named_frame.end(); const gpa = comp.gpa; - const module = comp.module.?; - const decl = module.declPtr(decl_index); + const zcu = comp.module.?; + const decl = zcu.declPtr(decl_index); const lf = comp.bin_file.?; - lf.updateDeclLineNumber(module, decl_index) catch |err| { - try module.failed_analysis.ensureUnusedCapacity(gpa, 1); - module.failed_analysis.putAssumeCapacityNoClobber( + lf.updateDeclLineNumber(zcu, decl_index) catch |err| { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber( InternPool.AnalUnit.wrap(.{ .decl = decl_index }), - try Module.ErrorMsg.create( + try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(module), + decl.navSrcLoc(zcu), "unable to update line number: {s}", .{@errorName(err)}, ), ); decl.analysis = .codegen_failure; - try module.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |pkg| { const named_frame = tracy.namedFrame("analyze_mod"); defer named_frame.end(); - const module = comp.module.?; - module.semaPkg(pkg) catch |err| switch (err) { + const zcu = comp.module.?; + zcu.semaPkg(pkg) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; @@ -4015,14 +4041,17 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye const AstGenSrc = union(enum) { root, import: struct { - importing_file: *Module.File, + importing_file: Zcu.File.Index, import_tok: std.zig.Ast.TokenIndex, }, }; fn workerAstGenFile( comp: *Compilation, - file: *Module.File, + file: *Zcu.File, + file_index: Zcu.File.Index, + path_digest: Cache.BinDigest, + root_decl: Zcu.Decl.OptionalIndex, prog_node: std.Progress.Node, wg: *WaitGroup, src: AstGenSrc, @@ -4030,12 +4059,12 @@ fn workerAstGenFile( const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); - const mod = comp.module.?; - mod.astGenFile(file) catch |err| switch (err) { + const zcu = comp.module.?; + zcu.astGenFile(file, path_digest, root_decl) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; - comp.reportRetryableAstGenError(src, file, err) catch |oom| switch (oom) { + comp.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) { // Swallowing this error is OK because it's implied to be OOM when // there is a missing `failed_files` error message. error.OutOfMemory => {}, @@ -4062,29 +4091,31 @@ fn workerAstGenFile( // `@import("builtin")` is handled specially. 
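            // (Each module's builtin.zig source is generated by the compiler,
            // see the Package/Module.zig hunk below, so no AstGen worker is
            // spawned for it here.)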
if (mem.eql(u8, import_path, "builtin")) continue; - const import_result = blk: { + const import_result, const imported_path_digest, const imported_root_decl = blk: { comp.mutex.lock(); defer comp.mutex.unlock(); - const res = mod.importFile(file, import_path) catch continue; + const res = zcu.importFile(file, import_path) catch continue; if (!res.is_pkg) { - res.file.addReference(mod.*, .{ .import = .{ - .file = file, + res.file.addReference(zcu.*, .{ .import = .{ + .file = file_index, .token = item.data.token, } }) catch continue; } - break :blk res; + const imported_path_digest = zcu.filePathDigest(res.file_index); + const imported_root_decl = zcu.fileRootDecl(res.file_index); + break :blk .{ res, imported_path_digest, imported_root_decl }; }; if (import_result.is_new) { log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ file.sub_file_path, import_path, import_result.file.sub_file_path, }); const sub_src: AstGenSrc = .{ .import = .{ - .importing_file = file, + .importing_file = file_index, .import_tok = item.data.token, } }; comp.thread_pool.spawnWg(wg, workerAstGenFile, .{ - comp, import_result.file, prog_node, wg, sub_src, + comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_decl, prog_node, wg, sub_src, }); } } @@ -4435,21 +4466,22 @@ fn reportRetryableWin32ResourceError( fn reportRetryableAstGenError( comp: *Compilation, src: AstGenSrc, - file: *Module.File, + file_index: Zcu.File.Index, err: anyerror, ) error{OutOfMemory}!void { - const mod = comp.module.?; - const gpa = mod.gpa; + const zcu = comp.module.?; + const gpa = zcu.gpa; + const file = zcu.fileByIndex(file_index); file.status = .retryable_failure; const src_loc: Module.LazySrcLoc = switch (src) { .root => .{ - .base_node_inst = try mod.intern_pool.trackZir(gpa, file, .main_struct_inst), + .base_node_inst = try zcu.intern_pool.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), .offset = .entire_file, }, .import => |info| .{ - .base_node_inst = try mod.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst), + .base_node_inst = try zcu.intern_pool.trackZir(gpa, zcu.filePathDigest(info.importing_file), .main_struct_inst), .offset = .{ .token_abs = info.import_tok }, }, }; @@ -4462,7 +4494,7 @@ fn reportRetryableAstGenError( { comp.mutex.lock(); defer comp.mutex.unlock(); - try mod.failed_files.putNoClobber(gpa, file, err_msg); + try zcu.failed_files.putNoClobber(gpa, file, err_msg); } } diff --git a/src/InternPool.zig b/src/InternPool.zig index c6b27acaf39d..e79de2651620 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -123,9 +123,14 @@ pub const TrackedInst = extern struct { }; }; -pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.Inst.Index) Allocator.Error!TrackedInst.Index { +pub fn trackZir( + ip: *InternPool, + gpa: Allocator, + path_digest: Cache.BinDigest, + inst: Zir.Inst.Index, +) Allocator.Error!TrackedInst.Index { const key: TrackedInst = .{ - .path_digest = file.path_digest, + .path_digest = path_digest, .inst = inst, }; const gop = try ip.tracked_insts.getOrPut(gpa, key); diff --git a/src/Package/Module.zig b/src/Package/Module.zig index 80d701561095..61b7d2ac4dc8 100644 --- a/src/Package/Module.zig +++ b/src/Package/Module.zig @@ -379,7 +379,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { const new_file = try arena.create(File); - const bin_digest, const hex_digest = digest: { + const hex_digest = digest: { var hasher: Cache.Hasher = Cache.hasher_init; 
hasher.update(generated_builtin_source); @@ -393,7 +393,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { .{std.fmt.fmtSliceHexLower(&bin_digest)}, ) catch unreachable; - break :digest .{ bin_digest, hex_digest }; + break :digest hex_digest; }; const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ hex_digest); @@ -443,10 +443,6 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { .zir = undefined, .status = .never_loaded, .mod = new, - .root_decl = .none, - // We might as well use this digest for the File `path digest`, since there's a - // one-to-one correspondence here between distinct paths and distinct contents. - .path_digest = bin_digest, }; break :b new; }; diff --git a/src/Sema.zig b/src/Sema.zig index 0d4cf2687156..b2d4fd9a2428 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -546,8 +546,12 @@ pub const Block = struct { }; } - pub fn getFileScope(block: *Block, mod: *Module) *Module.File { - return mod.namespacePtr(block.namespace).file_scope; + pub fn getFileScope(block: *Block, zcu: *Zcu) *Zcu.File { + return zcu.fileByIndex(getFileScopeIndex(block, zcu)); + } + + pub fn getFileScopeIndex(block: *Block, zcu: *Zcu) Zcu.File.Index { + return zcu.namespacePtr(block.namespace).file_scope; } fn addTy( @@ -826,7 +830,17 @@ pub const Block = struct { pub fn ownerModule(block: Block) *Package.Module { const zcu = block.sema.mod; - return zcu.namespacePtr(block.namespace).file_scope.mod; + return zcu.namespacePtr(block.namespace).fileScope(zcu).mod; + } + + fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index { + const sema = block.sema; + const gpa = sema.gpa; + const zcu = sema.mod; + const ip = &zcu.intern_pool; + const file_index = block.getFileScopeIndex(zcu); + const path_digest = zcu.filePathDigest(file_index); + return ip.trackZir(gpa, path_digest, inst); } }; @@ -1000,7 +1014,7 @@ fn analyzeBodyInner( if (build_options.enable_logging) { std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: { const path_digest = block.src_base_inst.resolveFull(&mod.intern_pool).path_digest; - const index = mod.path_digest_map.getIndex(path_digest).?; + const index = mod.files.getIndex(path_digest).?; break :sub_file_path mod.import_table.values()[index].sub_file_path; }, inst }); } @@ -2730,7 +2744,7 @@ fn zirStructDecl( const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); const extra = sema.code.extraData(Zir.Inst.StructDecl, extended.operand); - const tracked_inst = try ip.trackZir(gpa, block.getFileScope(mod), inst); + const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), @@ -2806,7 +2820,7 @@ fn zirStructDecl( try ip.addDependency( sema.gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + .{ .src_hash = try block.trackZir(inst) }, ); } @@ -2814,7 +2828,7 @@ fn zirStructDecl( const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, - .file_scope = block.getFileScope(mod), + .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); @@ -2947,7 +2961,7 @@ fn zirEnumDecl( const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand); var extra_index: usize = 
extra.end; - const tracked_inst = try ip.trackZir(gpa, block.getFileScope(mod), inst); + const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; @@ -3040,9 +3054,9 @@ fn zirEnumDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( - sema.gpa, + gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + .{ .src_hash = try block.trackZir(inst) }, ); } @@ -3050,7 +3064,7 @@ fn zirEnumDecl( const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, - .file_scope = block.getFileScope(mod), + .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); @@ -3232,7 +3246,7 @@ fn zirUnionDecl( const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand); var extra_index: usize = extra.end; - const tracked_inst = try ip.trackZir(gpa, block.getFileScope(mod), inst); + const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; extra_index += @intFromBool(small.has_tag_type); @@ -3306,9 +3320,9 @@ fn zirUnionDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( - sema.gpa, + gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + .{ .src_hash = try block.trackZir(inst) }, ); } @@ -3316,7 +3330,7 @@ fn zirUnionDecl( const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, - .file_scope = block.getFileScope(mod), + .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); @@ -3348,7 +3362,7 @@ fn zirOpaqueDecl( const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand); var extra_index: usize = extra.end; - const tracked_inst = try ip.trackZir(gpa, block.getFileScope(mod), inst); + const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; const captures_len = if (small.has_captures_len) blk: { @@ -3397,14 +3411,14 @@ fn zirOpaqueDecl( try ip.addDependency( gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) }, + .{ .src_hash = try block.trackZir(inst) }, ); } const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try mod.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, - .file_scope = block.getFileScope(mod), + .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); @@ -5893,8 +5907,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; - const comp = mod.comp; + const zcu = sema.mod; + const comp = zcu.comp; const gpa 
= sema.gpa; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(pl_node.src_node); @@ -5940,7 +5954,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!comp.config.link_libc) try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try mod.cimport_errors.getOrPut(gpa, sema.ownerUnit()); + const gop = try zcu.cimport_errors.getOrPut(gpa, sema.ownerUnit()); if (!gop.found_existing) { gop.value_ptr.* = c_import_res.errors; c_import_res.errors = std.zig.ErrorBundle.empty; @@ -5984,14 +5998,16 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr else => |e| return e, }; - const result = mod.importPkg(c_import_mod) catch |err| + const result = zcu.importPkg(c_import_mod) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - mod.astGenFile(result.file) catch |err| + const path_digest = zcu.filePathDigest(result.file_index); + const root_decl = zcu.fileRootDecl(result.file_index); + zcu.astGenFile(result.file, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - try mod.ensureFileAnalyzed(result.file); - const file_root_decl_index = result.file.root_decl.unwrap().?; + try zcu.ensureFileAnalyzed(result.file_index); + const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; return sema.analyzeDeclVal(parent_block, src, file_root_decl_index); } @@ -6730,7 +6746,9 @@ fn lookupInNamespace( // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) { + if (decl.is_pub or (src_file == decl.getFileScopeIndex(mod) and + checked_namespaces.values()[check_i])) + { try candidates.append(gpa, decl_index); } } @@ -6741,7 +6759,7 @@ fn lookupInNamespace( if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) { + if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScopeIndex(mod)) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. 
continue; @@ -6749,7 +6767,7 @@ fn lookupInNamespace( try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); const ns_ty = sub_usingnamespace_decl.val.toType(); const sub_ns = mod.namespacePtrUnwrap(ns_ty.getNamespaceIndex(mod)) orelse continue; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); + try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScopeIndex(mod)); } } @@ -8067,20 +8085,20 @@ fn instantiateGenericCall( call_tag: Air.Inst.Tag, call_dbg_node: ?Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const zcu = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; + const ip = &zcu.intern_pool; const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{ .needed_comptime_reason = "generic function being called must be comptime-known", }); - const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + const generic_owner = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .func => func_val.toIntern(), - .ptr => |ptr| mod.declPtr(ptr.base_addr.decl).val.toIntern(), + .ptr => |ptr| zcu.declPtr(ptr.base_addr.decl).val.toIntern(), else => unreachable, }; - const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func; - const generic_owner_ty_info = mod.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?; + const generic_owner_func = zcu.intern_pool.indexToKey(generic_owner).func; + const generic_owner_ty_info = zcu.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?; try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst }); @@ -8092,10 +8110,10 @@ fn instantiateGenericCall( // The actual monomorphization happens via adding `func_instance` to // `InternPool`. - const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl); + const fn_owner_decl = zcu.declPtr(generic_owner_func.owner_decl); const namespace_index = fn_owner_decl.src_namespace; - const namespace = mod.namespacePtr(namespace_index); - const fn_zir = namespace.file_scope.zir; + const namespace = zcu.namespacePtr(namespace_index); + const fn_zir = namespace.fileScope(zcu).zir; const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip)); const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count()); @@ -8110,7 +8128,7 @@ fn instantiateGenericCall( // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a // new, monomorphized function, with the comptime parameters elided. var child_sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = sema.arena, .code = fn_zir, @@ -8199,7 +8217,7 @@ fn instantiateGenericCall( const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func); try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_index), arg_ref); const arg_ty = sema.typeOf(arg_ref); - if (arg_ty.zigTypeTag(mod) == .NoReturn) { + if (arg_ty.zigTypeTag(zcu) == .NoReturn) { // This terminates argument analysis. 
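            // The call itself can never execute in this case, so the noreturn
            // argument value stands in for the entire call result.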
return arg_ref; } @@ -8283,12 +8301,12 @@ fn instantiateGenericCall( const new_func_inst = try child_sema.resolveInlineBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst); const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern(); - const callee = mod.funcInfo(callee_index); + const callee = zcu.funcInfo(callee_index); callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota); // Make a runtime call to the new function, making sure to omit the comptime args. const func_ty = Type.fromInterned(callee.ty); - const func_ty_info = mod.typeToFunc(func_ty).?; + const func_ty_info = zcu.typeToFunc(func_ty).?; // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. @@ -8304,13 +8322,13 @@ fn instantiateGenericCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); if (sema.owner_func_index != .none and - Type.fromInterned(func_ty_info.return_type).isError(mod)) + Type.fromInterned(func_ty_info.return_type).isError(zcu)) { ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; } try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index })); - try mod.ensureFuncBodyAnalysisQueued(callee_index); + try zcu.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len); const result = try block.addInst(.{ @@ -8333,7 +8351,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (func_ty.fnReturnType(mod).isNoReturn(mod)) { + if (func_ty.fnReturnType(zcu).isNoReturn(zcu)) { _ = try block.addNoOp(.unreach); return .unreachable_value; } @@ -9653,7 +9671,7 @@ fn funcCommon( .is_generic = final_is_generic, .is_noinline = is_noinline, - .zir_body_inst = try ip.trackZir(gpa, block.getFileScope(mod), func_inst), + .zir_body_inst = try block.trackZir(func_inst), .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @as(u16, @truncate(src_locs.columns)), @@ -9731,7 +9749,7 @@ fn funcCommon( .ty = func_ty, .cc = cc, .is_noinline = is_noinline, - .zir_body_inst = try ip.trackZir(gpa, block.getFileScope(mod), func_inst), + .zir_body_inst = try block.trackZir(func_inst), .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @as(u16, @truncate(src_locs.columns)), @@ -13787,18 +13805,18 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const zcu = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const operand_src = block.tokenOffset(inst_data.src_tok); const operand = inst_data.get(sema.code); - const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) { + const result = zcu.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand}); }, error.ModuleNotFound => { return sema.fail(block, operand_src, "no module named '{s}' available within module {s}", .{ - operand, block.getFileScope(mod).mod.fully_qualified_name, + operand, block.getFileScope(zcu).mod.fully_qualified_name, }); }, else => { @@ -13807,8 +13825,8 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; - try mod.ensureFileAnalyzed(result.file); - const file_root_decl_index = result.file.root_decl.unwrap().?; + try zcu.ensureFileAnalyzed(result.file_index); + const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; return sema.analyzeDeclVal(block, operand_src, file_root_decl_index); } @@ -21089,7 +21107,7 @@ fn zirReify( const ip = &mod.intern_pool; const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small); const extra = sema.code.extraData(Zir.Inst.Reify, extended.operand).data; - const tracked_inst = try ip.trackZir(gpa, block.getFileScope(mod), inst); + const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), @@ -21466,7 +21484,7 @@ fn zirReify( const wip_ty = switch (try ip.getOpaqueType(gpa, .{ .has_namespace = false, .key = .{ .reified = .{ - .zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), + .zir_index = try block.trackZir(inst), } }, })) { .existing => |ty| return Air.internedToRef(ty), @@ -21660,7 +21678,7 @@ fn reifyEnum( .tag_mode = if (is_exhaustive) .explicit else .nonexhaustive, .fields_len = fields_len, .key = .{ .reified = .{ - .zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), + .zir_index = try block.trackZir(inst), .type_hash = hasher.final(), } }, })) { @@ -21810,7 +21828,7 @@ fn reifyUnion( .field_types = &.{}, // set later .field_aligns = &.{}, // set later .key = .{ .reified = .{ - .zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), + .zir_index = try block.trackZir(inst), .type_hash = hasher.final(), } }, })) { @@ -22062,7 +22080,7 @@ fn reifyStruct( .inits_resolved = true, .has_namespace = false, .key = .{ .reified = .{ - .zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), + .zir_index = try block.trackZir(inst), .type_hash = hasher.final(), } }, })) { @@ -34894,14 +34912,14 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { _ = try sema.typeRequiresComptime(ty); } -fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) CompileError!void { - const gpa = mod.gpa; - const ip = &mod.intern_pool; +fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) CompileError!void { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; const decl_index = struct_type.decl.unwrap().?; - const decl = mod.declPtr(decl_index); + const decl = zcu.declPtr(decl_index); - const zir = 
mod.namespacePtr(struct_type.namespace.unwrap().?).file_scope.zir; + const zir = zcu.namespacePtr(struct_type.namespace.unwrap().?).fileScope(zcu).zir; var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -34910,7 +34928,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, @@ -34941,7 +34959,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(mod, .sema); + accumulator += try field_ty.bitSizeAdvanced(zcu, .sema); } break :blk accumulator; }; @@ -34987,7 +35005,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co if (fields_bit_sum > std.math.maxInt(u16)) { return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); + const backing_int_ty = try zcu.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } @@ -35597,23 +35615,23 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct { } fn semaStructFields( - mod: *Module, + zcu: *Zcu, arena: Allocator, struct_type: InternPool.LoadedStructType, ) CompileError!void { - const gpa = mod.gpa; - const ip = &mod.intern_pool; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; const decl_index = struct_type.decl.unwrap() orelse return; - const decl = mod.declPtr(decl_index); + const decl = zcu.declPtr(decl_index); const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace; - const zir = mod.namespacePtr(namespace_index).file_scope.zir; + const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); if (fields_len == 0) switch (struct_type.layout) { .@"packed" => { - try semaBackingIntType(mod, struct_type); + try semaBackingIntType(zcu, struct_type); return; }, .auto, .@"extern" => { @@ -35627,7 +35645,7 @@ fn semaStructFields( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = arena, .code = zir, @@ -35749,7 +35767,7 @@ fn semaStructFields( struct_type.field_types.get(ip)[field_i] = field_ty.toIntern(); - if (field_ty.zigTypeTag(mod) == .Opaque) { + if (field_ty.zigTypeTag(zcu) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -35759,7 +35777,7 @@ fn semaStructFields( }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } - if (field_ty.zigTypeTag(mod) == .NoReturn) { + if (field_ty.zigTypeTag(zcu) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(ty_src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(sema.gpa); @@ -35772,7 +35790,7 @@ fn semaStructFields( switch (struct_type.layout) { .@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", 
.{field_ty.fmt(mod)}); + const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field); @@ -35784,7 +35802,7 @@ fn semaStructFields( }, .@"packed" => if (!try sema.validatePackedType(field_ty)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -35820,19 +35838,19 @@ fn semaStructFields( // This logic must be kept in sync with `semaStructFields` fn semaStructFieldInits( - mod: *Module, + zcu: *Zcu, arena: Allocator, struct_type: InternPool.LoadedStructType, ) CompileError!void { - const gpa = mod.gpa; - const ip = &mod.intern_pool; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; assert(!struct_type.haveFieldInits(ip)); const decl_index = struct_type.decl.unwrap() orelse return; - const decl = mod.declPtr(decl_index); + const decl = zcu.declPtr(decl_index); const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace; - const zir = mod.namespacePtr(namespace_index).file_scope.zir; + const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); @@ -35840,7 +35858,7 @@ fn semaStructFieldInits( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = arena, .code = zir, @@ -35950,7 +35968,7 @@ fn semaStructFieldInits( }); }; - if (default_val.canMutateComptimeVarState(mod)) { + if (default_val.canMutateComptimeVarState(zcu)) { return sema.fail(&block_scope, init_src, "field default value contains reference to comptime-mutable memory", .{}); } struct_type.field_inits.get(ip)[field_i] = default_val.toIntern(); @@ -35960,14 +35978,14 @@ fn semaStructFieldInits( try sema.flushExports(); } -fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { +fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { const tracy = trace(@src()); defer tracy.end(); - const gpa = mod.gpa; - const ip = &mod.intern_pool; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; const decl_index = union_type.decl; - const zir = mod.namespacePtr(union_type.namespace.unwrap().?).file_scope.zir; + const zir = zcu.namespacePtr(union_type.namespace.unwrap().?).fileScope(zcu).zir; const zir_index = union_type.zir_index.resolve(ip); const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .union_decl); @@ -36011,13 +36029,13 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded const body = zir.bodySlice(extra_index, body_len); extra_index += body.len; - const decl = mod.declPtr(decl_index); + const decl = zcu.declPtr(decl_index); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = arena, .code = zir, @@ -36063,18 +36081,18 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded if (small.auto_enum_tag) { // The provided type is an integer 
type and we must construct the enum tag type here. int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); + if (int_tag_ty.zigTypeTag(zcu) != .Int and int_tag_ty.zigTypeTag(zcu) != .ComptimeInt) { + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(zcu)}); } if (fields_len > 0) { - const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1); + const field_count_val = try zcu.intValue(Type.comptime_int, fields_len - 1); if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(mod), + int_tag_ty.fmt(zcu), fields_len - 1, }); break :msg msg; @@ -36089,7 +36107,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded union_type.tagTypePtr(ip).* = provided_ty.toIntern(); const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) { .enum_type => ip.loadEnumType(provided_ty.toIntern()), - else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(mod)}), + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(zcu)}), }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. @@ -36185,7 +36203,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded const val = if (last_tag_val) |val| try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else - try mod.intValue(int_tag_ty, 0); + try zcu.intValue(int_tag_ty, 0); last_tag_val = val; break :blk val; @@ -36197,7 +36215,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded .offset = .{ .container_field_value = @intCast(gop.index) }, }; const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(mod, &sema)}); + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(zcu, &sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_value_src, msg, "other occurrence here", .{}); break :msg msg; @@ -36227,7 +36245,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*); const enum_index = tag_info.nameIndex(ip, field_name) orelse { return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{ - field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(mod), + field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(zcu), }); }; @@ -36254,7 +36272,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded } } - if (field_ty.zigTypeTag(mod) == .Opaque) { + if (field_ty.zigTypeTag(zcu) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(type_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(sema.gpa); @@ -36269,7 +36287,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded !try sema.validateExternType(field_ty, 
.union_field)) { const msg = msg: { - const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, type_src, field_ty, .union_field); @@ -36280,7 +36298,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded return sema.failWithOwnedErrorMsg(&block_scope, msg); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { const msg = msg: { - const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, type_src, field_ty); @@ -36325,10 +36343,10 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded return sema.failWithOwnedErrorMsg(&block_scope, msg); } } else if (enum_field_vals.count() > 0) { - const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), mod.declPtr(union_type.decl)); + const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), zcu.declPtr(union_type.decl)); union_type.tagTypePtr(ip).* = enum_ty; } else { - const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl)); + const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, zcu.declPtr(union_type.decl)); union_type.tagTypePtr(ip).* = enum_ty; } diff --git a/src/Type.zig b/src/Type.zig index 9f11a70bf360..8c70e34fceb0 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3455,7 +3455,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 { else => return null, }; const info = tracked.resolveFull(&zcu.intern_pool); - const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?]; + const file = zcu.import_table.values()[zcu.files.getIndex(info.path_digest).?]; assert(file.zir_loaded); const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(info.inst)); diff --git a/src/Zcu.zig b/src/Zcu.zig index adfe60e67875..50bbeefdf226 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -72,6 +72,7 @@ codegen_prog_node: std.Progress.Node = undefined, global_zir_cache: Compilation.Directory, /// Used by AstGen worker to load and store ZIR cache. local_zir_cache: Compilation.Directory, + /// This is where all `Export` values are stored. Not all values here are necessarily valid exports; /// to enumerate all exports, `single_exports` and `multi_exports` must be consulted. all_exports: ArrayListUnmanaged(Export) = .{}, @@ -88,14 +89,35 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { index: u32, len: u32, }) = .{}, -/// The set of all the Zig source files in the Module. We keep track of this in order -/// to iterate over it and check which source files have been modified on the file system when -/// an update is requested, as well as to cache `@import` results. + +/// The set of all the Zig source files in the Zig Compilation Unit. Tracked in +/// order to iterate over it and check which source files have been modified on +/// the file system when an update is requested, as well as to cache `@import` +/// results. +/// /// Keys are fully resolved file paths. 
This table owns the keys and values. +/// +/// Protected by Compilation's mutex. +/// +/// Not serialized. This state is reconstructed during the first call to +/// `Compilation.update` of the process for a given `Compilation`. +/// +/// Indexes correspond 1:1 to `files`. import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, -/// This acts as a map from `path_digest` to the corresponding `File`. -/// The value is omitted, as keys are ordered identically to `import_table`. -path_digest_map: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, void) = .{}, + +/// Elements are ordered identically to `import_table`. +/// +/// Unlike `import_table`, this data is serialized as part of incremental +/// compilation state. +/// +/// Key is the hash of the path to this file, used to store +/// `InternPool.TrackedInst`. +/// +/// Value is the `Decl` of the struct that represents this `File`. +/// +/// Protected by Compilation's mutex. +files: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, Decl.OptionalIndex) = .{}, + /// The set of all the files which have been loaded with `@embedFile` in the Module. /// We keep track of this in order to iterate over it and check which files have been /// modified on the file system when an update is requested, as well as to cache @@ -387,8 +409,8 @@ pub const Decl = struct { anon, }; - const Index = InternPool.DeclIndex; - const OptionalIndex = InternPool.OptionalDeclIndex; + pub const Index = InternPool.DeclIndex; + pub const OptionalIndex = InternPool.OptionalDeclIndex; pub fn zirBodies(decl: Decl, zcu: *Zcu) Zir.Inst.Declaration.Bodies { const zir = decl.getFileScope(zcu).zir; @@ -490,6 +512,10 @@ pub const Decl = struct { } pub fn getFileScope(decl: Decl, zcu: *Zcu) *File { + return zcu.fileByIndex(getFileScopeIndex(decl, zcu)); + } + + pub fn getFileScopeIndex(decl: Decl, zcu: *Zcu) File.Index { return zcu.namespacePtr(decl.src_namespace).file_scope; } @@ -558,7 +584,7 @@ pub const Decl = struct { break :inst generic_owner_decl.zir_decl_index.unwrap().?; }; const info = tracked.resolveFull(&zcu.intern_pool); - const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?]; + const file = zcu.import_table.values()[zcu.files.getIndex(info.path_digest).?]; assert(file.zir_loaded); const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(info.inst)); @@ -595,7 +621,7 @@ pub const DeclAdapter = struct { /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { parent: OptionalIndex, - file_scope: *File, + file_scope: File.Index, /// Will be a struct, enum, union, or opaque. decl_index: Decl.Index, /// Direct children of the namespace. @@ -627,6 +653,10 @@ pub const Namespace = struct { } }; + pub fn fileScope(ns: Namespace, zcu: *Zcu) *File { + return zcu.fileByIndex(ns.file_scope); + } + // This renders e.g. 
"std.fs.Dir.OpenOptions" pub fn renderFullyQualifiedName( ns: Namespace, @@ -641,7 +671,7 @@ pub const Namespace = struct { writer, ); } else { - try ns.file_scope.renderFullyQualifiedName(writer); + try ns.fileScope(zcu).renderFullyQualifiedName(writer); } if (name != .empty) try writer.print(".{}", .{name.fmt(&zcu.intern_pool)}); } @@ -661,7 +691,7 @@ pub const Namespace = struct { ); break :sep '.'; } else sep: { - try ns.file_scope.renderFullyQualifiedDebugName(writer); + try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer); break :sep ':'; }; if (name != .empty) try writer.print("{c}{}", .{ sep, name.fmt(&zcu.intern_pool) }); @@ -680,7 +710,7 @@ pub const Namespace = struct { const decl = zcu.declPtr(cur_ns.decl_index); count += decl.name.length(ip) + 1; cur_ns = zcu.namespacePtr(cur_ns.parent.unwrap() orelse { - count += ns.file_scope.sub_file_path.len; + count += ns.fileScope(zcu).sub_file_path.len; break :count count; }); } @@ -715,8 +745,6 @@ pub const Namespace = struct { }; pub const File = struct { - /// The Decl of the struct that represents this File. - root_decl: Decl.OptionalIndex, status: enum { never_loaded, retryable_failure, @@ -744,8 +772,6 @@ pub const File = struct { multi_pkg: bool = false, /// List of references to this file, used for multi-package errors. references: std.ArrayListUnmanaged(File.Reference) = .{}, - /// The hash of the path to this file, used to store `InternPool.TrackedInst`. - path_digest: Cache.BinDigest, /// The most recent successful ZIR for this file, with no errors. /// This is only populated when a previously successful ZIR @@ -757,7 +783,7 @@ pub const File = struct { pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. import: struct { - file: *File, + file: File.Index, token: Ast.TokenIndex, }, /// The file is the root of a module. @@ -791,28 +817,6 @@ pub const File = struct { } } - pub fn deinit(file: *File, mod: *Module) void { - const gpa = mod.gpa; - const is_builtin = file.mod.isBuiltin(); - log.debug("deinit File {s}", .{file.sub_file_path}); - if (is_builtin) { - file.unloadTree(gpa); - file.unloadZir(gpa); - } else { - gpa.free(file.sub_file_path); - file.unload(gpa); - } - file.references.deinit(gpa); - if (file.root_decl.unwrap()) |root_decl| { - mod.destroyDecl(root_decl); - } - if (file.prev_zir) |prev_zir| { - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - } - file.* = undefined; - } - pub const Source = struct { bytes: [:0]const u8, stat: Cache.File.Stat, @@ -865,13 +869,6 @@ pub const File = struct { return &file.tree; } - pub fn destroy(file: *File, mod: *Module) void { - const gpa = mod.gpa; - const is_builtin = file.mod.isBuiltin(); - file.deinit(mod); - if (!is_builtin) gpa.destroy(file); - } - pub fn renderFullyQualifiedName(file: File, writer: anytype) !void { // Convert all the slashes into dots and truncate the extension. 
const ext = std.fs.path.extension(file.sub_file_path); @@ -937,7 +934,7 @@ pub const File = struct { } const mod = switch (ref) { - .import => |import| import.file.mod, + .import => |import| zcu.fileByIndex(import.file).mod, .root => |mod| mod, }; if (mod != file.mod) file.multi_pkg = true; @@ -971,6 +968,10 @@ pub const File = struct { } } } + + pub const Index = enum(u32) { + _, + }; }; pub const EmbedFile = struct { @@ -2355,7 +2356,7 @@ pub const LazySrcLoc = struct { break :inst .{ info.path_digest, info.inst }; }; const file = file: { - const index = zcu.path_digest_map.getIndex(want_path_digest).?; + const index = zcu.files.getIndex(want_path_digest).?; break :file zcu.import_table.values()[index]; }; assert(file.zir_loaded); @@ -2423,11 +2424,12 @@ pub fn deinit(zcu: *Zcu) void { for (zcu.import_table.keys()) |key| { gpa.free(key); } - for (zcu.import_table.values()) |value| { - value.destroy(zcu); + for (0..zcu.import_table.entries.len) |file_index_usize| { + const file_index: File.Index = @enumFromInt(file_index_usize); + zcu.destroyFile(file_index); } zcu.import_table.deinit(gpa); - zcu.path_digest_map.deinit(gpa); + zcu.files.deinit(gpa); for (zcu.embed_table.keys(), zcu.embed_table.values()) |path, embed_file| { gpa.free(path); @@ -2531,6 +2533,37 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } +fn deinitFile(zcu: *Zcu, file_index: File.Index) void { + const gpa = zcu.gpa; + const file = zcu.fileByIndex(file_index); + const is_builtin = file.mod.isBuiltin(); + log.debug("deinit File {s}", .{file.sub_file_path}); + if (is_builtin) { + file.unloadTree(gpa); + file.unloadZir(gpa); + } else { + gpa.free(file.sub_file_path); + file.unload(gpa); + } + file.references.deinit(gpa); + if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| { + zcu.destroyDecl(root_decl); + } + if (file.prev_zir) |prev_zir| { + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + } + file.* = undefined; +} + +pub fn destroyFile(zcu: *Zcu, file_index: File.Index) void { + const gpa = zcu.gpa; + const file = zcu.fileByIndex(file_index); + const is_builtin = file.mod.isBuiltin(); + zcu.deinitFile(file_index); + if (!is_builtin) gpa.destroy(file); +} + pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { return mod.intern_pool.declPtr(index); } @@ -2563,14 +2596,14 @@ comptime { } } -pub fn astGenFile(mod: *Module, file: *File) !void { +pub fn astGenFile(zcu: *Zcu, file: *File, path_digest: Cache.BinDigest, opt_root_decl: Zcu.Decl.OptionalIndex) !void { assert(!file.mod.isBuiltin()); const tracy = trace(@src()); defer tracy.end(); - const comp = mod.comp; - const gpa = mod.gpa; + const comp = zcu.comp; + const gpa = zcu.gpa; // In any case we need to examine the stat of the file to determine the course of action. 
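    // The stat is compared against the cached ZIR for this file: on a match
    // the cached ZIR can be reused, otherwise parsing and AstGen must re-run.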
var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); @@ -2578,17 +2611,9 @@ pub fn astGenFile(mod: *Module, file: *File) !void { const stat = try source_file.stat(); - const want_local_cache = file.mod == mod.main_mod; - const hex_digest = hex: { - var hex: Cache.HexDigest = undefined; - _ = std.fmt.bufPrint( - &hex, - "{s}", - .{std.fmt.fmtSliceHexLower(&file.path_digest)}, - ) catch unreachable; - break :hex hex; - }; - const cache_directory = if (want_local_cache) mod.local_zir_cache else mod.global_zir_cache; + const want_local_cache = file.mod == zcu.main_mod; + const hex_digest = Cache.binToHex(path_digest); + const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; const zir_dir = cache_directory.handle; // Determine whether we need to reload the file from disk and redo parsing and AstGen. @@ -2688,7 +2713,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void { { comp.mutex.lock(); defer comp.mutex.unlock(); - try mod.failed_files.putNoClobber(gpa, file, null); + try zcu.failed_files.putNoClobber(gpa, file, null); } file.status = .astgen_failure; return error.AnalysisFail; @@ -2712,7 +2737,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void { else => |e| return e, }; - mod.lockAndClearFileCompileError(file); + zcu.lockAndClearFileCompileError(file); // If the previous ZIR does not have compile errors, keep it around // in case parsing or new ZIR fails. In case of successful ZIR update @@ -2818,27 +2843,27 @@ pub fn astGenFile(mod: *Module, file: *File) !void { { comp.mutex.lock(); defer comp.mutex.unlock(); - try mod.failed_files.putNoClobber(gpa, file, null); + try zcu.failed_files.putNoClobber(gpa, file, null); } file.status = .astgen_failure; return error.AnalysisFail; } if (file.prev_zir) |prev_zir| { - try updateZirRefs(mod, file, prev_zir.*); + try updateZirRefs(zcu, file, prev_zir.*, path_digest); // No need to keep previous ZIR. prev_zir.deinit(gpa); gpa.destroy(prev_zir); file.prev_zir = null; } - if (file.root_decl.unwrap()) |root_decl| { + if (opt_root_decl.unwrap()) |root_decl| { // The root of this file must be re-analyzed, since the file has changed. comp.mutex.lock(); defer comp.mutex.unlock(); log.debug("outdated root Decl: {}", .{root_decl}); - try mod.outdated_file_root.put(gpa, root_decl, {}); + try zcu.outdated_file_root.put(gpa, root_decl, {}); } } @@ -2914,7 +2939,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) /// This is called from the AstGen thread pool, so must acquire /// the Compilation mutex when acting on shared state. -fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { +fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir, path_digest: Cache.BinDigest) !void { const gpa = zcu.gpa; const new_zir = file.zir; @@ -2930,7 +2955,7 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { // iterating over this full set for every updated file. for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (!std.mem.eql(u8, &ti.path_digest, &file.path_digest)) continue; + if (!std.mem.eql(u8, &ti.path_digest, &path_digest)) continue; const old_inst = ti.inst; ti.inst = inst_map.get(ti.inst) orelse { // Tracking failed for this instruction. Invalidate associated `src_hash` deps. @@ -3378,11 +3403,11 @@ pub fn mapOldZirToNew( } /// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl. 
-pub fn ensureFileAnalyzed(zcu: *Zcu, file: *File) SemaError!void { - if (file.root_decl.unwrap()) |existing_root| { +pub fn ensureFileAnalyzed(zcu: *Zcu, file_index: File.Index) SemaError!void { + if (zcu.fileRootDecl(file_index).unwrap()) |existing_root| { return zcu.ensureDeclAnalyzed(existing_root); } else { - return zcu.semaFile(file); + return zcu.semaFile(file_index); } } @@ -3455,7 +3480,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } if (mod.declIsRoot(decl_index)) { - const changed = try mod.semaFileUpdate(decl.getFileScope(mod), decl_was_outdated); + const changed = try mod.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated); break :blk .{ .invalidate_decl_val = changed, .invalidate_decl_ref = changed, @@ -3787,17 +3812,23 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) func.analysis(ip).state = .queued; } -/// https://github.com/ziglang/zig/issues/14307 -pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void { - const file = (try mod.importPkg(pkg)).file; - if (file.root_decl == .none) { - return mod.semaFile(file); +pub fn semaPkg(zcu: *Zcu, pkg: *Package.Module) !void { + const import_file_result = try zcu.importPkg(pkg); + const root_decl_index = zcu.fileRootDecl(import_file_result.file_index); + if (root_decl_index == .none) { + return zcu.semaFile(import_file_result.file_index); } } -fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespace.Index, file: *File) Allocator.Error!InternPool.Index { +fn getFileRootStruct( + zcu: *Zcu, + decl_index: Decl.Index, + namespace_index: Namespace.Index, + file_index: File.Index, +) Allocator.Error!InternPool.Index { const gpa = zcu.gpa; const ip = &zcu.intern_pool; + const file = zcu.fileByIndex(file_index); const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); @@ -3818,7 +3849,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa const decls = file.zir.bodySlice(extra_index, decls_len); extra_index += decls_len; - const tracked_inst = try ip.trackZir(gpa, file, .main_struct_inst); + const tracked_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst); const wip_ty = switch (try ip.getStructType(gpa, .{ .layout = .auto, .fields_len = fields_len, @@ -3863,8 +3894,9 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa /// If `type_outdated`, the struct type itself is considered outdated and is /// reconstructed at a new InternPool index. Otherwise, the namespace is just /// re-analyzed. Returns whether the decl's tyval was invalidated. -fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { - const decl = zcu.declPtr(file.root_decl.unwrap().?); +fn semaFileUpdate(zcu: *Zcu, file_index: File.Index, type_outdated: bool) SemaError!bool { + const file = zcu.fileByIndex(file_index); + const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?); log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ file.mod.fully_qualified_name, @@ -3883,7 +3915,8 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { if (decl.analysis == .file_failure) { // No struct type currently exists. Create one! 
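        // (`.file_failure` means AstGen previously failed for this file, so
        // the root Decl exists but no struct type was ever created for it.)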
- _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file); + const root_decl = zcu.fileRootDecl(file_index); + _ = try zcu.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index); return true; } @@ -3892,10 +3925,13 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { if (type_outdated) { // Invalidate the existing type, reusing the decl and namespace. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = file.root_decl.unwrap().? })); + const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?; + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ + .decl = file_root_decl, + })); zcu.intern_pool.remove(decl.val.toIntern()); decl.val = undefined; - _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file); + _ = try zcu.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); return true; } @@ -3923,35 +3959,36 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool { /// Regardless of the file status, will create a `Decl` if none exists so that we can track /// dependencies and re-analyze when the file becomes outdated. -fn semaFile(mod: *Module, file: *File) SemaError!void { +fn semaFile(zcu: *Zcu, file_index: File.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); - assert(file.root_decl == .none); + const file = zcu.fileByIndex(file_index); + assert(zcu.fileRootDecl(file_index) == .none); - const gpa = mod.gpa; - log.debug("semaFile mod={s} sub_file_path={s}", .{ + const gpa = zcu.gpa; + log.debug("semaFile zcu={s} sub_file_path={s}", .{ file.mod.fully_qualified_name, file.sub_file_path, }); // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an // InternPool index. 
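    // ("These three things" are the namespace, the root Decl, and the struct
    // type created below.)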
- const new_namespace_index = try mod.createNamespace(.{ + const new_namespace_index = try zcu.createNamespace(.{ .parent = .none, .decl_index = undefined, - .file_scope = file, + .file_scope = file_index, }); - errdefer mod.destroyNamespace(new_namespace_index); + errdefer zcu.destroyNamespace(new_namespace_index); - const new_decl_index = try mod.allocateNewDecl(new_namespace_index); - const new_decl = mod.declPtr(new_decl_index); + const new_decl_index = try zcu.allocateNewDecl(new_namespace_index); + const new_decl = zcu.declPtr(new_decl_index); errdefer @panic("TODO error handling"); - file.root_decl = new_decl_index.toOptional(); - mod.namespacePtr(new_namespace_index).decl_index = new_decl_index; + zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); + zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - new_decl.name = try file.fullyQualifiedName(mod); + new_decl.name = try file.fullyQualifiedName(zcu); new_decl.name_fully_qualified = true; new_decl.is_pub = true; new_decl.is_exported = false; @@ -3965,13 +4002,13 @@ fn semaFile(mod: *Module, file: *File) SemaError!void { } assert(file.zir_loaded); - const struct_ty = try mod.getFileRootStruct(new_decl_index, new_namespace_index, file); - errdefer mod.intern_pool.remove(struct_ty); + const struct_ty = try zcu.getFileRootStruct(new_decl_index, new_namespace_index, file_index); + errdefer zcu.intern_pool.remove(struct_ty); - switch (mod.comp.cache_use) { + switch (zcu.comp.cache_use) { .whole => |whole| if (whole.cache_manifest) |man| { const source = file.getSource(gpa) catch |err| { - try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)}); + try reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)}); return error.AnalysisFail; }; @@ -3980,7 +4017,7 @@ fn semaFile(mod: *Module, file: *File) SemaError!void { file.mod.root.sub_path, file.sub_file_path, }) catch |err| { - try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)}); + try reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)}); return error.AnalysisFail; }; errdefer gpa.free(resolved_path); @@ -4000,57 +4037,58 @@ const SemaDeclResult = packed struct { invalidate_decl_ref: bool, }; -fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { +fn semaDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - const ip = &mod.intern_pool; + const decl = zcu.declPtr(decl_index); + const ip = &zcu.intern_pool; - if (decl.getFileScope(mod).status != .success_zir) { + if (decl.getFileScope(zcu).status != .success_zir) { return error.AnalysisFail; } - assert(!mod.declIsRoot(decl_index)); + assert(!zcu.declIsRoot(decl_index)); if (decl.zir_decl_index == .none and decl.owns_tv) { // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). 
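        // Such decls have no ZIR body to re-run, so they take a separate path.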
- return mod.semaAnonOwnerDecl(decl_index); + return zcu.semaAnonOwnerDecl(decl_index); } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)}); + log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); + log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); } const old_has_tv = decl.has_tv; // The following values are ignored if `!old_has_tv` - const old_ty = if (old_has_tv) decl.typeOf(mod) else undefined; + const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined; const old_val = decl.val; const old_align = decl.alignment; const old_linksection = decl.@"linksection"; const old_addrspace = decl.@"addrspace"; - const old_is_inline = if (decl.getOwnedFunction(mod)) |prev_func| + const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func| prev_func.analysis(ip).state == .inline_only else false; const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip); - const gpa = mod.gpa; - const zir = decl.getFileScope(mod).zir; + const gpa = zcu.gpa; + const zir = decl.getFileScope(zcu).zir; const builtin_type_target_index: InternPool.Index = ip_index: { - const std_mod = mod.std_mod; - if (decl.getFileScope(mod).mod != std_mod) break :ip_index .none; + const std_mod = zcu.std_mod; + if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none; // We're in the std module. - const std_file = (try mod.importPkg(std_mod)).file; - const std_decl = mod.declPtr(std_file.root_decl.unwrap().?); - const std_namespace = std_decl.getInnerNamespace(mod).?; + const std_file_imported = try zcu.importPkg(std_mod); + const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); + const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); + const std_namespace = std_decl.getInnerNamespace(zcu).?; const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); - const builtin_decl = mod.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = mod }) orelse break :ip_index .none); - const builtin_namespace = builtin_decl.getInnerNamespaceIndex(mod).unwrap() orelse break :ip_index .none; + const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); + const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; if (decl.src_namespace != builtin_namespace) break :ip_index .none; // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index. for ([_][]const u8{ @@ -4083,7 +4121,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { break :ip_index .none; }; - mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index })); decl.analysis = .in_progress; @@ -4094,7 +4132,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = mod, + .mod = zcu, .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, @@ -4112,8 +4150,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. 
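    // Note that `trackZir` is now keyed on the file's path digest rather than on a `*File` pointer.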
try sema.declareDependency(.{ .src_hash = try ip.trackZir( - sema.gpa, - decl.getFileScope(mod), + gpa, + zcu.filePathDigest(decl.getFileScopeIndex(zcu)), decl_inst, ) }); @@ -4129,7 +4167,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { }; defer block_scope.instructions.deinit(gpa); - const decl_bodies = decl.zirBodies(mod); + const decl_bodies = decl.zirBodies(zcu); const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst); // We'll do some other bits with the Sema. Clear the type target index just @@ -4141,22 +4179,22 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { const ty_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_ty = 0 }); const init_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_init = 0 }); const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref); - const decl_ty = decl_val.typeOf(mod); + const decl_ty = decl_val.typeOf(zcu); // Note this resolves the type of the Decl, not the value; if this Decl // is a struct, for example, this resolves `type` (which needs no resolution), // not the struct itself. - try decl_ty.resolveLayout(mod); + try decl_ty.resolveLayout(zcu); if (decl.kind == .@"usingnamespace") { - if (!decl_ty.eql(Type.type, mod)) { + if (!decl_ty.eql(Type.type, zcu)) { return sema.fail(&block_scope, ty_src, "expected type, found {}", .{ - decl_ty.fmt(mod), + decl_ty.fmt(zcu), }); } const ty = decl_val.toType(); - if (ty.getNamespace(mod) == null) { - return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); + if (ty.getNamespace(zcu) == null) { + return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(zcu)}); } decl.val = ty.toValue(); @@ -4194,7 +4232,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { .func => |func| { decl.owns_tv = func.owner_decl == decl_index; queue_linker_work = false; - is_inline = decl.owns_tv and decl_ty.fnCallingConvention(mod) == .Inline; + is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline; is_func = decl.owns_tv; }, @@ -4246,10 +4284,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { decl.analysis = .complete; const result: SemaDeclResult = if (old_has_tv) .{ - .invalidate_decl_val = !decl_ty.eql(old_ty, mod) or - !decl.val.eql(old_val, decl_ty, mod) or + .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or + !decl.val.eql(old_val, decl_ty, zcu) or is_inline != old_is_inline, - .invalidate_decl_ref = !decl_ty.eql(old_ty, mod) or + .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or decl.alignment != old_align or decl.@"linksection" != old_linksection or decl.@"addrspace" != old_addrspace or @@ -4263,12 +4301,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { if (has_runtime_bits) { // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. 
- try decl_ty.resolveFully(mod); + try decl_ty.resolveFully(zcu); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); - if (result.invalidate_decl_ref and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); + if (result.invalidate_decl_ref and zcu.emit_h != null) { + try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } } @@ -4322,6 +4360,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { pub const ImportFileResult = struct { file: *File, + file_index: File.Index, is_new: bool, is_pkg: bool, }; @@ -4344,20 +4383,25 @@ pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { errdefer _ = zcu.import_table.pop(); if (gop.found_existing) { try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod }); - return ImportFileResult{ + return .{ .file = gop.value_ptr.*, + .file_index = @enumFromInt(gop.index), .is_new = false, .is_pkg = true, }; } + try zcu.files.ensureUnusedCapacity(gpa, 1); + if (mod.builtin_file) |builtin_file| { keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = builtin_file; try builtin_file.addReference(zcu.*, .{ .root = mod }); - try zcu.path_digest_map.put(gpa, builtin_file.path_digest, {}); + const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path); + zcu.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = builtin_file, + .file_index = @enumFromInt(zcu.files.entries.len - 1), .is_new = false, .is_pkg = true, }; @@ -4382,43 +4426,36 @@ pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { .zir = undefined, .status = .never_loaded, .mod = mod, - .root_decl = .none, - .path_digest = digest: { - const want_local_cache = mod == zcu.main_mod; - var path_hash: Cache.HashHelper = .{}; - path_hash.addBytes(build_options.version); - path_hash.add(builtin.zig_backend); - if (!want_local_cache) { - path_hash.addOptionalBytes(mod.root.root_dir.path); - path_hash.addBytes(mod.root.sub_path); - } - path_hash.addBytes(sub_file_path); - var bin: Cache.BinDigest = undefined; - path_hash.hasher.final(&bin); - break :digest bin; - }, }; + + const path_digest = computePathDigest(zcu, mod, sub_file_path); + try new_file.addReference(zcu.*, .{ .root = mod }); - try zcu.path_digest_map.put(gpa, new_file.path_digest, {}); - return ImportFileResult{ + zcu.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ .file = new_file, + .file_index = @enumFromInt(zcu.files.entries.len - 1), .is_new = true, .is_pkg = true, }; } +/// Called from a worker thread during AstGen. +/// Also called from Sema during semantic analysis. pub fn importFile( zcu: *Zcu, cur_file: *File, import_string: []const u8, ) !ImportFileResult { + const mod = cur_file.mod; + if (std.mem.eql(u8, import_string, "std")) { return zcu.importPkg(zcu.std_mod); } if (std.mem.eql(u8, import_string, "root")) { return zcu.importPkg(zcu.root_mod); } - if (cur_file.mod.deps.get(import_string)) |pkg| { + if (mod.deps.get(import_string)) |pkg| { return zcu.importPkg(pkg); } if (!mem.endsWith(u8, import_string, ".zig")) { @@ -4430,8 +4467,8 @@ pub fn importFile( // an import refers to the same as another, despite different relative paths // or differently mapped package names. 
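    // The import table is keyed on this fully resolved path, so aliased imports map to a single `File`.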
const resolved_path = try std.fs.path.resolve(gpa, &.{ - cur_file.mod.root.root_dir.path orelse ".", - cur_file.mod.root.sub_path, + mod.root.root_dir.path orelse ".", + mod.root.sub_path, cur_file.sub_file_path, "..", import_string, @@ -4442,18 +4479,21 @@ pub fn importFile( const gop = try zcu.import_table.getOrPut(gpa, resolved_path); errdefer _ = zcu.import_table.pop(); - if (gop.found_existing) return ImportFileResult{ + if (gop.found_existing) return .{ .file = gop.value_ptr.*, + .file_index = @enumFromInt(gop.index), .is_new = false, .is_pkg = false, }; + try zcu.files.ensureUnusedCapacity(gpa, 1); + const new_file = try gpa.create(File); errdefer gpa.destroy(new_file); const resolved_root_path = try std.fs.path.resolve(gpa, &.{ - cur_file.mod.root.root_dir.path orelse ".", - cur_file.mod.root.sub_path, + mod.root.root_dir.path orelse ".", + mod.root.sub_path, }); defer gpa.free(resolved_root_path); @@ -4484,26 +4524,14 @@ pub fn importFile( .tree = undefined, .zir = undefined, .status = .never_loaded, - .mod = cur_file.mod, - .root_decl = .none, - .path_digest = digest: { - const want_local_cache = cur_file.mod == zcu.main_mod; - var path_hash: Cache.HashHelper = .{}; - path_hash.addBytes(build_options.version); - path_hash.add(builtin.zig_backend); - if (!want_local_cache) { - path_hash.addOptionalBytes(cur_file.mod.root.root_dir.path); - path_hash.addBytes(cur_file.mod.root.sub_path); - } - path_hash.addBytes(sub_file_path); - var bin: Cache.BinDigest = undefined; - path_hash.hasher.final(&bin); - break :digest bin; - }, + .mod = mod, }; - try zcu.path_digest_map.put(gpa, new_file.path_digest, {}); - return ImportFileResult{ + + const path_digest = computePathDigest(zcu, mod, sub_file_path); + zcu.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ .file = new_file, + .file_index = @enumFromInt(zcu.files.entries.len - 1), .is_new = true, .is_pkg = false, }; @@ -4581,6 +4609,21 @@ pub fn embedFile( return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); } +fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest { + const want_local_cache = mod == zcu.main_mod; + var path_hash: Cache.HashHelper = .{}; + path_hash.addBytes(build_options.version); + path_hash.add(builtin.zig_backend); + if (!want_local_cache) { + path_hash.addOptionalBytes(mod.root.root_dir.path); + path_hash.addBytes(mod.root.sub_path); + } + path_hash.addBytes(sub_file_path); + var bin: Cache.BinDigest = undefined; + path_hash.hasher.final(&bin); + return bin; +} + /// https://github.com/ziglang/zig/issues/14307 fn newEmbedFile( mod: *Module, @@ -4765,7 +4808,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void const namespace_index = iter.namespace_index; const namespace = zcu.namespacePtr(namespace_index); const gpa = zcu.gpa; - const zir = namespace.file_scope.zir; + const zir = namespace.fileScope(zcu).zir; const ip = &zcu.intern_pool; const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; @@ -4848,7 +4891,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void else => {}, } - const tracked_inst = try ip.trackZir(gpa, iter.parent_decl.getFileScope(zcu), decl_inst); + const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); + const tracked_inst = try ip.trackZir(gpa, zcu.filePathDigest(parent_file_scope_index), decl_inst); // We create a Decl for it regardless of analysis status. 
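    // Even never-analyzed decls need a stable index so dependencies on them can be tracked.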
@@ -4878,7 +4922,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); const comp = zcu.comp; - const decl_mod = namespace.file_scope.mod; + const decl_mod = namespace.fileScope(zcu).mod; const want_analysis = declaration.flags.is_export or switch (kind) { .anon => unreachable, .@"comptime" => true, @@ -4908,7 +4952,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void // re-analysis for us if necessary. if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ - namespace.file_scope.sub_file_path, decl_name.fmt(ip), decl_index, + namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, }); comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); } @@ -5512,77 +5556,78 @@ fn handleUpdateExports( } pub fn populateTestFunctions( - mod: *Module, + zcu: *Zcu, main_progress_node: std.Progress.Node, ) !void { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - const builtin_mod = mod.root_mod.getBuiltinDependency(); - const builtin_file = (mod.importPkg(builtin_mod) catch unreachable).file; - const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); - const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const builtin_mod = zcu.root_mod.getBuiltinDependency(); + const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index; + const root_decl_index = zcu.fileRootDecl(builtin_file_index); + const root_decl = zcu.declPtr(root_decl_index.unwrap().?); + const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); const decl_index = builtin_namespace.decls.getKeyAdapted( test_functions_str, - DeclAdapter{ .zcu = mod }, + DeclAdapter{ .zcu = zcu }, ).?; { // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. - mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); + zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); defer { - mod.sema_prog_node.end(); - mod.sema_prog_node = undefined; + zcu.sema_prog_node.end(); + zcu.sema_prog_node = undefined; } - try mod.ensureDeclAnalyzed(decl_index); + try zcu.ensureDeclAnalyzed(decl_index); } - const decl = mod.declPtr(decl_index); - const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod); + const decl = zcu.declPtr(decl_index); + const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu); const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: { - // Add mod.test_functions to an array decl then make the test_functions + // Add zcu.test_functions to an array decl then make the test_functions // decl reference it as a slice. 
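        // Each array element pairs a test's fully qualified name with a pointer to its function.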
- const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count()); defer gpa.free(test_fn_vals); - for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { - const test_decl = mod.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(mod); + for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { + const test_decl = zcu.declPtr(test_decl_index); + const test_decl_name = try test_decl.fullyQualifiedName(zcu); const test_decl_name_len = test_decl_name.length(ip); const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { - const test_name_ty = try mod.arrayType(.{ + const test_name_ty = try zcu.arrayType(.{ .len = test_decl_name_len, .child = .u8_type, }); - const test_name_val = try mod.intern(.{ .aggregate = .{ + const test_name_val = try zcu.intern(.{ .aggregate = .{ .ty = test_name_ty.toIntern(), .storage = .{ .bytes = test_decl_name.toString() }, } }); break :n .{ - .orig_ty = (try mod.singleConstPtrType(test_name_ty)).toIntern(), + .orig_ty = (try zcu.singleConstPtrType(test_name_ty)).toIntern(), .val = test_name_val, }; }; const test_fn_fields = .{ // name - try mod.intern(.{ .slice = .{ + try zcu.intern(.{ .slice = .{ .ty = .slice_const_u8_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try zcu.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_type, .base_addr = .{ .anon_decl = test_name_anon_decl }, .byte_offset = 0, } }), - .len = try mod.intern(.{ .int = .{ + .len = try zcu.intern(.{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = test_decl_name_len }, } }), } }), // func - try mod.intern(.{ .ptr = .{ - .ty = try mod.intern(.{ .ptr_type = .{ - .child = test_decl.typeOf(mod).toIntern(), + try zcu.intern(.{ .ptr = .{ + .ty = try zcu.intern(.{ .ptr_type = .{ + .child = test_decl.typeOf(zcu).toIntern(), .flags = .{ .is_const = true, }, @@ -5591,29 +5636,29 @@ pub fn populateTestFunctions( .byte_offset = 0, } }), }; - test_fn_val.* = try mod.intern(.{ .aggregate = .{ + test_fn_val.* = try zcu.intern(.{ .aggregate = .{ .ty = test_fn_ty.toIntern(), .storage = .{ .elems = &test_fn_fields }, } }); } - const array_ty = try mod.arrayType(.{ + const array_ty = try zcu.arrayType(.{ .len = test_fn_vals.len, .child = test_fn_ty.toIntern(), .sentinel = .none, }); - const array_val = try mod.intern(.{ .aggregate = .{ + const array_val = try zcu.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .elems = test_fn_vals }, } }); break :array .{ - .orig_ty = (try mod.singleConstPtrType(array_ty)).toIntern(), + .orig_ty = (try zcu.singleConstPtrType(array_ty)).toIntern(), .val = array_val, }; }; { - const new_ty = try mod.ptrType(.{ + const new_ty = try zcu.ptrType(.{ .child = test_fn_ty.toIntern(), .flags = .{ .is_const = true, @@ -5621,14 +5666,14 @@ pub fn populateTestFunctions( }, }); const new_val = decl.val; - const new_init = try mod.intern(.{ .slice = .{ + const new_init = try zcu.intern(.{ .slice = .{ .ty = new_ty.toIntern(), - .ptr = try mod.intern(.{ .ptr = .{ - .ty = new_ty.slicePtrFieldType(mod).toIntern(), + .ptr = try zcu.intern(.{ .ptr = .{ + .ty = new_ty.slicePtrFieldType(zcu).toIntern(), .base_addr = .{ .anon_decl = array_anon_decl }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), + .len = (try zcu.intValue(Type.usize, zcu.test_functions.count())).toIntern(), } }); ip.mutateVarInit(decl.val.toIntern(), new_init); @@ -5638,13 
+5683,13 @@ pub fn populateTestFunctions( decl.has_tv = true; } { - mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); + zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); defer { - mod.codegen_prog_node.end(); - mod.codegen_prog_node = undefined; + zcu.codegen_prog_node.end(); + zcu.codegen_prog_node = undefined; } - try mod.linkerUpdateDecl(decl_index); + try zcu.linkerUpdateDecl(decl_index); } } @@ -5684,31 +5729,35 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { } fn reportRetryableFileError( - mod: *Module, - file: *File, + zcu: *Zcu, + file_index: File.Index, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const file = zcu.fileByIndex(file_index); file.status = .retryable_failure; const err_msg = try ErrorMsg.create( - mod.gpa, + gpa, .{ - .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), .offset = .entire_file, }, format, args, ); - errdefer err_msg.destroy(mod.gpa); + errdefer err_msg.destroy(gpa); - mod.comp.mutex.lock(); - defer mod.comp.mutex.unlock(); + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); - const gop = try mod.failed_files.getOrPut(mod.gpa, file); + const gop = try zcu.failed_files.getOrPut(gpa, file); if (gop.found_existing) { if (gop.value_ptr.*) |old_err_msg| { - old_err_msg.destroy(mod.gpa); + old_err_msg.destroy(gpa); } } gop.value_ptr.* = err_msg; @@ -6528,8 +6577,9 @@ pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref { pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex { const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file; - const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?; + const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); + const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; + const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); @@ -6544,3 +6594,20 @@ pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type { ty.resolveFully(zcu) catch @panic("std.builtin is corrupt"); return ty; } + +pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File { + return zcu.import_table.values()[@intFromEnum(i)]; +} + +/// Returns the `Decl` of the struct that represents this `File`. 
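+/// Returns `.none` if the file has not yet been semantically analyzed.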
+pub fn fileRootDecl(zcu: *const Zcu, i: File.Index) Decl.OptionalIndex { + return zcu.files.values()[@intFromEnum(i)]; +} + +pub fn setFileRootDecl(zcu: *Zcu, i: File.Index, root_decl: Decl.OptionalIndex) void { + zcu.files.values()[@intFromEnum(i)] = root_decl; +} + +pub fn filePathDigest(zcu: *const Zcu, i: File.Index) Cache.BinDigest { + return zcu.files.keys()[@intFromEnum(i)]; +} diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 14b9cce3a884..023c86dfb8f0 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -345,7 +345,7 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.file_scope.mod.resolved_target.result; + const target = &namespace.fileScope(zcu).mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 0423b63d2383..3f10513bb2fe 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -352,7 +352,7 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.file_scope.mod.resolved_target.result; + const target = &namespace.fileScope(zcu).mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 3f01b7473368..2faddc22e8dd 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -712,8 +712,8 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.file_scope.mod.resolved_target.result; - const mod = namespace.file_scope.mod; + const target = &namespace.fileScope(zcu).mod.resolved_target.result; + const mod = namespace.fileScope(zcu).mod; var branch_stack = std.ArrayList(Branch).init(gpa); defer { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 2416eb9176a4..b837eb9ade4a 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -277,7 +277,7 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.file_scope.mod.resolved_target.result; + const target = &namespace.fileScope(zcu).mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2ecface64e92..2f2e35a75b29 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1212,11 +1212,11 @@ pub fn generate( _ = src_loc; const comp = bin_file.comp; const gpa = comp.gpa; - const mod = comp.module.?; - const func = mod.funcInfo(func_index); - const decl = mod.declPtr(func.owner_decl); - const namespace = mod.namespacePtr(decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const zcu = comp.module.?; + const func = zcu.funcInfo(func_index); + const decl = zcu.declPtr(func.owner_decl); + const namespace = zcu.namespacePtr(decl.src_namespace); + const target = namespace.fileScope(zcu).mod.resolved_target.result; var code_gen: CodeGen = .{ .gpa = gpa, .air = air, @@ -7706,7 +7706,7 @@ fn airFence(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { // for a single-threaded build, can we emit the `fence` instruction. // In all other cases, we emit no instructions for a fence. const func_namespace = zcu.namespacePtr(func.decl.src_namespace); - const single_threaded = func_namespace.file_scope.mod.single_threaded; + const single_threaded = func_namespace.fileScope(zcu).mod.single_threaded; if (func.useAtomicFeature() and !single_threaded) { try func.addAtomicTag(.atomic_fence); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index def0edcac9af..538b7400422a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -810,7 +810,7 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.typeOf(zcu); const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const mod = namespace.file_scope.mod; + const mod = namespace.fileScope(zcu).mod; var function = Self{ .gpa = gpa, diff --git a/src/codegen.zig b/src/codegen.zig index 5e25359d4470..059b4fa7d490 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -58,7 +58,7 @@ pub fn generateFunction( const func = zcu.funcInfo(func_index); const decl = zcu.declPtr(func.owner_decl); const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; switch (target.cpu.arch) { .arm, .armeb, @@ -88,7 +88,7 @@ pub fn generateLazyFunction( const decl_index = lazy_sym.ty.getOwnerDecl(zcu); const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; switch (target.cpu.arch) { .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, src_loc, lazy_sym, code, debug_output), else => unreachable, @@ -742,7 +742,7 @@ fn lowerDeclRef( const zcu = lf.comp.module.?; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; const ptr_width = target.ptrBitWidth(); const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn; @@ -836,7 +836,7 @@ fn genDeclRef( const ptr_decl = zcu.declPtr(ptr_decl_index); const namespace = zcu.namespacePtr(ptr_decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -875,7 +875,7 @@ fn genDeclRef( } const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.file_scope.mod.single_threaded; + const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; const is_threadlocal = val.isPtrToThreadLocal(zcu) and !single_threaded; const is_extern = decl.isExtern(zcu); @@ -985,7 +985,7 @@ pub fn genTypedValue( const owner_decl = zcu.declPtr(owner_decl_index); const namespace = zcu.namespacePtr(owner_decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; const ptr_bits = target.ptrBitWidth(); if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 92e9edb43389..2fd3d2b164d4 100644 --- a/src/codegen/c.zig +++ 
b/src/codegen/c.zig @@ -2581,7 +2581,7 @@ pub fn genTypeDecl( _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(';'); const owner_decl = zcu.declPtr(owner_decl_index); - const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).file_scope.mod; + const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu).mod; if (!owner_mod.strip) { try writer.writeAll(" /* "); try owner_decl.renderFullyQualifiedName(zcu, writer); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 02933929c872..6efef20f22ba 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1362,7 +1362,8 @@ pub const Object = struct { const decl_index = func.owner_decl; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.file_scope.mod; + const file_scope = namespace.fileScope(zcu); + const owner_mod = file_scope.mod; const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?; const target = owner_mod.resolved_target.result; const ip = &zcu.intern_pool; @@ -1633,7 +1634,7 @@ pub const Object = struct { function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); const file, const subprogram = if (!wip.strip) debug_info: { - const file = try o.getDebugFile(namespace.file_scope); + const file = try o.getDebugFile(file_scope); const line_number = decl.navSrcLine(zcu) + 1; const is_internal_linkage = decl.val.getExternFunc(zcu) == null; @@ -1720,23 +1721,23 @@ pub const Object = struct { pub fn updateExports( self: *Object, - mod: *Module, + zcu: *Zcu, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { const decl_index = switch (exported) { .decl_index => |i| i, - .value => |val| return updateExportedValue(self, mod, val, export_indices), + .value => |val| return updateExportedValue(self, zcu, val, export_indices), }; - const ip = &mod.intern_pool; + const ip = &zcu.intern_pool; const global_index = self.decl_map.get(decl_index).?; - const decl = mod.declPtr(decl_index); - const comp = mod.comp; + const decl = zcu.declPtr(decl_index); + const comp = zcu.comp; if (export_indices.len != 0) { - return updateExportedGlobal(self, mod, global_index, export_indices); + return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(mod)).toSlice(ip)); + const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(zcu)).toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -1908,12 +1909,12 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; - const mod = o.module; - const ip = &mod.intern_pool; + const zcu = o.module; + const ip = &zcu.intern_pool; if (o.debug_type_map.get(ty)) |debug_type| return debug_type; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .Void, .NoReturn, => { @@ -1925,12 +1926,12 @@ pub const Object = struct { return debug_void_type; }, .Int => { - const info = ty.intInfo(mod); + const info = ty.intInfo(zcu); assert(info.bits != 0); const name = try o.allocTypeName(ty); defer gpa.free(name); const builder_name = try o.builder.metadataString(name); - const debug_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types + const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types const debug_int_type = switch (info.signedness) { .signed => try 
o.builder.debugSignedType(builder_name, debug_bits), .unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits), @@ -1939,10 +1940,10 @@ pub const Object = struct { return debug_int_type; }, .Enum => { - const owner_decl_index = ty.getOwnerDecl(mod); + const owner_decl_index = ty.getOwnerDecl(zcu); const owner_decl = o.module.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; @@ -1954,13 +1955,13 @@ pub const Object = struct { defer gpa.free(enumerators); const int_ty = Type.fromInterned(enum_type.tag_ty); - const int_info = ty.intInfo(mod); + const int_info = ty.intInfo(zcu); assert(int_info.bits != 0); for (enum_type.names.get(ip), 0..) |field_name_ip, i| { var bigint_space: Value.BigIntSpace = undefined; const bigint = if (enum_type.values.len != 0) - Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, mod) + Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu) else std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); @@ -1972,7 +1973,8 @@ pub const Object = struct { ); } - const file = try o.getDebugFile(mod.namespacePtr(owner_decl.src_namespace).file_scope); + const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); + const file = try o.getDebugFile(file_scope); const scope = try o.namespaceToDebugScope(owner_decl.src_namespace); const name = try o.allocTypeName(ty); @@ -1982,10 +1984,10 @@ pub const Object = struct { try o.builder.metadataString(name), file, scope, - owner_decl.typeSrcLine(mod) + 1, // Line + owner_decl.typeSrcLine(zcu) + 1, // Line try o.lowerDebugType(int_ty), - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(enumerators), ); @@ -2014,7 +2016,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. 
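            // e.g. sentinels, non-generic address spaces, alignment, and const/volatile are all erased here.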
- const ptr_info = ty.ptrInfo(mod); + const ptr_info = ty.ptrInfo(zcu); if (ptr_info.sentinel != .none or ptr_info.flags.address_space != .generic or @@ -2025,10 +2027,10 @@ pub const Object = struct { ptr_info.flags.is_const or ptr_info.flags.is_volatile or ptr_info.flags.size == .Many or ptr_info.flags.size == .C or - !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(mod)) + !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) { - const bland_ptr_ty = try mod.ptrType(.{ - .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(mod)) + const bland_ptr_ty = try zcu.ptrType(.{ + .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) .anyopaque_type else ptr_info.child, @@ -2050,18 +2052,18 @@ pub const Object = struct { // Set as forward reference while the type is lowered in case it references itself try o.debug_type_map.put(gpa, ty, debug_fwd_ref); - if (ty.isSlice(mod)) { - const ptr_ty = ty.slicePtrFieldType(mod); + if (ty.isSlice(zcu)) { + const ptr_ty = ty.slicePtrFieldType(zcu); const len_ty = Type.usize; const name = try o.allocTypeName(ty); defer gpa.free(name); const line = 0; - const ptr_size = ptr_ty.abiSize(mod); - const ptr_align = ptr_ty.abiAlignment(mod); - const len_size = len_ty.abiSize(mod); - const len_align = len_ty.abiAlignment(mod); + const ptr_size = ptr_ty.abiSize(zcu); + const ptr_align = ptr_ty.abiAlignment(zcu); + const len_size = len_ty.abiSize(zcu); + const len_align = len_ty.abiAlignment(zcu); const len_offset = len_align.forward(ptr_size); @@ -2093,8 +2095,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope line, .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_ptr_type, debug_len_type, @@ -2122,7 +2124,7 @@ pub const Object = struct { 0, // Line debug_elem_ty, target.ptrBitWidth(), - (ty.ptrAlignment(mod).toByteUnits() orelse 0) * 8, + (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2146,13 +2148,14 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const owner_decl_index = ty.getOwnerDecl(mod); + const owner_decl_index = ty.getOwnerDecl(zcu); const owner_decl = o.module.declPtr(owner_decl_index); + const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), - try o.getDebugFile(mod.namespacePtr(owner_decl.src_namespace).file_scope), + try o.getDebugFile(file_scope), try o.namespaceToDebugScope(owner_decl.src_namespace), - owner_decl.typeSrcLine(mod) + 1, // Line + owner_decl.typeSrcLine(zcu) + 1, // Line .none, // Underlying type 0, // Size 0, // Align @@ -2167,13 +2170,13 @@ pub const Object = struct { .none, // File .none, // Scope 0, // Line - try o.lowerDebugType(ty.childType(mod)), - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + try o.lowerDebugType(ty.childType(zcu)), + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), - try o.builder.debugConstant(try o.builder.intConst(.i64, ty.arrayLen(mod))), + try o.builder.debugConstant(try o.builder.intConst(.i64, ty.arrayLen(zcu))), ), }), ); @@ -2181,14 +2184,14 @@ pub const Object = struct { return 
debug_array_type; }, .Vector => { - const elem_ty = ty.elemType2(mod); + const elem_ty = ty.elemType2(zcu); // Vector elements cannot be padded since that would make // @bitSizOf(elem) * len > @bitSizOf(vec). // Neither gdb nor lldb seem to be able to display non-byte sized // vectors properly. - const debug_elem_type = switch (elem_ty.zigTypeTag(mod)) { + const debug_elem_type = switch (elem_ty.zigTypeTag(zcu)) { .Int => blk: { - const info = elem_ty.intInfo(mod); + const info = elem_ty.intInfo(zcu); assert(info.bits != 0); const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -2202,7 +2205,7 @@ pub const Object = struct { try o.builder.metadataString("bool"), 1, ), - else => try o.lowerDebugType(ty.childType(mod)), + else => try o.lowerDebugType(ty.childType(zcu)), }; const debug_vector_type = try o.builder.debugVectorType( @@ -2211,12 +2214,12 @@ pub const Object = struct { .none, // Scope 0, // Line debug_elem_type, - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), - try o.builder.debugConstant(try o.builder.intConst(.i64, ty.vectorLen(mod))), + try o.builder.debugConstant(try o.builder.intConst(.i64, ty.vectorLen(zcu))), ), }), ); @@ -2227,8 +2230,8 @@ pub const Object = struct { .Optional => { const name = try o.allocTypeName(ty); defer gpa.free(name); - const child_ty = ty.optionalChild(mod); - if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const child_ty = ty.optionalChild(zcu); + if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const debug_bool_type = try o.builder.debugBoolType( try o.builder.metadataString(name), 8, @@ -2242,7 +2245,7 @@ pub const Object = struct { // Set as forward reference while the type is lowered in case it references itself try o.debug_type_map.put(gpa, ty, debug_fwd_ref); - if (ty.optionalReprIsPayload(mod)) { + if (ty.optionalReprIsPayload(zcu)) { const debug_optional_type = try o.lowerDebugType(child_ty); o.builder.debugForwardReferenceSetType(debug_fwd_ref, debug_optional_type); @@ -2255,10 +2258,10 @@ pub const Object = struct { } const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(mod); - const payload_align = child_ty.abiAlignment(mod); - const non_null_size = non_null_ty.abiSize(mod); - const non_null_align = non_null_ty.abiAlignment(mod); + const payload_size = child_ty.abiSize(zcu); + const payload_align = child_ty.abiAlignment(zcu); + const non_null_size = non_null_ty.abiSize(zcu); + const non_null_align = non_null_ty.abiAlignment(zcu); const non_null_offset = non_null_align.forward(payload_size); const debug_data_type = try o.builder.debugMemberType( @@ -2289,8 +2292,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_data_type, debug_some_type, @@ -2306,8 +2309,8 @@ pub const Object = struct { return debug_optional_type; }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.errorUnionPayload(zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // TODO: Maybe remove? 
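                // A zero-bit payload makes the error union's debug representation identical to `anyerror`.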
const debug_error_union_type = try o.lowerDebugType(Type.anyerror); try o.debug_type_map.put(gpa, ty, debug_error_union_type); @@ -2317,10 +2320,10 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const error_size = Type.anyerror.abiSize(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const payload_size = payload_ty.abiSize(mod); - const payload_align = payload_ty.abiAlignment(mod); + const error_size = Type.anyerror.abiSize(zcu); + const error_align = Type.anyerror.abiAlignment(zcu); + const payload_size = payload_ty.abiSize(zcu); + const payload_align = payload_ty.abiAlignment(zcu); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -2368,8 +2371,8 @@ pub const Object = struct { o.debug_compile_unit, // Sope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&fields), ); @@ -2390,14 +2393,14 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - if (mod.typeToPackedStruct(ty)) |struct_type| { + if (zcu.typeToPackedStruct(ty)) |struct_type| { const backing_int_ty = struct_type.backingIntType(ip).*; if (backing_int_ty != .none) { - const info = Type.fromInterned(backing_int_ty).intInfo(mod); + const info = Type.fromInterned(backing_int_ty).intInfo(zcu); const builder_name = try o.builder.metadataString(name); const debug_int_type = switch (info.signedness) { - .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(mod) * 8), - .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(mod) * 8), + .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8), + .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8), }; try o.debug_type_map.put(gpa, ty, debug_int_type); return debug_int_type; @@ -2417,10 +2420,10 @@ pub const Object = struct { const debug_fwd_ref = try o.builder.debugForwardReference(); for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(mod); - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_size = Type.fromInterned(field_ty).abiSize(zcu); + const field_align = Type.fromInterned(field_ty).abiAlignment(zcu); const field_offset = field_align.forward(offset); offset = field_offset + field_size; @@ -2448,8 +2451,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2467,7 +2470,7 @@ pub const Object = struct { // into. Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. 
- const owner_decl_index = ty.getOwnerDecl(mod); + const owner_decl_index = ty.getOwnerDecl(zcu); const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; @@ -2476,14 +2479,14 @@ pub const Object = struct { else => {}, } - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { - const owner_decl_index = ty.getOwnerDecl(mod); + if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const owner_decl_index = ty.getOwnerDecl(zcu); const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; } - const struct_type = mod.typeToStruct(ty).?; + const struct_type = zcu.typeToStruct(ty).?; var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{}; defer fields.deinit(gpa); @@ -2499,14 +2502,14 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_size = field_ty.abiSize(mod); - const field_align = mod.structFieldAlignment( + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + const field_size = field_ty.abiSize(zcu); + const field_align = zcu.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_offset = ty.structFieldOffset(field_index, mod); + const field_offset = ty.structFieldOffset(field_index, zcu); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); @@ -2529,8 +2532,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2543,14 +2546,14 @@ pub const Object = struct { return debug_struct_type; }, .Union => { - const owner_decl_index = ty.getOwnerDecl(mod); + const owner_decl_index = ty.getOwnerDecl(zcu); const name = try o.allocTypeName(ty); defer gpa.free(name); const union_type = ip.loadUnionType(ty.toIntern()); if (!union_type.haveFieldTypes(ip) or - !ty.hasRuntimeBitsIgnoreComptime(mod) or + !ty.hasRuntimeBitsIgnoreComptime(zcu) or !union_type.haveLayout(ip)) { const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); @@ -2558,7 +2561,7 @@ pub const Object = struct { return debug_union_type; } - const layout = mod.getUnionLayout(union_type); + const layout = zcu.getUnionLayout(union_type); const debug_fwd_ref = try o.builder.debugForwardReference(); @@ -2572,8 +2575,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple( &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))}, ), @@ -2600,12 +2603,12 @@ pub const Object = struct { for (0..tag_type.names.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(mod); 
+ const field_size = Type.fromInterned(field_ty).abiSize(zcu); const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) { .@"packed" => .none, - .auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)), + .auto, .@"extern" => zcu.unionFieldNormalAlignment(union_type, @intCast(field_index)), }; const field_name = tag_type.names.get(ip)[field_index]; @@ -2634,8 +2637,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2693,8 +2696,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(mod) * 8, - (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, + ty.abiSize(zcu) * 8, + (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&full_fields), ); @@ -2707,7 +2710,7 @@ pub const Object = struct { return debug_tagged_union_type; }, .Fn => { - const fn_info = mod.typeToFunc(ty).?; + const fn_info = zcu.typeToFunc(ty).?; var debug_param_types = std.ArrayList(Builder.Metadata).init(gpa); defer debug_param_types.deinit(); @@ -2715,32 +2718,32 @@ pub const Object = struct { try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len); // Return type goes first. - if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(mod)) { - const sret = firstParamSRet(fn_info, mod, target); + if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) { + const sret = firstParamSRet(fn_info, zcu, target); const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty)); if (sret) { - const ptr_ty = try mod.singleMutPtrType(Type.fromInterned(fn_info.return_type)); + const ptr_ty = try zcu.singleMutPtrType(Type.fromInterned(fn_info.return_type)); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } } else { debug_param_types.appendAssumeCapacity(try o.lowerDebugType(Type.void)); } - if (Type.fromInterned(fn_info.return_type).isError(mod) and + if (Type.fromInterned(fn_info.return_type).isError(zcu) and o.module.comp.config.any_error_tracing) { - const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try zcu.singleMutPtrType(try o.getStackTraceType()); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } for (0..fn_info.param_types.len) |i| { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]); - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - if (isByRef(param_ty, mod)) { - const ptr_ty = try mod.singleMutPtrType(param_ty); + if (isByRef(param_ty, zcu)) { + const ptr_ty = try zcu.singleMutPtrType(param_ty); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } else { debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty)); @@ -2767,9 +2770,10 @@ pub const Object = struct { } fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { - const mod = o.module; - const namespace = mod.namespacePtr(namespace_index); - if (namespace.parent == .none) return try o.getDebugFile(namespace.file_scope); + const zcu = o.module; + const namespace = zcu.namespacePtr(namespace_index); + const file_scope = 
namespace.fileScope(zcu); + if (namespace.parent == .none) return try o.getDebugFile(file_scope); const gop = try o.debug_unresolved_namespace_scopes.getOrPut(o.gpa, namespace_index); @@ -2779,13 +2783,14 @@ pub const Object = struct { } fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { - const mod = o.module; - const decl = mod.declPtr(decl_index); + const zcu = o.module; + const decl = zcu.declPtr(decl_index); + const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); return o.builder.debugStructType( - try o.builder.metadataString(decl.name.toSlice(&mod.intern_pool)), // TODO use fully qualified name - try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope), + try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), // TODO use fully qualified name + try o.getDebugFile(file_scope), try o.namespaceToDebugScope(decl.src_namespace), - decl.typeSrcLine(mod) + 1, + decl.typeSrcLine(zcu) + 1, .none, 0, 0, @@ -2794,21 +2799,22 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const mod = o.module; + const zcu = o.module; - const std_mod = mod.std_mod; - const std_file = (mod.importPkg(std_mod) catch unreachable).file; + const std_mod = zcu.std_mod; + const std_file_imported = zcu.importPkg(std_mod) catch unreachable; - const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin", .no_embedded_nulls); - const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = mod }).?; + const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); + const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); + const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. - const builtin_ty = mod.declPtr(builtin_decl).val.toType(); - const builtin_namespace = mod.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(mod)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = mod }).?; - const stack_trace_decl = mod.declPtr(stack_trace_decl_index); + const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); + const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; + const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = zcu }).?; + const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); // Sema should have ensured that StackTrace was analyzed. 
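        // The assert below enforces that invariant; this path is only reached when error return tracing is enabled.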
assert(stack_trace_decl.has_tv); @@ -2834,7 +2840,7 @@ pub const Object = struct { const gpa = o.gpa; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.file_scope.mod; + const owner_mod = namespace.fileScope(zcu).mod; const zig_fn_type = decl.typeOf(zcu); const gop = try o.decl_map.getOrPut(gpa, decl_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function; @@ -3059,17 +3065,17 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const mod = o.module; - const decl = mod.declPtr(decl_index); - const is_extern = decl.isExtern(mod); + const zcu = o.module; + const decl = zcu.declPtr(decl_index); + const is_extern = decl.isExtern(zcu); const variable_index = try o.builder.addVariable( try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(mod)).toSlice(&mod.intern_pool)), - try o.lowerType(decl.typeOf(mod)), - toLlvmGlobalAddressSpace(decl.@"addrspace", mod.getTarget()), + try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool)), + try o.lowerType(decl.typeOf(zcu)), + toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), ); gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; @@ -3077,9 +3083,9 @@ pub const Object = struct { if (is_extern) { variable_index.setLinkage(.external, &o.builder); variable_index.setUnnamedAddr(.default, &o.builder); - if (decl.val.getVariable(mod)) |decl_var| { - const decl_namespace = mod.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.file_scope.mod.single_threaded; + if (decl.val.getVariable(zcu)) |decl_var| { + const decl_namespace = zcu.namespacePtr(decl.src_namespace); + const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; variable_index.setThreadLocal( if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, &o.builder, @@ -4638,7 +4644,8 @@ pub const DeclGen = struct { const o = dg.object; const zcu = o.module; const namespace = zcu.namespacePtr(dg.decl.src_namespace); - return namespace.file_scope.mod; + const file_scope = namespace.fileScope(zcu); + return file_scope.mod; } fn todo(dg: *DeclGen, comptime format: []const u8, args: anytype) Error { @@ -4682,7 +4689,7 @@ pub const DeclGen = struct { if (decl.val.getVariable(zcu)) |decl_var| { const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.file_scope.mod.single_threaded; + const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; variable_index.setThreadLocal( if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, &o.builder, @@ -4692,10 +4699,11 @@ pub const DeclGen = struct { const line_number = decl.navSrcLine(zcu) + 1; const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.file_scope.mod; + const file_scope = namespace.fileScope(zcu); + const owner_mod = file_scope.mod; if (!owner_mod.strip) { - const debug_file = try o.getDebugFile(namespace.file_scope); + const debug_file = try o.getDebugFile(file_scope); const debug_global_var = try o.builder.debugGlobalVar( try o.builder.metadataString(decl.name.toSlice(ip)), // Name @@ -5143,9 +5151,10 @@ pub const FuncGen = struct { const decl_index = func.owner_decl; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.file_scope.mod; + const 
file_scope = namespace.fileScope(zcu); + const owner_mod = file_scope.mod; - self.file = try o.getDebugFile(namespace.file_scope); + self.file = try o.getDebugFile(file_scope); const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index c56a5a799e9c..2fbe9097d63d 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -188,19 +188,20 @@ pub const Object = struct { fn genDecl( self: *Object, - mod: *Module, + zcu: *Zcu, decl_index: InternPool.DeclIndex, air: Air, liveness: Liveness, ) !void { - const decl = mod.declPtr(decl_index); - const namespace = mod.namespacePtr(decl.src_namespace); - const structured_cfg = namespace.file_scope.mod.structured_cfg; + const gpa = self.gpa; + const decl = zcu.declPtr(decl_index); + const namespace = zcu.namespacePtr(decl.src_namespace); + const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg; var decl_gen = DeclGen{ - .gpa = self.gpa, + .gpa = gpa, .object = self, - .module = mod, + .module = zcu, .spv = &self.spv, .decl_index = decl_index, .air = air, @@ -212,19 +213,19 @@ pub const Object = struct { false => .{ .unstructured = .{} }, }, .current_block_label = undefined, - .base_line = decl.navSrcLine(mod), + .base_line = decl.navSrcLine(zcu), }; defer decl_gen.deinit(); decl_gen.genDecl() catch |err| switch (err) { error.CodegenFail => { - try mod.failed_analysis.put(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?); + try zcu.failed_analysis.put(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?); }, else => |other| { // There might be an error that happened *after* self.error_msg // was already allocated, so be sure to free it. if (decl_gen.error_msg) |error_msg| { - error_msg.deinit(mod.gpa); + error_msg.deinit(gpa); } return other; diff --git a/src/link/C.zig b/src/link/C.zig index 21245c1e3009..563604f7e09d 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -208,6 +208,8 @@ pub fn updateFunc( fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); + const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + var function: codegen.Function = .{ .value_map = codegen.CValueMap.init(gpa), .air = air, @@ -217,7 +219,7 @@ pub fn updateFunc( .dg = .{ .gpa = gpa, .zcu = zcu, - .mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod, + .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, .is_naked_fn = decl.typeOf(zcu).fnCallingConvention(zcu) == .Naked, @@ -335,11 +337,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); + const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + var object: codegen.Object = .{ .dg = .{ .gpa = gpa, .zcu = zcu, - .mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod, + .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, .is_naked_fn = false, @@ -491,7 +495,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| { const decl = zcu.declPtr(decl_index); const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; - const mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod; + const mod = zcu.namespacePtr(decl.src_namespace).fileScope(zcu).mod; try self.flushDeclBlock( zcu, mod, @@ -848,7 +852,7 @@ pub fn updateExports( const gpa = 
self.base.comp.gpa; const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { .decl_index => |decl_index| .{ - zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).file_scope.mod, + zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).fileScope(zcu).mod, .{ .decl = decl_index }, self.decl_table.getPtr(decl_index).?, (try self.exported_decls.getOrPut(gpa, decl_index)).value_ptr, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 2bb0a4c0a0be..40cbfd28a89b 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1204,7 +1204,7 @@ pub fn commitDeclState( const decl = zcu.declPtr(decl_index); const ip = &zcu.intern_pool; const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.file_scope.mod.resolved_target.result; + const target = namespace.fileScope(zcu).mod.resolved_target.result; const target_endian = target.cpu.arch.endian(); var dbg_line_buffer = &decl_state.dbg_line; diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index 24fc66367a92..da38381cbb79 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -335,29 +335,29 @@ fn finishUpdateDecl( code: []const u8, ) !void { const gpa = wasm_file.base.comp.gpa; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + const zcu = wasm_file.base.comp.module.?; + const decl = zcu.declPtr(decl_index); const decl_info = zig_object.decls_map.get(decl_index).?; const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - const full_name = try decl.fullyQualifiedName(mod); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool)); + const full_name = try decl.fullyQualifiedName(zcu); + sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&zcu.intern_pool)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); - switch (decl.typeOf(mod).zigTypeTag(mod)) { + switch (decl.typeOf(zcu).zigTypeTag(zcu)) { .Fn => { sym.index = try zig_object.appendFunction(gpa, .{ .type_index = zig_object.atom_types.get(atom_index).? }); sym.tag = .function; }, else => { - const segment_name: []const u8 = if (decl.getOwnedVariable(mod)) |variable| name: { + const segment_name: []const u8 = if (decl.getOwnedVariable(zcu)) |variable| name: { if (variable.is_const) { break :name ".rodata."; - } else if (Value.fromInterned(variable.init).isUndefDeep(mod)) { - const decl_namespace = mod.namespacePtr(decl.src_namespace); - const optimize_mode = decl_namespace.file_scope.mod.optimize_mode; + } else if (Value.fromInterned(variable.init).isUndefDeep(zcu)) { + const decl_namespace = zcu.namespacePtr(decl.src_namespace); + const optimize_mode = decl_namespace.fileScope(zcu).mod.optimize_mode; const is_initialized = switch (optimize_mode) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, @@ -382,7 +382,7 @@ fn finishUpdateDecl( // Will be freed upon freeing of decl or after cleanup of Wasm binary. const full_segment_name = try std.mem.concat(gpa, u8, &.{ segment_name, - full_name.toSlice(&mod.intern_pool), + full_name.toSlice(&zcu.intern_pool), }); errdefer gpa.free(full_segment_name); sym.tag = .data; @@ -390,7 +390,7 @@ fn finishUpdateDecl( }, } if (code.len == 0) return; - atom.alignment = decl.getAlignment(mod); + atom.alignment = decl.getAlignment(zcu); } /// Creates and initializes a new segment in the 'Data' section. 
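The recurring rewrite in the hunks above follows a single pattern: `Namespace.file_scope` is no longer dereferenced directly; call sites now resolve it through the `Zcu` via `fileScope(zcu)` before reaching fields such as `mod`. A minimal sketch of the accessor shape these call sites imply — assuming `file_scope` is now a `File.Index` resolved with `fileByIndex`, which may not match the actual implementation:

```zig
// Sketch only: shape implied by call sites like `namespace.fileScope(zcu).mod`.
// The namespace stores a File.Index rather than a *File, so per-file data
// (such as the owning Package.Module) is reached through the Zcu.
pub fn fileScope(ns: Namespace, zcu: *Zcu) *File {
    return zcu.fileByIndex(ns.file_scope);
}
```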
diff --git a/src/main.zig b/src/main.zig index d9084686c885..3ba1276abfbf 100644 --- a/src/main.zig +++ b/src/main.zig @@ -27,8 +27,6 @@ const Cache = std.Build.Cache; const target_util = @import("target.zig"); const crash_report = @import("crash_report.zig"); const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; const AstGen = std.zig.AstGen; const mingw = @import("mingw.zig"); const Server = std.zig.Server; @@ -919,7 +917,7 @@ fn buildOutputType( var contains_res_file: bool = false; var reference_trace: ?u32 = null; var pdb_out_path: ?[]const u8 = null; - var error_limit: ?Module.ErrorInt = null; + var error_limit: ?Zcu.ErrorInt = null; // These are before resolving sysroot. var extra_cflags: std.ArrayListUnmanaged([]const u8) = .{}; var extra_rcflags: std.ArrayListUnmanaged([]const u8) = .{}; @@ -1107,7 +1105,7 @@ fn buildOutputType( ); } else if (mem.eql(u8, arg, "--error-limit")) { const next_arg = args_iter.nextOrFatal(); - error_limit = std.fmt.parseUnsigned(Module.ErrorInt, next_arg, 0) catch |err| { + error_limit = std.fmt.parseUnsigned(Zcu.ErrorInt, next_arg, 0) catch |err| { fatal("unable to parse error limit '{s}': {s}", .{ next_arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "-cflags")) { @@ -5956,7 +5954,7 @@ fn cmdAstCheck( } } - var file: Module.File = .{ + var file: Zcu.File = .{ .status = .never_loaded, .source_loaded = false, .tree_loaded = false, @@ -5967,8 +5965,6 @@ fn cmdAstCheck( .tree = undefined, .zir = undefined, .mod = undefined, - .root_decl = .none, - .path_digest = undefined, }; if (zig_source_file) |file_name| { var f = fs.cwd().openFile(file_name, .{}) catch |err| { @@ -6275,7 +6271,7 @@ fn cmdDumpZir( }; defer f.close(); - var file: Module.File = .{ + var file: Zcu.File = .{ .status = .never_loaded, .source_loaded = false, .tree_loaded = false, @@ -6284,10 +6280,8 @@ fn cmdDumpZir( .source = undefined, .stat = undefined, .tree = undefined, - .zir = try Module.loadZirCache(gpa, f), + .zir = try Zcu.loadZirCache(gpa, f), .mod = undefined, - .root_decl = .none, - .path_digest = undefined, }; defer file.zir.deinit(gpa); @@ -6342,7 +6336,7 @@ fn cmdChangelist( if (stat.size > std.zig.max_src_size) return error.FileTooBig; - var file: Module.File = .{ + var file: Zcu.File = .{ .status = .never_loaded, .source_loaded = false, .tree_loaded = false, @@ -6357,8 +6351,6 @@ fn cmdChangelist( .tree = undefined, .zir = undefined, .mod = undefined, - .root_decl = .none, - .path_digest = undefined, }; file.mod = try Package.Module.createLimited(arena, .{ @@ -6431,7 +6423,7 @@ fn cmdChangelist( var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; defer inst_map.deinit(gpa); - try Module.mapOldZirToNew(gpa, old_zir, file.zir, &inst_map); + try Zcu.mapOldZirToNew(gpa, old_zir, file.zir, &inst_map); var bw = io.bufferedWriter(io.getStdOut().writer()); const stdout = bw.writer(); From 74346b0f79ca4bf67d61008030c7cc3565bff3f9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 Jul 2024 15:47:47 -0700 Subject: [PATCH 051/152] frontend: TrackedInst stores FileIndex instead of path digest The purpose of using path digest was to reference a file in a serializable manner. Now that there is a stable index associated with files, it is a superior way to accomplish that goal, since it removes one layer of indirection and makes TrackedInst 8 bytes instead of 20. The saved Zig Compiler State file for "hello world" goes from 1.3M to 1.2M with this change.
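For scale, a minimal standalone sketch of the layout change described above — the stand-in field types assume a 16-byte `Cache.BinDigest` and 32-bit `FileIndex`/`Zir.Inst.Index`, matching the "8 bytes instead of 20" arithmetic in the message; they are not the real declarations:

```zig
const std = @import("std");

// Old key: 16-byte path digest plus 4-byte ZIR instruction index.
const OldTrackedInst = extern struct {
    path_digest: [16]u8,
    inst: u32,
};

// New key: 4-byte stable file index plus 4-byte ZIR instruction index.
const NewTrackedInst = extern struct {
    file: u32,
    inst: u32,
};

comptime {
    std.debug.assert(@sizeOf(OldTrackedInst) == 20);
    std.debug.assert(@sizeOf(NewTrackedInst) == 8);
}
```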
--- src/Compilation.zig | 20 +++++----- src/InternPool.zig | 25 +++++++++++-- src/Sema.zig | 17 ++++----- src/Type.zig | 2 +- src/Zcu.zig | 91 ++++++++++++++++++++++----------------------- 5 files changed, 85 insertions(+), 70 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index cfe7a21041e7..eda5f63a589e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2649,7 +2649,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { .import => |import| try Module.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(import.file), .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst), .offset = .{ .token_abs = import.token }, }, "imported from module {s}", @@ -2658,7 +2658,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { .root => |pkg| try Module.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, }, "root of module {s}", @@ -2672,7 +2672,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { notes[num_notes] = try Module.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, }, "{} more references omitted", @@ -2684,7 +2684,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { const err = try Module.ErrorMsg.create( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, }, "file exists in multiple modules", @@ -2786,7 +2786,7 @@ pub fn saveState(comp: *Compilation) !void { .first_dependency_len = @intCast(ip.first_dependency.count()), .dep_entries_len = @intCast(ip.dep_entries.items.len), .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len), - .files_len = @intCast(zcu.files.entries.len), + .files_len = @intCast(ip.files.entries.len), }, }; addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); @@ -2811,8 +2811,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(zcu.files.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(zcu.files.values())); + addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys())); + addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values())); // TODO: compilation errors // TODO: namespaces @@ -4060,7 +4060,7 @@ fn workerAstGenFile( defer child_prog_node.end(); const zcu = comp.module.?; - zcu.astGenFile(file, path_digest, root_decl) catch |err| switch (err) { + zcu.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4477,11 +4477,11 @@ fn reportRetryableAstGenError( const src_loc: Module.LazySrcLoc = switch (src) { .root => .{ - .base_node_inst = try zcu.intern_pool.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), + .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, }, .import => |info| .{ - .base_node_inst = try zcu.intern_pool.trackZir(gpa, zcu.filePathDigest(info.importing_file), .main_struct_inst), + .base_node_inst = try 
zcu.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst), .offset = .{ .token_abs = info.import_tok }, }, }; diff --git a/src/InternPool.zig b/src/InternPool.zig index e79de2651620..18cd21d08dec 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -92,12 +92,27 @@ dep_entries: std.ArrayListUnmanaged(DepEntry) = .{}, /// garbage collection pass. free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{}, +/// Elements are ordered identically to the `import_table` field of `Zcu`. +/// +/// Unlike `import_table`, this data is serialized as part of incremental +/// compilation state. +/// +/// Key is the hash of the path to this file, used to store +/// `InternPool.TrackedInst`. +/// +/// Value is the `Decl` of the struct that represents this `File`. +files: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, OptionalDeclIndex) = .{}, + +pub const FileIndex = enum(u32) { + _, +}; + pub const TrackedInst = extern struct { - path_digest: Cache.BinDigest, + file: FileIndex, inst: Zir.Inst.Index, comptime { // The fields should be tightly packed. See also serialization logic in `Compilation.saveState`. - assert(@sizeOf(@This()) == Cache.bin_digest_len + @sizeOf(Zir.Inst.Index)); + assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(Zir.Inst.Index)); } pub const Index = enum(u32) { _, @@ -126,11 +141,11 @@ pub const TrackedInst = extern struct { pub fn trackZir( ip: *InternPool, gpa: Allocator, - path_digest: Cache.BinDigest, + file: FileIndex, inst: Zir.Inst.Index, ) Allocator.Error!TrackedInst.Index { const key: TrackedInst = .{ - .path_digest = path_digest, + .file = file, .inst = inst, }; const gop = try ip.tracked_insts.getOrPut(gpa, key); @@ -4597,6 +4612,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.dep_entries.deinit(gpa); ip.free_dep_entries.deinit(gpa); + ip.files.deinit(gpa); + ip.* = undefined; } diff --git a/src/Sema.zig b/src/Sema.zig index b2d4fd9a2428..40fe11af3a67 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -839,8 +839,7 @@ pub const Block = struct { const zcu = sema.mod; const ip = &zcu.intern_pool; const file_index = block.getFileScopeIndex(zcu); - const path_digest = zcu.filePathDigest(file_index); - return ip.trackZir(gpa, path_digest, inst); + return ip.trackZir(gpa, file_index, inst); } }; @@ -993,7 +992,7 @@ fn analyzeBodyInner( try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body); - const mod = sema.mod; + const zcu = sema.mod; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -1013,9 +1012,9 @@ fn analyzeBodyInner( // The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away.
if (build_options.enable_logging) { std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: { - const path_digest = block.src_base_inst.resolveFull(&mod.intern_pool).path_digest; - const index = mod.files.getIndex(path_digest).?; - break :sub_file_path mod.import_table.values()[index].sub_file_path; + const file_index = block.src_base_inst.resolveFull(&zcu.intern_pool).file; + const file = zcu.fileByIndex(file_index); + break :sub_file_path file.sub_file_path; }, inst }); } @@ -1776,9 +1775,9 @@ fn analyzeBodyInner( const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len); const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { + if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(mod), + err_union_ty.fmt(zcu), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); @@ -6003,7 +6002,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const path_digest = zcu.filePathDigest(result.file_index); const root_decl = zcu.fileRootDecl(result.file_index); - zcu.astGenFile(result.file, path_digest, root_decl) catch |err| + zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try zcu.ensureFileAnalyzed(result.file_index); diff --git a/src/Type.zig b/src/Type.zig index 8c70e34fceb0..49f127607057 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3455,7 +3455,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 { else => return null, }; const info = tracked.resolveFull(&zcu.intern_pool); - const file = zcu.import_table.values()[zcu.files.getIndex(info.path_digest).?]; + const file = zcu.fileByIndex(info.file); assert(file.zir_loaded); const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(info.inst)); diff --git a/src/Zcu.zig b/src/Zcu.zig index 50bbeefdf226..203238c6334a 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -105,19 +105,6 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { /// Indexes correspond 1:1 to `files`. import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, -/// Elements are ordered identically to `import_table`. -/// -/// Unlike `import_table`, this data is serialized as part of incremental -/// compilation state. -/// -/// Key is the hash of the path to this file, used to store -/// `InternPool.TrackedInst`. -/// -/// Value is the `Decl` of the struct that represents this `File`. -/// -/// Protected by Compilation's mutex. -files: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, Decl.OptionalIndex) = .{}, - /// The set of all the files which have been loaded with `@embedFile` in the Module. 
/// We keep track of this in order to iterate over it and check which files have been /// modified on the file system when an update is requested, as well as to cache @@ -572,19 +559,20 @@ pub const Decl = struct { } pub fn navSrcLine(decl: Decl, zcu: *Zcu) u32 { + const ip = &zcu.intern_pool; const tracked = decl.zir_decl_index.unwrap() orelse inst: { // generic instantiation assert(decl.has_tv); assert(decl.owns_tv); - const generic_owner_func = switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) { + const generic_owner_func = switch (ip.indexToKey(decl.val.toIntern())) { .func => |func| func.generic_owner, else => return 0, // TODO: this is probably a `variable` or something; figure this out when we finish sorting out `Decl`. }; const generic_owner_decl = zcu.declPtr(zcu.funcInfo(generic_owner_func).owner_decl); break :inst generic_owner_decl.zir_decl_index.unwrap().?; }; - const info = tracked.resolveFull(&zcu.intern_pool); - const file = zcu.import_table.values()[zcu.files.getIndex(info.path_digest).?]; + const info = tracked.resolveFull(ip); + const file = zcu.fileByIndex(info.file); assert(file.zir_loaded); const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(info.inst)); @@ -969,9 +957,7 @@ pub const File = struct { } } - pub const Index = enum(u32) { - _, - }; + pub const Index = InternPool.FileIndex; }; pub const EmbedFile = struct { @@ -2351,14 +2337,12 @@ pub const LazySrcLoc = struct { }; pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) struct { *File, Ast.Node.Index } { - const want_path_digest, const zir_inst = inst: { - const info = base_node_inst.resolveFull(&zcu.intern_pool); - break :inst .{ info.path_digest, info.inst }; - }; - const file = file: { - const index = zcu.files.getIndex(want_path_digest).?; - break :file zcu.import_table.values()[index]; + const ip = &zcu.intern_pool; + const file_index, const zir_inst = inst: { + const info = base_node_inst.resolveFull(ip); + break :inst .{ info.file, info.inst }; }; + const file = zcu.fileByIndex(file_index); assert(file.zir_loaded); const zir = file.zir; @@ -2429,7 +2413,6 @@ pub fn deinit(zcu: *Zcu) void { zcu.destroyFile(file_index); } zcu.import_table.deinit(gpa); - zcu.files.deinit(gpa); for (zcu.embed_table.keys(), zcu.embed_table.values()) |path, embed_file| { gpa.free(path); @@ -2596,7 +2579,16 @@ comptime { } } -pub fn astGenFile(zcu: *Zcu, file: *File, path_digest: Cache.BinDigest, opt_root_decl: Zcu.Decl.OptionalIndex) !void { +pub fn astGenFile( + zcu: *Zcu, + file: *File, + /// This parameter is provided separately from `file` because it is not + /// safe to access `import_table` without a lock, and this index is needed + /// in the call to `updateZirRefs`. + file_index: File.Index, + path_digest: Cache.BinDigest, + opt_root_decl: Zcu.Decl.OptionalIndex, +) !void { assert(!file.mod.isBuiltin()); const tracy = trace(@src()); @@ -2850,7 +2842,7 @@ pub fn astGenFile(zcu: *Zcu, file: *File, path_digest: Cache.BinDigest, opt_root } if (file.prev_zir) |prev_zir| { - try updateZirRefs(zcu, file, prev_zir.*, path_digest); + try updateZirRefs(zcu, file, file_index, prev_zir.*); // No need to keep previous ZIR. prev_zir.deinit(gpa); gpa.destroy(prev_zir); @@ -2939,7 +2931,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) /// This is called from the AstGen thread pool, so must acquire /// the Compilation mutex when acting on shared state. 
-fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir, path_digest: Cache.BinDigest) !void { +fn updateZirRefs(zcu: *Module, file: *File, file_index: File.Index, old_zir: Zir) !void { const gpa = zcu.gpa; const new_zir = file.zir; @@ -2955,7 +2947,7 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir, path_digest: Cache.Bin // iterating over this full set for every updated file. for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (!std.mem.eql(u8, &ti.path_digest, &path_digest)) continue; + if (ti.file != file_index) continue; const old_inst = ti.inst; ti.inst = inst_map.get(ti.inst) orelse { // Tracking failed for this instruction. Invalidate associated `src_hash` deps. @@ -3849,7 +3841,7 @@ fn getFileRootStruct( const decls = file.zir.bodySlice(extra_index, decls_len); extra_index += decls_len; - const tracked_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst); + const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst); const wip_ty = switch (try ip.getStructType(gpa, .{ .layout = .auto, .fields_len = fields_len, @@ -4151,7 +4143,7 @@ fn semaDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. try sema.declareDependency(.{ .src_hash = try ip.trackZir( gpa, - zcu.filePathDigest(decl.getFileScopeIndex(zcu)), + decl.getFileScopeIndex(zcu), decl_inst, ) }); @@ -4391,17 +4383,19 @@ pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { }; } - try zcu.files.ensureUnusedCapacity(gpa, 1); + const ip = &zcu.intern_pool; + + try ip.files.ensureUnusedCapacity(gpa, 1); if (mod.builtin_file) |builtin_file| { keep_resolved_path = true; // It's now owned by import_table. 
gop.value_ptr.* = builtin_file; try builtin_file.addReference(zcu.*, .{ .root = mod }); const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path); - zcu.files.putAssumeCapacityNoClobber(path_digest, .none); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = builtin_file, - .file_index = @enumFromInt(zcu.files.entries.len - 1), + .file_index = @enumFromInt(ip.files.entries.len - 1), .is_new = false, .is_pkg = true, }; @@ -4431,10 +4425,10 @@ pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { const path_digest = computePathDigest(zcu, mod, sub_file_path); try new_file.addReference(zcu.*, .{ .root = mod }); - zcu.files.putAssumeCapacityNoClobber(path_digest, .none); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = new_file, - .file_index = @enumFromInt(zcu.files.entries.len - 1), + .file_index = @enumFromInt(ip.files.entries.len - 1), .is_new = true, .is_pkg = true, }; @@ -4486,7 +4480,9 @@ pub fn importFile( .is_pkg = false, }; - try zcu.files.ensureUnusedCapacity(gpa, 1); + const ip = &zcu.intern_pool; + + try ip.files.ensureUnusedCapacity(gpa, 1); const new_file = try gpa.create(File); errdefer gpa.destroy(new_file); @@ -4528,10 +4524,10 @@ pub fn importFile( }; const path_digest = computePathDigest(zcu, mod, sub_file_path); - zcu.files.putAssumeCapacityNoClobber(path_digest, .none); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = new_file, - .file_index = @enumFromInt(zcu.files.entries.len - 1), + .file_index = @enumFromInt(ip.files.entries.len - 1), .is_new = true, .is_pkg = false, }; @@ -4892,7 +4888,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void } const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); - const tracked_inst = try ip.trackZir(gpa, zcu.filePathDigest(parent_file_scope_index), decl_inst); + const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); // We create a Decl for it regardless of analysis status. @@ -5743,7 +5739,7 @@ fn reportRetryableFileError( const err_msg = try ErrorMsg.create( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, zcu.filePathDigest(file_index), .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, }, format, @@ -6601,13 +6597,16 @@ pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File { /// Returns the `Decl` of the struct that represents this `File`. pub fn fileRootDecl(zcu: *const Zcu, i: File.Index) Decl.OptionalIndex { - return zcu.files.values()[@intFromEnum(i)]; + const ip = &zcu.intern_pool; + return ip.files.values()[@intFromEnum(i)]; } pub fn setFileRootDecl(zcu: *Zcu, i: File.Index, root_decl: Decl.OptionalIndex) void { - zcu.files.values()[@intFromEnum(i)] = root_decl; + const ip = &zcu.intern_pool; + ip.files.values()[@intFromEnum(i)] = root_decl; } pub fn filePathDigest(zcu: *const Zcu, i: File.Index) Cache.BinDigest { - return zcu.files.keys()[@intFromEnum(i)]; + const ip = &zcu.intern_pool; + return ip.files.keys()[@intFromEnum(i)]; } From 0d7aa1b637b26f87025b7c8b5dc20d9a92fbb7ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 29 Jun 2024 01:36:40 +0200 Subject: [PATCH 052/152] std.Target: Use arch8 as the baseline CPU model for s390x. Fixes #9442. 
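As a hedged illustration of the user-visible effect — this assumes `std.Target.Cpu.Model.baseline` takes only the architecture at this point in time, matching the switch modified below:

```zig
const std = @import("std");

test "s390x baseline CPU model is arch8" {
    // With this change, a bare s390x target should resolve to the arch8
    // model rather than falling back to the generic one.
    const model = std.Target.Cpu.Model.baseline(.s390x);
    try std.testing.expectEqualStrings("arch8", model.name);
}
```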
--- lib/std/Target.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 0f2c51acbe18..5392ac228383 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -1548,6 +1548,7 @@ pub const Cpu = struct { .riscv64 => &riscv.cpu.baseline_rv64, .x86 => &x86.cpu.pentium4, .nvptx, .nvptx64 => &nvptx.cpu.sm_20, + .s390x => &s390x.cpu.arch8, .sparc, .sparcel => &sparc.cpu.v8, .loongarch64 => &loongarch.cpu.loongarch64, From b3afba8a70af984423fc24e5b834df966693b50a Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Tue, 25 Jun 2024 20:40:50 +0100 Subject: [PATCH 053/152] std.c: Add setlocale() --- lib/std/c.zig | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/lib/std/c.zig b/lib/std/c.zig index b1da7c0a2d91..aeb48a3cc27c 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -1895,6 +1895,27 @@ pub extern "c" fn setlogmask(maskpri: c_int) c_int; pub extern "c" fn if_nametoindex([*:0]const u8) c_int; +/// These are implementation defined but share identical values in at least musl and glibc: +/// - https://git.musl-libc.org/cgit/musl/tree/include/locale.h?id=ab31e9d6a0fa7c5c408856c89df2dfb12c344039#n18 +/// - https://sourceware.org/git/?p=glibc.git;a=blob;f=locale/bits/locale.h;h=0fcbb66114be5fef0577dc9047256eb508c45919;hb=c90cfce849d010474e8cccf3e5bff49a2c8b141f#l26 +pub const LC = enum(c_int) { + CTYPE = 0, + NUMERIC = 1, + TIME = 2, + COLLATE = 3, + MONETARY = 4, + MESSAGES = 5, + ALL = 6, + PAPER = 7, + NAME = 8, + ADDRESS = 9, + TELEPHONE = 10, + MEASUREMENT = 11, + IDENTIFICATION = 12, +}; + +pub extern "c" fn setlocale(category: LC, locale: ?[*:0]const u8) [*:0]const u8; + pub const getcontext = if (builtin.target.isAndroid()) @compileError("android bionic libc does not implement getcontext") else if (native_os == .linux and builtin.target.isMusl()) From bf588f67d8c6261105f81fd468c420d662541d2a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Jul 2024 11:34:13 -0700 Subject: [PATCH 054/152] build system: add docs to LinkSystemLibraryOptions --- lib/std/Build/Module.zig | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lib/std/Build/Module.zig b/lib/std/Build/Module.zig index d40e86e46a7b..844f69c8cdf7 100644 --- a/lib/std/Build/Module.zig +++ b/lib/std/Build/Module.zig @@ -137,7 +137,18 @@ pub const IncludeDir = union(enum) { }; pub const LinkFrameworkOptions = struct { + /// Causes dynamic libraries to be linked regardless of whether they are + /// actually depended on. When false, dynamic libraries with no referenced + /// symbols will be omitted by the linker. needed: bool = false, + /// Marks all referenced symbols from this library as weak, meaning that if + /// a same-named symbol is provided by another compilation unit, instead of + /// emitting a "duplicate symbol" error, the linker will resolve all + /// references to the symbol with the strong version. + /// + /// When the linker encounters two weak symbols, the chosen one is + /// determined by the order compilation units are provided to the linker, + /// priority given to later ones. weak: bool = false, }; @@ -414,7 +425,18 @@ pub fn iterateDependencies( } pub const LinkSystemLibraryOptions = struct { + /// Causes dynamic libraries to be linked regardless of whether they are + /// actually depended on. When false, dynamic libraries with no referenced + /// symbols will be omitted by the linker. 
needed: bool = false, + /// Marks all referenced symbols from this library as weak, meaning that if + /// a same-named symbol is provided by another compilation unit, instead of + /// emitting a "duplicate symbol" error, the linker will resolve all + /// references to the symbol with the strong version. + /// + /// When the linker encounters two weak symbols, the chosen one is + /// determined by the order compilation units are provided to the linker, + /// priority given to later ones. weak: bool = false, use_pkg_config: SystemLib.UsePkgConfig = .yes, preferred_link_mode: std.builtin.LinkMode = .dynamic, From c40708a2ce4f6ed1adcc1de39fc7b4fc27db32f8 Mon Sep 17 00:00:00 2001 From: Erik Arvstedt Date: Sat, 6 Jul 2024 15:22:42 +0200 Subject: [PATCH 055/152] cmake/findllvm: fix incorrect lib dir setup for zig2 Line `link_directories("${CMAKE_PREFIX_PATH}/lib")` was evaluated as `link_directories("/lib")` in the default case of `CMAKE_PREFIX_PATH` being empty. This caused cmake to add `-L/lib -Wl,-rpath,/lib` to the zig2 build flags. This could result in errors on systems where libraries set via `CMAKE_LIBRARY_PATH` had conflicting versions in `/lib`: - `-L/lib` could cause linking zig2 to fail - `-Wl,-rpath,/lib` adds `/lib` as the first entry of the zig2 `RPATH`. This could cause running zig2 (to build zig3) to fail. In case of conflicting lib dirs, cmake emitted this warning, which is now fixed: ``` Cannot generate a safe runtime search path for target zig2 because files in some directories may conflict with libraries in implicit directories: runtime library [libclang-cpp.so.18.1] in /nix/store/...-clang-18.1.5-lib/lib may be hidden by files in: /lib ``` --- cmake/Findllvm.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/Findllvm.cmake b/cmake/Findllvm.cmake index ed8acba37e32..c4eb49fe7600 100644 --- a/cmake/Findllvm.cmake +++ b/cmake/Findllvm.cmake @@ -176,7 +176,6 @@ if(ZIG_USE_LLVM_CONFIG) OUTPUT_STRIP_TRAILING_WHITESPACE) string(REPLACE " " ";" LLVM_INCLUDE_DIRS "${LLVM_INCLUDE_DIRS_SPACES}") - link_directories("${CMAKE_PREFIX_PATH}/lib") link_directories("${LLVM_LIBDIRS}") else() # Here we assume that we're cross compiling with Zig, of course. 
No reason From ae919915f6c3c1827311ebc6ec3c5883a1cec08e Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Sun, 7 Jul 2024 12:10:19 +0200 Subject: [PATCH 056/152] std.Build.Cache.Path: fix makeOpenPath signature --- lib/std/Build/Cache/Path.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 27dd15184b5c..d7266da9b0cc 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -58,7 +58,7 @@ pub fn openFile( return p.root_dir.handle.openFile(joined_path, flags); } -pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.OpenDirOptions) !fs.Dir { +pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenDirOptions) !fs.Dir { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ From 64e84a452b043599e96e1e6af895b9959628f1fe Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Sun, 7 Jul 2024 12:13:07 +0200 Subject: [PATCH 057/152] std.ArrayHashMap: unmanaged holds the pointer stability lock --- lib/std/array_hash_map.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index ba086f876446..c82c55337021 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -427,7 +427,7 @@ pub fn ArrayHashMap( /// Set the map to an empty state, making deinitialization a no-op, and /// returning a copy of the original. pub fn move(self: *Self) Self { - self.pointer_stability.assertUnlocked(); + self.unmanaged.pointer_stability.assertUnlocked(); const result = self.*; self.unmanaged = .{}; return result; From 815022c87b34bae92d98475685425063ca36cbf7 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Sun, 7 Jul 2024 12:16:14 +0200 Subject: [PATCH 058/152] std.coff: fix setAlignment --- lib/std/coff.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/coff.zig b/lib/std/coff.zig index 1461ac7fbcfa..211b1a181938 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -542,7 +542,7 @@ pub const SectionHeader = extern struct { pub fn setAlignment(self: *SectionHeader, new_alignment: u16) void { assert(new_alignment > 0 and new_alignment <= 8192); - self.flags.ALIGN = std.math.log2(new_alignment); + self.flags.ALIGN = @intCast(std.math.log2(new_alignment)); } pub fn isCode(self: SectionHeader) bool { From 7205756a69b2fb8641e02d23592ac12206f98052 Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Sun, 7 Jul 2024 12:28:28 +0200 Subject: [PATCH 059/152] Step.TranslateC: fix defineCMacro --- lib/std/Build/Step/TranslateC.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index e07744c2da15..d7c62ed1948e 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -107,7 +107,7 @@ pub fn addCheckFile(translate_c: *TranslateC, expected_matches: []const []const /// If the value is omitted, it is set to 1. /// `name` and `value` need not live longer than the function call. 
pub fn defineCMacro(translate_c: *TranslateC, name: []const u8, value: ?[]const u8) void { - const macro = std.Build.constructranslate_cMacro(translate_c.step.owner.allocator, name, value); + const macro = translate_c.step.owner.fmt("{s}={s}", .{ name, value orelse "1" }); translate_c.c_macros.append(macro) catch @panic("OOM"); } From 8f20e81b8816aadd8ceb1b04bd3727cc1d124464 Mon Sep 17 00:00:00 2001 From: Shun Sakai Date: Mon, 8 Jul 2024 05:18:33 +0900 Subject: [PATCH 060/152] std.crypto.pwhash: Add recommended parameters (#20527) These parameters are taken from the OWASP cheat sheet. --- lib/std/crypto/argon2.zig | 4 ++++ lib/std/crypto/bcrypt.zig | 6 ++++++ lib/std/crypto/scrypt.zig | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig index 8440590a9843..74a96383d59d 100644 --- a/lib/std/crypto/argon2.zig +++ b/lib/std/crypto/argon2.zig @@ -91,6 +91,10 @@ pub const Params = struct { /// Baseline parameters for offline usage using argon2id type pub const sensitive_2id = Self.fromLimits(4, 1073741824); + /// Recommended parameters for argon2id type according to the + /// [OWASP cheat sheet](https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html). + pub const owasp_2id = Self{ .t = 2, .m = 19 * 1024, .p = 1 }; + /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u32, mem_limit: usize) Self { const m = mem_limit / 1024; diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig index 7c505f542fd5..ab7d92a7e6cc 100644 --- a/lib/std/crypto/bcrypt.zig +++ b/lib/std/crypto/bcrypt.zig @@ -408,8 +408,14 @@ pub const State = struct { /// bcrypt parameters pub const Params = struct { + const Self = @This(); + /// log2 of the number of rounds rounds_log: u6, + + /// Minimum recommended parameters according to the + /// [OWASP cheat sheet](https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html). + pub const owasp = Self{ .rounds_log = 10 }; }; /// Compute a hash of a password using 2^rounds_log rounds of the bcrypt key stretching function. diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index 96943b06f12d..7968d8736a33 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -141,6 +141,10 @@ pub const Params = struct { /// Baseline parameters for offline usage pub const sensitive = Self.fromLimits(33554432, 1073741824); + /// Recommended parameters according to the + /// [OWASP cheat sheet](https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html).
+ pub const owasp = Self{ .ln = 17, .r = 8, .p = 1 }; + /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self { const ops = @max(32768, ops_limit); From 525f341f33af9b8aad53931fd5511f00a82cb090 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 16:10:53 -0400 Subject: [PATCH 061/152] Zcu: introduce `PerThread` and pass to all the functions --- CMakeLists.txt | 1 + lib/std/Thread/Pool.zig | 96 +- src/Air.zig | 4 +- src/Compilation.zig | 60 +- src/InternPool.zig | 242 +- src/RangeSet.zig | 32 +- src/Sema.zig | 4968 ++++++++++++++++-------------- src/Sema/bitcast.zig | 188 +- src/Sema/comptime_ptr_access.zig | 111 +- src/Type.zig | 737 ++--- src/Value.zig | 2247 +++++++------- src/Zcu.zig | 2264 +------------- src/Zcu/PerThread.zig | 2102 +++++++++++++ src/arch/aarch64/CodeGen.zig | 383 ++- src/arch/aarch64/Emit.zig | 6 +- src/arch/aarch64/abi.zig | 60 +- src/arch/arm/CodeGen.zig | 383 ++- src/arch/arm/Emit.zig | 6 +- src/arch/arm/abi.zig | 62 +- src/arch/riscv64/CodeGen.zig | 483 +-- src/arch/riscv64/Emit.zig | 5 +- src/arch/riscv64/Lower.zig | 12 +- src/arch/riscv64/abi.zig | 55 +- src/arch/sparc64/CodeGen.zig | 220 +- src/arch/sparc64/Emit.zig | 6 +- src/arch/wasm/CodeGen.zig | 961 +++--- src/arch/wasm/abi.zig | 40 +- src/arch/x86_64/CodeGen.zig | 1206 ++++---- src/arch/x86_64/Lower.zig | 6 +- src/arch/x86_64/abi.zig | 70 +- src/codegen.zig | 269 +- src/codegen/c.zig | 745 +++-- src/codegen/c/Type.zig | 66 +- src/codegen/llvm.zig | 1431 +++++---- src/codegen/spirv.zig | 450 +-- src/crash_report.zig | 6 +- src/link.zig | 49 +- src/link/C.zig | 59 +- src/link/Coff.zig | 85 +- src/link/Coff/lld.zig | 5 +- src/link/Dwarf.zig | 222 +- src/link/Elf.zig | 39 +- src/link/Elf/ZigObject.zig | 115 +- src/link/MachO.zig | 33 +- src/link/MachO/ZigObject.zig | 82 +- src/link/NvPtx.zig | 23 +- src/link/Plan9.zig | 87 +- src/link/SpirV.zig | 32 +- src/link/Wasm.zig | 51 +- src/link/Wasm/ZigObject.zig | 100 +- src/main.zig | 8 +- src/mutable_value.zig | 100 +- src/print_air.zig | 38 +- src/print_value.zig | 105 +- src/print_zir.zig | 9 +- src/register_manager.zig | 2 - 56 files changed, 11266 insertions(+), 9961 deletions(-) create mode 100644 src/Zcu/PerThread.zig diff --git a/CMakeLists.txt b/CMakeLists.txt index a33df3a096b5..195bfe04c265 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -525,6 +525,7 @@ set(ZIG_STAGE2_SOURCES src/Type.zig src/Value.zig src/Zcu.zig + src/Zcu/PerThread.zig src/arch/aarch64/CodeGen.zig src/arch/aarch64/Emit.zig src/arch/aarch64/Mir.zig diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 846c7035a754..03ca8ffc8eba 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -9,17 +9,19 @@ run_queue: RunQueue = .{}, is_running: bool = true, allocator: std.mem.Allocator, threads: []std.Thread, +ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void), const RunQueue = std.SinglyLinkedList(Runnable); const Runnable = struct { runFn: RunProto, }; -const RunProto = *const fn (*Runnable) void; +const RunProto = *const fn (*Runnable, id: ?usize) void; pub const Options = struct { allocator: std.mem.Allocator, n_jobs: ?u32 = null, + track_ids: bool = false, }; pub fn init(pool: *Pool, options: Options) !void { @@ -28,6 +30,7 @@ pub fn init(pool: *Pool, options: Options) !void { pool.* = .{ .allocator = allocator, .threads = &[_]std.Thread{}, + .ids = .{}, }; if (builtin.single_threaded) { @@ -35,6 +38,10 @@ pub fn init(pool: *Pool, options: Options) !void { } 
const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1); + if (options.track_ids) { + try pool.ids.ensureTotalCapacity(allocator, 1 + thread_count); + pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); + } // kill and join any threads we spawned and free memory on error. pool.threads = try allocator.alloc(std.Thread, thread_count); @@ -49,6 +56,7 @@ pub fn init(pool: *Pool, options: Options) !void { pub fn deinit(pool: *Pool) void { pool.join(pool.threads.len); // kill and join all threads. + pool.ids.deinit(pool.allocator); pool.* = undefined; } @@ -96,7 +104,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable) void { + fn runFn(runnable: *Runnable, _: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); @@ -134,6 +142,70 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args pool.cond.signal(); } +/// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and +/// `WaitGroup.finish` after it returns. +/// +/// The first argument passed to `func` is a dense `usize` thread id, the rest +/// of the arguments are passed from `args`. Requires the pool to have been +/// initialized with `.track_ids = true`. +/// +/// In the case that queuing the function call fails to allocate memory, or the +/// target is single-threaded, the function is called directly. +pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args: anytype) void { + wait_group.start(); + + if (builtin.single_threaded) { + @call(.auto, func, .{0} ++ args); + wait_group.finish(); + return; + } + + const Args = @TypeOf(args); + const Closure = struct { + arguments: Args, + pool: *Pool, + run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, + wait_group: *WaitGroup, + + fn runFn(runnable: *Runnable, id: ?usize) void { + const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); + const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); + @call(.auto, func, .{id.?} ++ closure.arguments); + closure.wait_group.finish(); + + // The thread pool's allocator is protected by the mutex. + const mutex = &closure.pool.mutex; + mutex.lock(); + defer mutex.unlock(); + + closure.pool.allocator.destroy(closure); + } + }; + + { + pool.mutex.lock(); + + const closure = pool.allocator.create(Closure) catch { + const id = pool.ids.getIndex(std.Thread.getCurrentId()); + pool.mutex.unlock(); + @call(.auto, func, .{id.?} ++ args); + wait_group.finish(); + return; + }; + closure.* = .{ + .arguments = args, + .pool = pool, + .wait_group = wait_group, + }; + + pool.run_queue.prepend(&closure.run_node); + pool.mutex.unlock(); + } + + // Notify waiting threads outside the lock to try and keep the critical section small. 
+ pool.cond.signal(); +} + pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void { if (builtin.single_threaded) { @call(.auto, func, args); @@ -181,14 +253,16 @@ fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); + const id = if (pool.ids.count() > 0) pool.ids.count() else null; + if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); + while (true) { while (pool.run_queue.popFirst()) |run_node| { // Temporarily unlock the mutex in order to execute the run_node pool.mutex.unlock(); defer pool.mutex.lock(); - const runFn = run_node.data.runFn; - runFn(&run_node.data); + run_node.data.runFn(&run_node.data, id); } // Stop executing instead of waiting if the thread pool is no longer running. @@ -201,16 +275,18 @@ fn worker(pool: *Pool) void { } pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { + var id: ?usize = null; + while (!wait_group.isDone()) { - if (blk: { - pool.mutex.lock(); - defer pool.mutex.unlock(); - break :blk pool.run_queue.popFirst(); - }) |run_node| { - run_node.data.runFn(&run_node.data); + pool.mutex.lock(); + if (pool.run_queue.popFirst()) |run_node| { + id = id orelse pool.ids.getIndex(std.Thread.getCurrentId()); + pool.mutex.unlock(); + run_node.data.runFn(&run_node.data, id); continue; } + pool.mutex.unlock(); wait_group.wait(); return; } diff --git a/src/Air.zig b/src/Air.zig index 5799c31b259c..b291dbee7a9b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1563,12 +1563,12 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref { } /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { +pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value { if (inst.toInterned()) |ip_index| { return Value.fromInterned(ip_index); } const index = inst.toIndex().?; - return air.typeOfIndex(index, &mod.intern_pool).onePossibleValue(mod); + return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt); } pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { diff --git a/src/Compilation.zig b/src/Compilation.zig index eda5f63a589e..d3ff33808020 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2146,6 +2146,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try comp.performAllTheWork(main_progress_node); if (comp.module) |zcu| { + const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main }; + if (build_options.enable_debug_extensions and comp.verbose_intern_pool) { std.debug.print("intern pool stats for '{s}':\n", .{ comp.root_name, @@ -2165,10 +2167,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that // have been discovered and not filtered out. - try zcu.populateTestFunctions(main_progress_node); + try pt.populateTestFunctions(main_progress_node); } - try zcu.processExports(); + try pt.processExports(); } if (comp.totalErrorCount() != 0) { @@ -2247,7 +2249,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } - try flush(comp, arena, main_progress_node); + try flush(comp, arena, .main, main_progress_node); if (comp.totalErrorCount() != 0) return; // Failure here only means an unnecessary cache miss. 
@@ -2264,16 +2266,16 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { whole.lock = man.toOwnedLock(); }, .incremental => { - try flush(comp, arena, main_progress_node); + try flush(comp, arena, .main, main_progress_node); if (comp.totalErrorCount() != 0) return; }, } } -fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void { +fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { if (comp.bin_file) |lf| { // This is needed before reading the error flags. - lf.flush(arena, prog_node) catch |err| switch (err) { + lf.flush(arena, tid, prog_node) catch |err| switch (err) { error.FlushFailure => {}, // error reported through link_error_flags error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr else => |e| return e, @@ -3419,7 +3421,7 @@ pub fn performAllTheWork( while (true) { if (comp.work_queue.readItem()) |work_item| { - try processOneJob(comp, work_item, main_progress_node); + try processOneJob(0, comp, work_item, main_progress_node); continue; } if (comp.module) |zcu| { @@ -3447,11 +3449,11 @@ pub fn performAllTheWork( } } -fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { +fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { switch (job) { .codegen_decl => |decl_index| { - const zcu = comp.module.?; - const decl = zcu.declPtr(decl_index); + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + const decl = pt.zcu.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3469,7 +3471,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo assert(decl.has_tv); - try zcu.linkerUpdateDecl(decl_index); + try pt.linkerUpdateDecl(decl_index); return; }, } @@ -3478,16 +3480,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo const named_frame = tracy.namedFrame("codegen_func"); defer named_frame.end(); - const zcu = comp.module.?; + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; // This call takes ownership of `func.air`. 
- try zcu.linkerUpdateFunc(func.func, func.air); + try pt.linkerUpdateFunc(func.func, func.air); }, .analyze_func => |func| { const named_frame = tracy.namedFrame("analyze_func"); defer named_frame.end(); - const zcu = comp.module.?; - zcu.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; @@ -3496,8 +3498,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++ "not decl analysis, which is too early to know about @export calls"); - const zcu = comp.module.?; - const decl = zcu.declPtr(decl_index); + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + const decl = pt.zcu.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3515,7 +3517,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo defer named_frame.end(); const gpa = comp.gpa; - const emit_h = zcu.emit_h.?; + const emit_h = pt.zcu.emit_h.?; _ = try emit_h.decl_table.getOrPut(gpa, decl_index); const decl_emit_h = emit_h.declPtr(decl_index); const fwd_decl = &decl_emit_h.fwd_decl; @@ -3523,11 +3525,11 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo var ctypes_arena = std.heap.ArenaAllocator.init(gpa); defer ctypes_arena.deinit(); - const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + const file_scope = pt.zcu.namespacePtr(decl.src_namespace).fileScope(pt.zcu); var dg: c_codegen.DeclGen = .{ .gpa = gpa, - .zcu = zcu, + .pt = pt, .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, @@ -3557,25 +3559,25 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo } }, .analyze_decl => |decl_index| { - const zcu = comp.module.?; - zcu.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; - const decl = zcu.declPtr(decl_index); + const decl = pt.zcu.declPtr(decl_index); if (decl.kind == .@"test" and comp.config.is_test) { // Tests are always emitted in test binaries. The decl_refs are created by // Zcu.populateTestFunctions, but this will not queue body analysis, so do // that now. 
- try zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); + try pt.zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); } }, .resolve_type_fully => |ty| { const named_frame = tracy.namedFrame("resolve_type_fully"); defer named_frame.end(); - const zcu = comp.module.?; - Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; @@ -3603,12 +3605,12 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, - .analyze_mod => |pkg| { + .analyze_mod => |mod| { const named_frame = tracy.namedFrame("analyze_mod"); defer named_frame.end(); - const zcu = comp.module.?; - zcu.semaPkg(pkg) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.semaPkg(mod) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; diff --git a/src/InternPool.zig b/src/InternPool.zig index 18cd21d08dec..133874318228 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4548,17 +4548,14 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { // This inserts all the statically-known values into the intern pool in the // order expected. - for (static_keys[0..@intFromEnum(Index.empty_struct_type)]) |key| { - _ = ip.get(gpa, key) catch unreachable; - } - _ = ip.getAnonStructType(gpa, .{ - .types = &.{}, - .names = &.{}, - .values = &.{}, - }) catch unreachable; - for (static_keys[@intFromEnum(Index.empty_struct_type) + 1 ..]) |key| { - _ = ip.get(gpa, key) catch unreachable; - } + for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) { + .empty_struct_type => assert(try ip.getAnonStructType(gpa, .main, .{ + .types = &.{}, + .names = &.{}, + .values = &.{}, + }) == .empty_struct_type), + else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index), + }; if (std.debug.runtime_safety) { // Sanity check. 
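The rewritten bootstrap loop above folds the old three-part iteration into a
single pass that also asserts every statically-known key interns to exactly
the `Index` whose slot it occupies in `static_keys`. A minimal sketch of that
invariant, eliding the `empty_struct_type` special case (identifiers as in
the diff; the surrounding `init` machinery is assumed):

    for (&static_keys, 0..) |key, key_index| {
        const expected: Index = @enumFromInt(key_index);
        // Bootstrap runs on a single thread, hence the literal `.main` id.
        assert(try ip.get(gpa, .main, key) == expected);
    }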
@@ -5242,7 +5239,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key } }; } -pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { +pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); if (gop.found_existing) return @enumFromInt(gop.index); @@ -5266,8 +5263,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { _ = ip.map.pop(); var new_key = key; new_key.ptr_type.flags.size = .Many; - const ptr_type_index = try ip.get(gpa, new_key); + const ptr_type_index = try ip.get(gpa, tid, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .type_slice, @@ -5519,7 +5517,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { else => unreachable, } _ = ip.map.pop(); - const index_index = try ip.get(gpa, .{ .int = .{ + const index_index = try ip.get(gpa, tid, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); @@ -5932,7 +5930,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const elem = switch (aggregate.storage) { .bytes => |bytes| elem: { _ = ip.map.pop(); - const elem = try ip.get(gpa, .{ .int = .{ + const elem = try ip.get(gpa, tid, .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(0, ip) }, } }); @@ -6074,7 +6072,12 @@ pub const UnionTypeInit = struct { }, }; -pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!WipNamespaceType.Result { +pub fn getUnionType( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + ini: UnionTypeInit, +) Allocator.Error!WipNamespaceType.Result { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) { .declared => |d| .{ .declared = .{ @@ -6221,6 +6224,7 @@ pub const StructTypeInit = struct { pub fn getStructType( ip: *InternPool, gpa: Allocator, + _: Zcu.PerThread.Id, ini: StructTypeInit, ) Allocator.Error!WipNamespaceType.Result { const adapter: KeyAdapter = .{ .intern_pool = ip }; @@ -6396,7 +6400,12 @@ pub const AnonStructTypeInit = struct { values: []const Index, }; -pub fn getAnonStructType(ip: *InternPool, gpa: Allocator, ini: AnonStructTypeInit) Allocator.Error!Index { +pub fn getAnonStructType( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + ini: AnonStructTypeInit, +) Allocator.Error!Index { assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); @@ -6450,7 +6459,12 @@ pub const GetFuncTypeKey = struct { addrspace_is_generic: bool = false, }; -pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index { +pub fn getFuncType( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + key: GetFuncTypeKey, +) Allocator.Error!Index { // Validate input parameters. 
assert(key.return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); @@ -6503,7 +6517,12 @@ pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocat return @enumFromInt(ip.items.len - 1); } -pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: Key.ExternFunc) Allocator.Error!Index { +pub fn getExternFunc( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + key: Key.ExternFunc, +) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter); if (gop.found_existing) return @enumFromInt(gop.index); @@ -6531,7 +6550,12 @@ pub const GetFuncDeclKey = struct { is_noinline: bool, }; -pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { +pub fn getFuncDecl( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + key: GetFuncDeclKey, +) Allocator.Error!Index { // The strategy here is to add the function type unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. @@ -6598,7 +6622,12 @@ pub const GetFuncDeclIesKey = struct { rbrace_column: u32, }; -pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index { +pub fn getFuncDeclIes( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + key: GetFuncDeclIesKey, +) Allocator.Error!Index { // Validate input parameters. assert(key.bare_return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); @@ -6707,6 +6736,7 @@ pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) A pub fn getErrorSetType( ip: *InternPool, gpa: Allocator, + _: Zcu.PerThread.Id, names: []const NullTerminatedString, ) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); @@ -6770,11 +6800,16 @@ pub const GetFuncInstanceKey = struct { inferred_error_set: bool, }; -pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index { +pub fn getFuncInstance( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + arg: GetFuncInstanceKey, +) Allocator.Error!Index { if (arg.inferred_error_set) - return getFuncInstanceIes(ip, gpa, arg); + return getFuncInstanceIes(ip, gpa, tid, arg); - const func_ty = try ip.getFuncType(gpa, .{ + const func_ty = try ip.getFuncType(gpa, tid, .{ .param_types = arg.param_types, .return_type = arg.bare_return_type, .noalias_bits = arg.noalias_bits, @@ -6844,6 +6879,7 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) pub fn getFuncInstanceIes( ip: *InternPool, gpa: Allocator, + _: Zcu.PerThread.Id, arg: GetFuncInstanceKey, ) Allocator.Error!Index { // Validate input parameters. 
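Note that most getters above accept the new id as `_: Zcu.PerThread.Id`: the
parameter is threaded through and immediately discarded, so call sites can
already pass `pt.tid` uniformly while the bodies remain effectively
single-threaded. A hedged sketch of the signature pattern (`getExample` and
`ExampleKey` are hypothetical stand-ins, not names from the patch):

    pub fn getExample(
        ip: *InternPool,
        gpa: Allocator,
        _: Zcu.PerThread.Id, // reserved for a future thread-aware pool
        key: ExampleKey,
    ) Allocator.Error!Index {
        // ... body unchanged from the single-threaded version ...
    }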
@@ -6955,7 +6991,6 @@ pub fn getFuncInstanceIes( assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .func_type = extraFuncType(ip, func_type_extra_index), }, adapter).found_existing); - return finishFuncInstance( ip, gpa, @@ -7096,6 +7131,7 @@ pub const WipEnumType = struct { pub fn getEnumType( ip: *InternPool, gpa: Allocator, + _: Zcu.PerThread.Id, ini: EnumTypeInit, ) Allocator.Error!WipEnumType.Result { const adapter: KeyAdapter = .{ .intern_pool = ip }; @@ -7172,7 +7208,7 @@ pub fn getEnumType( break :m values_map.toOptional(); }; errdefer if (ini.has_values) { - _ = ip.map.pop(); + _ = ip.maps.pop(); }; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + @@ -7245,7 +7281,12 @@ const GeneratedTagEnumTypeInit = struct { /// Creates an enum type which was automatically-generated as the tag type of a /// `union` with no explicit tag type. Since this is only called once per union /// type, it asserts that no matching type yet exists. -pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTagEnumTypeInit) Allocator.Error!Index { +pub fn getGeneratedTagEnumType( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + ini: GeneratedTagEnumTypeInit, +) Allocator.Error!Index { assert(ip.isUnion(ini.owner_union_ty)); assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); @@ -7342,7 +7383,12 @@ pub const OpaqueTypeInit = struct { }, }; -pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result { +pub fn getOpaqueType( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + ini: OpaqueTypeInit, +) Allocator.Error!WipNamespaceType.Result { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) { .declared => |d| .{ .declared = .{ @@ -7680,23 +7726,23 @@ test "basic usage" { var ip: InternPool = .{}; defer ip.deinit(gpa); - const i32_type = try ip.get(gpa, .{ .int_type = .{ + const i32_type = try ip.get(gpa, .main, .{ .int_type = .{ .signedness = .signed, .bits = 32, } }); - const array_i32 = try ip.get(gpa, .{ .array_type = .{ + const array_i32 = try ip.get(gpa, .main, .{ .array_type = .{ .len = 10, .child = i32_type, .sentinel = .none, } }); - const another_i32_type = try ip.get(gpa, .{ .int_type = .{ + const another_i32_type = try ip.get(gpa, .main, .{ .int_type = .{ .signedness = .signed, .bits = 32, } }); try std.testing.expect(another_i32_type == i32_type); - const another_array_i32 = try ip.get(gpa, .{ .array_type = .{ + const another_array_i32 = try ip.get(gpa, .main, .{ .array_type = .{ .len = 10, .child = i32_type, .sentinel = .none, @@ -7766,48 +7812,54 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index { /// * payload => error union /// * fn <=> fn /// * aggregate <=> aggregate (where children can also be coerced) -pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { +pub fn getCoerced( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + val: Index, + new_ty: Index, +) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; const tags = ip.items.items(.tag); switch (val) { - .undef => return ip.get(gpa, .{ .undef = new_ty }), + .undef => return ip.get(gpa, tid, .{ .undef = new_ty }), .null_value => { - if (ip.isOptionalType(new_ty)) return ip.get(gpa, .{ .opt = .{ + if (ip.isOptionalType(new_ty)) return ip.get(gpa, tid, .{ .opt = .{ .ty = new_ty, 
.val = .none, } }); if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) { - .One, .Many, .C => return ip.get(gpa, .{ .ptr = .{ + .One, .Many, .C => return ip.get(gpa, tid, .{ .ptr = .{ .ty = new_ty, .base_addr = .int, .byte_offset = 0, } }), - .Slice => return ip.get(gpa, .{ .slice = .{ + .Slice => return ip.get(gpa, tid, .{ .slice = .{ .ty = new_ty, - .ptr = try ip.get(gpa, .{ .ptr = .{ + .ptr = try ip.get(gpa, tid, .{ .ptr = .{ .ty = ip.slicePtrType(new_ty), .base_addr = .int, .byte_offset = 0, } }), - .len = try ip.get(gpa, .{ .undef = .usize_type }), + .len = try ip.get(gpa, tid, .{ .undef = .usize_type }), } }), }; }, else => switch (tags[@intFromEnum(val)]) { - .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), .func_coerced => { const extra_index = ip.items.items(.data)[@intFromEnum(val)]; const func: Index = @enumFromInt( ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], ); switch (tags[@intFromEnum(func)]) { - .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), else => unreachable, } }, @@ -7816,9 +7868,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al } switch (ip.indexToKey(val)) { - .undef => return ip.get(gpa, .{ .undef = new_ty }), + .undef => return ip.get(gpa, tid, .{ .undef = new_ty }), .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) - return ip.get(gpa, .{ .extern_func = .{ + return ip.get(gpa, tid, .{ .extern_func = .{ .ty = new_ty, .decl = extern_func.decl, .lib_name = extern_func.lib_name, @@ -7827,12 +7879,12 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .func => unreachable, .int => |int| switch (ip.indexToKey(new_ty)) { - .enum_type => return ip.get(gpa, .{ .enum_tag = .{ + .enum_type => return ip.get(gpa, tid, .{ .enum_tag = .{ .ty = new_ty, - .int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty), + .int = try ip.getCoerced(gpa, tid, val, ip.loadEnumType(new_ty).tag_ty), } }), .ptr_type => switch (int.storage) { - inline .u64, .i64 => |int_val| return ip.get(gpa, .{ .ptr = .{ + inline .u64, .i64 => |int_val| return ip.get(gpa, tid, .{ .ptr = .{ .ty = new_ty, .base_addr = .int, .byte_offset = @intCast(int_val), @@ -7841,7 +7893,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .lazy_align, .lazy_size => {}, }, else => if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, int, new_ty), + return ip.getCoercedInts(gpa, tid, int, new_ty), }, .float => |float| switch (ip.indexToKey(new_ty)) { .simple_type => |simple| switch (simple) { @@ -7852,7 +7904,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .f128, .c_longdouble, .comptime_float, - => return ip.get(gpa, .{ .float = .{ + => return ip.get(gpa, tid, .{ .float = .{ .ty = new_ty, .storage = float.storage, } }), @@ -7861,17 +7913,17 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al else => {}, }, .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, 
ip.indexToKey(enum_tag.int).int, new_ty), + return ip.getCoercedInts(gpa, tid, ip.indexToKey(enum_tag.int).int, new_ty), .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { .enum_type => { const enum_type = ip.loadEnumType(new_ty); const index = enum_type.nameIndex(ip, enum_literal).?; - return ip.get(gpa, .{ .enum_tag = .{ + return ip.get(gpa, tid, .{ .enum_tag = .{ .ty = new_ty, .int = if (enum_type.values.len != 0) enum_type.values.get(ip)[index] else - try ip.get(gpa, .{ .int = .{ + try ip.get(gpa, tid, .{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = index }, } }), @@ -7880,22 +7932,22 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al else => {}, }, .slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .Slice) - return ip.get(gpa, .{ .slice = .{ + return ip.get(gpa, tid, .{ .slice = .{ .ty = new_ty, - .ptr = try ip.getCoerced(gpa, slice.ptr, ip.slicePtrType(new_ty)), + .ptr = try ip.getCoerced(gpa, tid, slice.ptr, ip.slicePtrType(new_ty)), .len = slice.len, } }) else if (ip.isIntegerType(new_ty)) - return ip.getCoerced(gpa, slice.ptr, new_ty), + return ip.getCoerced(gpa, tid, slice.ptr, new_ty), .ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice) - return ip.get(gpa, .{ .ptr = .{ + return ip.get(gpa, tid, .{ .ptr = .{ .ty = new_ty, .base_addr = ptr.base_addr, .byte_offset = ptr.byte_offset, } }) else if (ip.isIntegerType(new_ty)) switch (ptr.base_addr) { - .int => return ip.get(gpa, .{ .int = .{ + .int => return ip.get(gpa, tid, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = @intCast(ptr.byte_offset) }, } }), @@ -7904,44 +7956,44 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .opt => |opt| switch (ip.indexToKey(new_ty)) { .ptr_type => |ptr_type| return switch (opt.val) { .none => switch (ptr_type.flags.size) { - .One, .Many, .C => try ip.get(gpa, .{ .ptr = .{ + .One, .Many, .C => try ip.get(gpa, tid, .{ .ptr = .{ .ty = new_ty, .base_addr = .int, .byte_offset = 0, } }), - .Slice => try ip.get(gpa, .{ .slice = .{ + .Slice => try ip.get(gpa, tid, .{ .slice = .{ .ty = new_ty, - .ptr = try ip.get(gpa, .{ .ptr = .{ + .ptr = try ip.get(gpa, tid, .{ .ptr = .{ .ty = ip.slicePtrType(new_ty), .base_addr = .int, .byte_offset = 0, } }), - .len = try ip.get(gpa, .{ .undef = .usize_type }), + .len = try ip.get(gpa, tid, .{ .undef = .usize_type }), } }), }, - else => |payload| try ip.getCoerced(gpa, payload, new_ty), + else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty), }, - .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ + .opt_type => |child_type| return try ip.get(gpa, tid, .{ .opt = .{ .ty = new_ty, .val = switch (opt.val) { .none => .none, - else => try ip.getCoerced(gpa, opt.val, child_type), + else => try ip.getCoerced(gpa, tid, opt.val, child_type), }, } }), else => {}, }, .err => |err| if (ip.isErrorSetType(new_ty)) - return ip.get(gpa, .{ .err = .{ + return ip.get(gpa, tid, .{ .err = .{ .ty = new_ty, .name = err.name, } }) else if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ + return ip.get(gpa, tid, .{ .error_union = .{ .ty = new_ty, .val = .{ .err_name = err.name }, } }), .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ + return ip.get(gpa, tid, .{ .error_union = .{ .ty = new_ty, .val = error_union.val, } }), @@ -7960,20 +8012,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, 
new_ty: Index) Al }; if (old_ty_child != new_ty_child) break :direct; switch (aggregate.storage) { - .bytes => |bytes| return ip.get(gpa, .{ .aggregate = .{ + .bytes => |bytes| return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .bytes = bytes }, } }), .elems => |elems| { const elems_copy = try gpa.dupe(Index, elems[0..new_len]); defer gpa.free(elems_copy); - return ip.get(gpa, .{ .aggregate = .{ + return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = elems_copy }, } }); }, .repeated_elem => |elem| { - return ip.get(gpa, .{ .aggregate = .{ + return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .repeated_elem = elem }, } }); @@ -7991,7 +8043,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al // We have to intern each value here, so unfortunately we can't easily avoid // the repeated indexToKey calls. for (agg_elems, 0..) |*elem, index| { - elem.* = try ip.get(gpa, .{ .int = .{ + elem.* = try ip.get(gpa, tid, .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(index, ip) }, } }); @@ -8008,27 +8060,27 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i], else => unreachable, }; - elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty); + elem.* = try ip.getCoerced(gpa, tid, elem.*, new_elem_ty); } - return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); + return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); }, else => {}, } switch (ip.indexToKey(new_ty)) { .opt_type => |child_type| switch (val) { - .null_value => return ip.get(gpa, .{ .opt = .{ + .null_value => return ip.get(gpa, tid, .{ .opt = .{ .ty = new_ty, .val = .none, } }), - else => return ip.get(gpa, .{ .opt = .{ + else => return ip.get(gpa, tid, .{ .opt = .{ .ty = new_ty, - .val = try ip.getCoerced(gpa, val, child_type), + .val = try ip.getCoerced(gpa, tid, val, child_type), } }), }, - .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{ + .error_union_type => |error_union_type| return ip.get(gpa, tid, .{ .error_union = .{ .ty = new_ty, - .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) }, + .val = .{ .payload = try ip.getCoerced(gpa, tid, val, error_union_type.payload_type) }, } }), else => {}, } @@ -8042,27 +8094,45 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al unreachable; } -fn getCoercedFuncDecl(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { +fn getCoercedFuncDecl( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + val: Index, + new_ty: Index, +) Allocator.Error!Index { const datas = ip.items.items(.data); const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], ); if (new_ty == prev_ty) return val; - return getCoercedFunc(ip, gpa, val, new_ty); + return getCoercedFunc(ip, gpa, tid, val, new_ty); } -fn getCoercedFuncInstance(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { +fn getCoercedFuncInstance( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + val: Index, + new_ty: Index, +) Allocator.Error!Index { const datas = ip.items.items(.data); const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( ip.extra.items[extra_index + 
std.meta.fieldIndex(Tag.FuncInstance, "ty").?], ); if (new_ty == prev_ty) return val; - return getCoercedFunc(ip, gpa, val, new_ty); + return getCoercedFunc(ip, gpa, tid, val, new_ty); } -fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index { +fn getCoercedFunc( + ip: *InternPool, + gpa: Allocator, + _: Zcu.PerThread.Id, + func: Index, + ty: Index, +) Allocator.Error!Index { const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); try ip.items.ensureUnusedCapacity(gpa, 1); @@ -8092,7 +8162,7 @@ fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Alloc /// Asserts `val` has an integer type. /// Assumes `new_ty` is an integer type. -pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { +pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index { // The key cannot be passed directly to `get`, otherwise in the case of // big_int storage, the limbs would be invalidated before they are read. // Here we pre-reserve the limbs to ensure that the logic in `addInt` will @@ -8111,7 +8181,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }; }, }; - return ip.get(gpa, .{ .int = .{ + return ip.get(gpa, tid, .{ .int = .{ .ty = new_ty, .storage = new_storage, } }); diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 01d9157767b5..b174f8e3b53e 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -6,13 +6,11 @@ const InternPool = @import("InternPool.zig"); const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; const RangeSet = @This(); const LazySrcLoc = Zcu.LazySrcLoc; +pt: Zcu.PerThread, ranges: std.ArrayList(Range), -module: *Module, pub const Range = struct { first: InternPool.Index, @@ -20,10 +18,10 @@ pub const Range = struct { src: LazySrcLoc, }; -pub fn init(allocator: std.mem.Allocator, module: *Module) RangeSet { +pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet { return .{ + .pt = pt, .ranges = std.ArrayList(Range).init(allocator), - .module = module, }; } @@ -37,8 +35,8 @@ pub fn add( last: InternPool.Index, src: LazySrcLoc, ) !?LazySrcLoc { - const mod = self.module; - const ip = &mod.intern_pool; + const pt = self.pt; + const ip = &pt.zcu.intern_pool; const ty = ip.typeOf(first); assert(ty == ip.typeOf(last)); @@ -47,8 +45,8 @@ pub fn add( assert(ty == ip.typeOf(range.first)); assert(ty == ip.typeOf(range.last)); - if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), mod) and - Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), mod)) + if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and + Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt)) { return range.src; // They overlap. 
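            // Illustration (not part of the patch): callers treat a non-null
            // result from `add` as the source of the earlier, conflicting
            // range, e.g.
            //
            //     if (try set.add(first, last, src)) |prev_src| {
            //         // report the overlap, noting prev_src
            //     }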
} @@ -63,20 +61,20 @@ pub fn add( } /// Assumes a and b do not overlap -fn lessThan(mod: *Module, a: Range, b: Range) bool { - const ty = Type.fromInterned(mod.intern_pool.typeOf(a.first)); - return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, mod); +fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool { + const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first)); + return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt); } pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool { - const mod = self.module; - const ip = &mod.intern_pool; + const pt = self.pt; + const ip = &pt.zcu.intern_pool; assert(ip.typeOf(first) == ip.typeOf(last)); if (self.ranges.items.len == 0) return false; - std.mem.sort(Range, self.ranges.items, mod, lessThan); + std.mem.sort(Range, self.ranges.items, pt, lessThan); if (self.ranges.items[0].first != first or self.ranges.items[self.ranges.items.len - 1].last != last) @@ -95,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) ! const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, mod)); + try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt)); try counter.addScalar(&counter, 1); - const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, mod); + const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt); if (!cur_start_int.eql(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 40fe11af3a67..dd8d2712ed70 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5,7 +5,7 @@ //! Does type checking, comptime control flow, and safety-check generation. //! This is the the heart of the Zig compiler. -mod: *Module, +pt: Zcu.PerThread, /// Alias to `mod.gpa`. gpa: Allocator, /// Points to the temporary arena allocator of the Sema. 
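From this point on, the Sema hunks repeat one mechanical migration: `sema.mod`
becomes `sema.pt`, and `pt.zcu` stands in wherever the `Zcu` itself is still
needed. Roughly, using only names that appear in the diff:

    const pt = sema.pt; // per-thread handle
    const mod = pt.zcu; // the compilation unit, formerly `sema.mod`

    // Operations that may intern new values now route through `pt` ...
    const undef = try pt.intern(.{ .undef = ty.toIntern() });
    // ... as does type formatting, which can resolve lazy values:
    return sema.fail(block, src, "found '{}'", .{ty.fmt(pt)});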
@@ -146,7 +146,7 @@ const ComptimeAlloc = struct { fn newComptimeAlloc(sema: *Sema, block: *Block, ty: Type, alignment: Alignment) !ComptimeAllocIndex { const idx = sema.comptime_allocs.items.len; try sema.comptime_allocs.append(sema.gpa, .{ - .val = .{ .interned = try sema.mod.intern(.{ .undef = ty.toIntern() }) }, + .val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) }, .is_const = false, .alignment = alignment, .runtime_index = block.runtime_index, @@ -433,7 +433,7 @@ pub const Block = struct { fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { const parent = msg orelse return; - const mod = sema.mod; + const pt = sema.pt; const prefix = "expression is evaluated at comptime because "; switch (cr) { .c_import => |ci| { @@ -451,7 +451,7 @@ pub const Block = struct { ret_ty_src, parent, prefix ++ "the function returns a comptime-only type '{}'", - .{rt.return_ty.fmt(mod)}, + .{rt.return_ty.fmt(pt)}, ); try sema.explainWhyTypeIsComptime(parent, ret_ty_src, rt.return_ty); }, @@ -538,7 +538,7 @@ pub const Block = struct { } pub fn wantSafety(block: *const Block) bool { - return block.want_safety orelse switch (block.sema.mod.optimizeMode()) { + return block.want_safety orelse switch (block.sema.pt.zcu.optimizeMode()) { .Debug => true, .ReleaseSafe => true, .ReleaseFast => false, @@ -737,11 +737,12 @@ pub const Block = struct { fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { const sema = block.sema; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; return block.addInst(.{ .tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = Air.internedToRef((try mod.vectorType(.{ + .ty = Air.internedToRef((try pt.vectorType(.{ .len = sema.typeOf(lhs).vectorLen(mod), .child = .bool_type, })).toIntern()), @@ -829,14 +830,14 @@ pub const Block = struct { } pub fn ownerModule(block: Block) *Package.Module { - const zcu = block.sema.mod; + const zcu = block.sema.pt.zcu; return zcu.namespacePtr(block.namespace).fileScope(zcu).mod; } fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index { const sema = block.sema; const gpa = sema.gpa; - const zcu = sema.mod; + const zcu = sema.pt.zcu; const ip = &zcu.intern_pool; const file_index = block.getFileScopeIndex(zcu); return ip.trackZir(gpa, file_index, inst); @@ -992,7 +993,8 @@ fn analyzeBodyInner( try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body); - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -1777,7 +1779,7 @@ fn analyzeBodyInner( const err_union_ty = sema.typeOf(err_union); if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(zcu), + err_union_ty.fmt(pt), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); @@ -1910,10 +1912,11 @@ pub fn toConstString( air_inst: Air.Inst.Ref, reason: NeededComptimeReason, ) ![]u8 { + const pt = sema.pt; const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src); const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason); const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason); - return arr_val.toAllocatedBytes(arr_val.typeOf(sema.mod), 
sema.arena, sema.mod); + return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt); } pub fn resolveConstStringIntern( @@ -1945,7 +1948,8 @@ fn resolveDestType( strat: enum { remove_eu_opt, remove_eu, remove_opt }, builtin_name: []const u8, ) !Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const remove_eu = switch (strat) { .remove_eu_opt, .remove_eu => true, .remove_opt => false, @@ -2062,7 +2066,8 @@ fn analyzeAsType( } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const comp = mod.comp; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -2076,16 +2081,16 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try mod.arrayType(.{ + const addr_arr_ty = try pt.arrayType(.{ .len = err_return_trace_addr_count, .child = .usize_type, }); - const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); + const addrs_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); - const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); + const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses", .no_embedded_nulls); @@ -2109,7 +2114,7 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveValueAllowVariables(inst)) orelse return null; if (val.isGenericPoison()) return error.GenericPoison; - if (sema.mod.intern_pool.isVariable(val.toIntern())) return null; + if (sema.pt.zcu.intern_pool.isVariable(val.toIntern())) return null; return val; } @@ -2133,7 +2138,8 @@ fn resolveDefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const val = try sema.resolveValue(air_ref) orelse return null; if (val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); @@ -2150,7 +2156,7 @@ fn resolveConstDefinedValue( reason: NeededComptimeReason, ) CompileError!Value { const val = try sema.resolveConstValue(block, src, air_ref, reason); - if (val.isUndef(sema.mod)) return sema.failWithUseOfUndef(block, src); + if (val.isUndef(sema.pt.zcu)) return sema.failWithUseOfUndef(block, src); return val; } @@ -2164,7 +2170,7 @@ fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value /// Lazy values are recursively resolved. 
fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveValue(inst)) orelse return null; - if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) { + if (sema.pt.zcu.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) { .decl, .anon_decl, .comptime_alloc, .comptime_field => return null, .int => {}, .eu_payload, .opt_payload, .arr_elem, .field => unreachable, @@ -2174,6 +2180,7 @@ fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { /// Returns all InternPool keys representing values, including `variable`, `undef`, and `generic_poison`. fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { + const pt = sema.pt; assert(inst != .none); // First section of indexes correspond to a set number of constant values. if (@intFromEnum(inst) < InternPool.static_len) { @@ -2184,7 +2191,7 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { if (inst.toInterned()) |ip_index| { const val = Value.fromInterned(ip_index); - if (val.getVariable(sema.mod) != null) return val; + if (val.getVariable(pt.zcu) != null) return val; } return opv; } @@ -2196,7 +2203,7 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val } }; const val = Value.fromInterned(ip_index); - if (val.isPtrToThreadLocal(sema.mod)) return null; + if (val.isPtrToThreadLocal(pt.zcu)) return null; return val; } @@ -2225,7 +2232,7 @@ pub fn resolveFinalDeclValue( }); }; if (val.isGenericPoison()) return error.GenericPoison; - if (val.canMutateComptimeVarState(sema.mod)) { + if (val.canMutateComptimeVarState(sema.pt.zcu)) { return sema.fail(block, src, "global variable contains reference to comptime var", .{}); } return val; @@ -2254,19 +2261,20 @@ fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileErro } fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError { + const pt = sema.pt; return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ - lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod), + lhs_ty.fmt(pt), rhs_ty.fmt(pt), }); } fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non_optional_ty: Type) CompileError { - const mod = sema.mod; + const pt = sema.pt; const msg = msg: { const msg = try sema.errMsg(src, "expected optional type, found '{}'", .{ - non_optional_ty.fmt(mod), + non_optional_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); - if (non_optional_ty.zigTypeTag(mod) == .ErrorUnion) { + if (non_optional_ty.zigTypeTag(pt.zcu) == .ErrorUnion) { try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{}); } try addDeclaredHereNote(sema, msg, non_optional_ty); @@ -2276,14 +2284,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { - const mod = sema.mod; + const pt = sema.pt; const msg = msg: { const msg = try sema.errMsg(src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(mod), + ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); - if (ty.isSlice(mod)) { - try sema.errNote(src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); + if (ty.isSlice(pt.zcu)) { + try sema.errNote(src, msg, 
"inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(pt.zcu).fmt(pt)}); } break :msg msg; }; @@ -2291,8 +2299,9 @@ fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty } fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { + const pt = sema.pt; return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{ - ty.fmt(sema.mod), + ty.fmt(pt), }); } @@ -2303,17 +2312,19 @@ fn failWithErrorSetCodeMissing( dest_err_set_ty: Type, src_err_set_ty: Type, ) CompileError { + const pt = sema.pt; return sema.fail(block, src, "expected type '{}', found type '{}'", .{ - dest_err_set_ty.fmt(sema.mod), src_err_set_ty.fmt(sema.mod), + dest_err_set_ty.fmt(pt), src_err_set_ty.fmt(pt), }); } fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; if (int_ty.zigTypeTag(zcu) == .Vector) { const msg = msg: { const msg = try sema.errMsg(src, "overflow of vector type '{}' with value '{}'", .{ - int_ty.fmt(zcu), val.fmtValue(zcu, sema), + int_ty.fmt(pt), val.fmtValue(pt, sema), }); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "when computing vector element at index '{d}'", .{vector_index}); @@ -2322,12 +2333,13 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: return sema.failWithOwnedErrorMsg(block, msg); } return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{ - int_ty.fmt(zcu), val.fmtValue(zcu, sema), + int_ty.fmt(pt), val.fmtValue(pt, sema), }); } fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const msg = msg: { const msg = try sema.errMsg(init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); @@ -2358,14 +2370,15 @@ fn failWithInvalidFieldAccess( object_ty: Type, field_name: InternPool.NullTerminatedString, ) CompileError { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; if (inner_ty.zigTypeTag(mod) == .Optional) opt: { const child_ty = inner_ty.optionalChild(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { - const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "consider using '.?', 'orelse', or 'if'", .{}); break :msg msg; @@ -2375,14 +2388,14 @@ fn failWithInvalidFieldAccess( const child_ty = inner_ty.errorUnionPayload(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { - const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } - return sema.fail(block, src, "type 
'{}' does not support field access", .{object_ty.fmt(sema.mod)}); + return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(pt)}); } fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool { @@ -2408,7 +2421,8 @@ fn failWithComptimeErrorRetTrace( src: LazySrcLoc, name: InternPool.NullTerminatedString, ) CompileError { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const msg = msg: { const msg = try sema.errMsg(src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); errdefer msg.destroy(sema.gpa); @@ -2430,7 +2444,7 @@ pub fn errNote( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - return sema.mod.errNote(src, parent, format, args); + return sema.pt.zcu.errNote(src, parent, format, args); } fn addFieldErrNote( @@ -2442,8 +2456,7 @@ fn addFieldErrNote( args: anytype, ) !void { @setCold(true); - const zcu = sema.mod; - const type_src = container_ty.srcLocOrNull(zcu) orelse return; + const type_src = container_ty.srcLocOrNull(sema.pt.zcu) orelse return; const field_src: LazySrcLoc = .{ .base_node_inst = type_src.base_node_inst, .offset = .{ .container_field_name = @intCast(field_index) }, @@ -2480,7 +2493,7 @@ pub fn fail( pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } { @setCold(true); const gpa = sema.gpa; - const mod = sema.mod; + const mod = sema.pt.zcu; const ip = &mod.intern_pool; if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) { @@ -2545,8 +2558,7 @@ fn reparentOwnedErrorMsg( comptime format: []const u8, args: anytype, ) !void { - const mod = sema.mod; - const msg_str = try std.fmt.allocPrint(mod.gpa, format, args); + const msg_str = try std.fmt.allocPrint(sema.gpa, format, args); const orig_notes = msg.notes.len; msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1); @@ -2630,16 +2642,16 @@ fn analyzeAsInt( dest_ty: Type, reason: NeededComptimeReason, ) !u64 { - const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); - return (try val.getUnsignedIntAdvanced(mod, .sema)).?; + return (try val.getUnsignedIntAdvanced(sema.pt, .sema)).?; } /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`, /// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`. fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const parent_captures: InternPool.CaptureValue.Slice = zcu.namespacePtr(block.namespace).getType(zcu).getCaptures(zcu); @@ -2706,7 +2718,7 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { if (sema.builtin_type_target_index == .none) return wip_ty; var new = wip_ty; new.index = sema.builtin_type_target_index; - sema.mod.intern_pool.resolveBuiltinType(new.index, wip_ty.index); + sema.pt.zcu.intern_pool.resolveBuiltinType(new.index, wip_ty.index); return new; } @@ -2714,7 +2726,8 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { /// considered outdated on this update. If so, remove it from the pool /// and return `true`. 
fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; if (!zcu.comp.debug_incremental) return false; @@ -2737,7 +2750,8 @@ fn zirStructDecl( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); @@ -2796,10 +2810,10 @@ fn zirStructDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, struct_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) { .existing => |ty| wip: { if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); - break :wip (try ip.getStructType(gpa, struct_init)).wip; + break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip; }, .wip => |wip| wip, }); @@ -2815,7 +2829,7 @@ fn zirStructDecl( mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - if (sema.mod.comp.debug_incremental) { + if (pt.zcu.comp.debug_incremental) { try ip.addDependency( sema.gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), @@ -2836,7 +2850,7 @@ fn zirStructDecl( try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); @@ -2850,7 +2864,8 @@ fn createAnonymousDeclTypeNamed( anon_prefix: []const u8, inst: ?Zir.Inst.Index, ) !InternPool.DeclIndex { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const namespace = block.namespace; @@ -2892,7 +2907,7 @@ fn createAnonymousDeclTypeNamed( // some tooling may not support very long symbol names. 
try writer.print("{}", .{Value.fmtValueFull(.{ .val = arg_val, - .mod = zcu, + .pt = pt, .opt_sema = sema, .depth = 1, })}); @@ -2953,7 +2968,8 @@ fn zirEnumDecl( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); @@ -3026,10 +3042,10 @@ fn zirEnumDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, enum_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) { .existing => |ty| wip: { if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); - break :wip (try ip.getEnumType(gpa, enum_init)).wip; + break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip; }, .wip => |wip| wip, }); @@ -3051,7 +3067,7 @@ fn zirEnumDecl( new_decl.owns_tv = true; errdefer if (!done) mod.abortAnonDecl(new_decl_index); - if (sema.mod.comp.debug_incremental) { + if (pt.zcu.comp.debug_incremental) { try mod.intern_pool.addDependency( gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), @@ -3118,21 +3134,21 @@ fn zirEnumDecl( if (tag_type_ref != .none) { const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref); if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); } break :ty ty; } else if (fields_len == 0) { - break :ty try mod.intType(.unsigned, 0); + break :ty try pt.intType(.unsigned, 0); } else { const bits = std.math.log2_int_ceil(usize, fields_len); - break :ty try mod.intType(.unsigned, bits); + break :ty try pt.intType(.unsigned, bits); } }; wip_ty.setTagTy(ip, int_tag_ty.toIntern()); if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } @@ -3171,7 +3187,7 @@ fn zirEnumDecl( .needed_comptime_reason = "enum tag value must be comptime-known", }); if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; - last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { assert(conflict.kind == .value); // AstGen validated names are unique const other_field_src: LazySrcLoc = .{ @@ -3179,7 +3195,7 @@ fn zirEnumDecl( .offset = .{ .container_field_value = conflict.prev_field_idx }, }; const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)}); + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(pt, sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -3190,9 +3206,9 @@ fn zirEnumDecl( } else if (any_values) overflow: { var overflow: ?usize = null; last_tag_val = if (last_tag_val) |val| - try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow) + try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) else - 
try mod.intValue(int_tag_ty, 0); + try pt.intValue(int_tag_ty, 0); if (overflow != null) break :overflow true; if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { assert(conflict.kind == .value); // AstGen validated names are unique @@ -3201,7 +3217,7 @@ fn zirEnumDecl( .offset = .{ .container_field_value = conflict.prev_field_idx }, }; const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)}); + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(pt, sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -3211,21 +3227,21 @@ fn zirEnumDecl( break :overflow false; } else overflow: { assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null); - last_tag_val = try mod.intValue(Type.comptime_int, field_i); + last_tag_val = try pt.intValue(Type.comptime_int, field_i); if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; - last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); break :overflow false; }; if (tag_overflow) { const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValue(mod, sema), int_tag_ty.fmt(mod), + last_tag_val.?.fmtValue(pt, sema), int_tag_ty.fmt(pt), }); return sema.failWithOwnedErrorMsg(block, msg); } } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.index); } @@ -3238,7 +3254,8 @@ fn zirUnionDecl( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); @@ -3298,10 +3315,10 @@ fn zirUnionDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, union_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) { .existing => |ty| wip: { if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); - break :wip (try ip.getUnionType(gpa, union_init)).wip; + break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip; }, .wip => |wip| wip, }); @@ -3317,7 +3334,7 @@ fn zirUnionDecl( mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - if (sema.mod.comp.debug_incremental) { + if (pt.zcu.comp.debug_incremental) { try mod.intern_pool.addDependency( gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), @@ -3338,7 +3355,7 @@ fn zirUnionDecl( try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); @@ -3353,7 +3370,8 @@ fn zirOpaqueDecl( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -3387,10 +3405,10 @@ fn zirOpaqueDecl( } }, }; // No `wrapWipTy` needed as no std.builtin types are opaque. 
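    // The container-declaration paths above and below share one two-phase
    // shape; sketched (not part of the patch), with names from the diff:
    //
    //     switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
    //         .existing => |ty| ..., // reuse, unless flagged as outdated
    //         .wip => |wip| ...,     // reserved slot: fill in, then finish
    //     }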
- const wip_ty = switch (try ip.getOpaqueType(gpa, opaque_init)) { + const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { .existing => |ty| wip: { if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); - break :wip (try ip.getOpaqueType(gpa, opaque_init)).wip; + break :wip (try ip.getOpaqueType(gpa, pt.tid, opaque_init)).wip; }, .wip => |wip| wip, }; @@ -3406,7 +3424,7 @@ fn zirOpaqueDecl( mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - if (sema.mod.comp.debug_incremental) { + if (pt.zcu.comp.debug_incremental) { try ip.addDependency( gpa, AnalUnit.wrap(.{ .decl = new_decl_index }), @@ -3426,7 +3444,7 @@ fn zirOpaqueDecl( try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3438,7 +3456,8 @@ fn zirErrorSetDecl( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); @@ -3457,20 +3476,22 @@ fn zirErrorSetDecl( assert(!result.found_existing); // verified in AstGen } - return Air.internedToRef((try mod.errorSetFromUnsortedNames(names.keys())).toIntern()); + return Air.internedToRef((try pt.errorSetFromUnsortedNames(names.keys())).toIntern()); } fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const pt = sema.pt; + if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) { - try sema.fn_ret_ty.resolveFields(sema.mod); + try sema.fn_ret_ty.resolveFields(pt); return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none); } - const target = sema.mod.getTarget(); - const ptr_type = try sema.mod.ptrTypeSema(.{ + const target = pt.zcu.getTarget(); + const ptr_type = try pt.ptrTypeSema(.{ .child = sema.fn_ret_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -3511,7 +3532,8 @@ fn ensureResultUsed( ty: Type, src: LazySrcLoc, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return, .ErrorSet => return sema.fail(block, src, "error set is ignored", .{}), @@ -3526,7 +3548,7 @@ fn ensureResultUsed( }, else => { const msg = msg: { - const msg = try sema.errMsg(src, "value of type '{}' ignored", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "value of type '{}' ignored", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "all non-void values must be used", .{}); try sema.errNote(src, msg, "to discard the value, assign it to '_'", .{}); @@ -3541,7 +3563,8 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = block.nodeOffset(inst_data.src_node); @@ -3565,7 +3588,8 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = 
pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); @@ -3604,7 +3628,8 @@ fn indexablePtrLen( src: LazySrcLoc, object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; @@ -3619,7 +3644,8 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; @@ -3632,6 +3658,7 @@ fn zirAllocExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const pt = sema.pt; const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node }); @@ -3673,9 +3700,9 @@ fn zirAllocExtended( if (!small.is_const) { try sema.validateVarType(block, ty_src, var_ty, false); } - const target = sema.mod.getTarget(); - try var_ty.resolveLayout(sema.mod); - const ptr_type = try sema.mod.ptrTypeSema(.{ + const target = pt.zcu.getTarget(); + try var_ty.resolveLayout(pt); + const ptr_type = try sema.pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .alignment = alignment, @@ -3717,7 +3744,8 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); @@ -3749,7 +3777,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro assert(ptr.byte_offset == 0); const alloc_index = ptr.base_addr.comptime_alloc; const ct_alloc = sema.getComptimeAlloc(alloc_index); - const interned = try ct_alloc.val.intern(mod, sema.arena); + const interned = try ct_alloc.val.intern(pt, sema.arena); if (interned.canMutateComptimeVarState(mod)) { // Preserve the comptime alloc, just make the pointer const. ct_alloc.val = .{ .interned = interned.toIntern() }; @@ -3757,7 +3785,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro return sema.makePtrConst(block, alloc); } else { // Promote the constant to an anon decl. - const new_mut_ptr = Air.internedToRef(try mod.intern(.{ .ptr = .{ + const new_mut_ptr = Air.internedToRef(try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .anon_decl = .{ .val = interned.toIntern(), @@ -3778,7 +3806,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro // The value was initialized through RLS, so we didn't detect the runtime condition earlier. 
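// Aside: two related substitutions recur through this hunk. Mutable comptime
// values are interned via the per-thread context (`val.intern(pt, ...)`), and
// new keys are created with `pt.intern(...)` rather than `mod.intern(...)`.
// A hedged before/after sketch, not part of the patch itself:
//
//     // before: const interned = try ct_alloc.val.intern(mod, sema.arena);
//     // after:  const interned = try ct_alloc.val.intern(pt, sema.arena);
//
//     // before: try mod.intern(.{ .ptr = .{ ... } });
//     // after:  try pt.intern(.{ .ptr = .{ ... } });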
// TODO: source location of runtime control flow const init_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); - return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(mod)}); + return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(pt)}); } // This is a runtime value. @@ -3788,7 +3816,8 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro /// If `alloc` is an inferred allocation, `resolved_inferred_ty` is taken to be its resolved /// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`. fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(zcu); @@ -3831,7 +3860,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const ct_alloc = try sema.newComptimeAlloc(block, elem_ty, ptr_info.flags.alignment); - const alloc_ptr = try zcu.intern(.{ .ptr = .{ + const alloc_ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = ct_alloc }, .byte_offset = 0, @@ -3909,7 +3938,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const idx_val = (try sema.resolveValue(data.rhs)).?; break :blk .{ data.lhs, - .{ .elem = try idx_val.toUnsignedIntSema(zcu) }, + .{ .elem = try idx_val.toUnsignedIntSema(pt) }, }; }, .bitcast => .{ @@ -3935,32 +3964,32 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, }; const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &zcu.intern_pool).toIntern(); const new_ptr = switch (method) { - .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, decl_parent_ptr, new_ptr_ty), + .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, decl_parent_ptr, new_ptr_ty), .opt_payload => ptr: { // Set the optional to non-null at comptime. // If the payload is OPV, we must use that value instead of undef. const opt_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); const payload_ty = opt_ty.optionalChild(zcu); - const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty); - const opt_val = try zcu.intern(.{ .opt = .{ + const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); + const opt_val = try pt.intern(.{ .opt = .{ .ty = opt_ty.toIntern(), .val = payload_val.toIntern(), } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(pt)).toIntern(); }, .eu_payload => ptr: { // Set the error union to non-error at comptime. // If the payload is OPV, we must use that value instead of undef. 
const eu_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); const payload_ty = eu_ty.errorUnionPayload(zcu); - const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty); - const eu_val = try zcu.intern(.{ .error_union = .{ + const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); + const eu_val = try pt.intern(.{ .error_union = .{ .ty = eu_ty.toIntern(), .val = .{ .payload = payload_val.toIntern() }, } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(pt)).toIntern(); }, .field => |idx| ptr: { const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); @@ -3969,14 +3998,14 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, // If the payload is OPV, there will not be a payload store, so we store that value. // Otherwise, there will be a payload store to process later, so undef will suffice. const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]); - const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty); - const tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx); - const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val); + const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); + const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx); + const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty); } - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, pt)).toIntern(); }, - .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(), + .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, pt)).toIntern(), }; try ptr_mapping.put(air_ptr, new_ptr); } @@ -4020,7 +4049,8 @@ fn finishResolveComptimeKnownAllocPtr( alloc_inst: Air.Inst.Index, comptime_info: MaybeComptimeAlloc, ) CompileError!?InternPool.Index { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; // We're almost done - we have the resolved comptime value. We just need to // eliminate the now-dead runtime instructions. 
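// Aside: nearly every function in this file receives the same mechanical
// prologue rewrite, visible again in the next hunk. A self-contained sketch
// of the new convention, assuming the `Sema.pt` field this series introduces
// (illustrative only):
//
//     fn example(sema: *Sema) !void {
//         const pt = sema.pt;  // per-thread compilation context
//         const zcu = pt.zcu;  // the compilation unit itself, for queries
//         const target = zcu.getTarget();      // read-only queries stay on the Zcu
//         _ = target;
//         _ = try pt.intValue(Type.usize, 0);  // anything that interns goes via pt
//     }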
@@ -4041,19 +4071,19 @@ fn finishResolveComptimeKnownAllocPtr( if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) { const alloc_index = existing_comptime_alloc orelse a: { - const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu)); + const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(pt)); const alloc = sema.getComptimeAlloc(idx); alloc.val = .{ .interned = result_val }; break :a idx; }; sema.getComptimeAlloc(alloc_index).is_const = true; - return try zcu.intern(.{ .ptr = .{ + return try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = alloc_index }, .byte_offset = 0, } }); } else { - return try zcu.intern(.{ .ptr = .{ + return try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .anon_decl = .{ .orig_ty = alloc_ty.toIntern(), @@ -4065,9 +4095,9 @@ fn finishResolveComptimeKnownAllocPtr( } fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type { - var ptr_info = ptr_ty.ptrInfo(sema.mod); + var ptr_info = ptr_ty.ptrInfo(sema.pt.zcu); ptr_info.flags.is_const = true; - return sema.mod.ptrTypeSema(ptr_info); + return sema.pt.ptrTypeSema(ptr_info); } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { @@ -4076,7 +4106,7 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai // Detect if a comptime value simply needs to have its type changed. if (try sema.resolveValue(alloc)) |val| { - return Air.internedToRef((try sema.mod.getCoerced(val, const_ptr_ty)).toIntern()); + return Air.internedToRef((try sema.pt.getCoerced(val, const_ptr_ty)).toIntern()); } return block.addBitCast(const_ptr_ty, alloc); @@ -4103,14 +4133,16 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const tracy = trace(@src()); defer tracy.end(); + const pt = sema.pt; + const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, .none); } - const target = sema.mod.getTarget(); - const ptr_type = try sema.mod.ptrTypeSema(.{ + const target = pt.zcu.getTarget(); + const ptr_type = try pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -4125,6 +4157,8 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const pt = sema.pt; + const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); @@ -4132,8 +4166,8 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.analyzeComptimeAlloc(block, var_ty, .none); } try sema.validateVarType(block, ty_src, var_ty, false); - const target = sema.mod.getTarget(); - const ptr_type = try sema.mod.ptrTypeSema(.{ + const target = pt.zcu.getTarget(); + const ptr_type = try pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -4181,7 +4215,8 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer 
tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); @@ -4206,7 +4241,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com .anon_decl => |a| a.val, .comptime_alloc => |i| val: { const alloc = sema.getComptimeAlloc(i); - break :val (try alloc.val.intern(mod, sema.arena)).toIntern(); + break :val (try alloc.val.intern(pt, sema.arena)).toIntern(); }, else => unreachable, }; @@ -4232,7 +4267,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none); - const final_ptr_ty = try mod.ptrTypeSema(.{ + const final_ptr_ty = try pt.ptrTypeSema(.{ .child = final_elem_ty.toIntern(), .flags = .{ .alignment = ia1.alignment, @@ -4244,7 +4279,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.validateVarType(block, ty_src, final_elem_ty, false); } else if (try sema.resolveComptimeKnownAllocPtr(block, ptr, final_ptr_ty)) |ptr_val| { const const_ptr_ty = try sema.makePtrTyConst(final_ptr_ty); - const new_const_ptr = try mod.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty); + const new_const_ptr = try pt.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty); // Remap the ZIR operand to the resolved pointer value sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(new_const_ptr.toIntern())); @@ -4252,7 +4287,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be // fully resolved so it can be lowered in codegen. - try final_elem_ty.resolveFully(mod); + try final_elem_ty.resolveFully(pt); return; } @@ -4261,7 +4296,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // The alloc wasn't comptime-known per the above logic, so the // type cannot be comptime-only. // TODO: source location of runtime control flow - return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)}); + return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(pt)}); } // Change it to a normal alloc. @@ -4318,7 +4353,8 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; @@ -4355,7 +4391,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. 
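// Aside: the error below is built with Sema's usual owned-message idiom,
// unchanged by this series and shown here only for orientation: construct the
// root message in a labeled block, attach notes, and let `errdefer` free it if
// a note allocation fails. Sketch of the idiom, not new code:
//
//     const msg = msg: {
//         const msg = try sema.errMsg(src, "...", .{});
//         errdefer msg.destroy(sema.gpa);
//         try sema.errNote(src, msg, "...", .{});
//         break :msg msg;
//     };
//     return sema.failWithOwnedErrorMsg(block, msg);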
const msg = msg: { - const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{}); @@ -4387,10 +4423,10 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = len_idx, } }); try sema.errNote(a_src, msg, "length {} here", .{ - v.fmtValue(sema.mod, sema), + v.fmtValue(pt, sema), }); try sema.errNote(arg_src, msg, "length {} here", .{ - arg_val.fmtValue(sema.mod, sema), + arg_val.fmtValue(pt, sema), }); break :msg msg; }; @@ -4427,7 +4463,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }); try sema.errNote(arg_src, msg, "type '{}' has no upper bound", .{ - object_ty.fmt(sema.mod), + object_ty.fmt(pt), }); } break :msg msg; @@ -4453,7 +4489,8 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. /// Given a `*E!?T`, returns a (valid) `*T`. /// May invalidate already-stored payload data. fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; var base_ptr = ptr; while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), @@ -4471,7 +4508,8 @@ fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile } fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data; @@ -4503,10 +4541,10 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE switch (val_ty.zigTypeTag(mod)) { .Array, .Vector => {}, else => if (!val_ty.isTuple(mod)) { - return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(mod), val_ty.fmt(mod) }); + return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(pt), val_ty.fmt(pt) }); }, } - const want_ty = try mod.arrayType(.{ + const want_ty = try pt.arrayType(.{ .len = val_ty.arrayLen(mod), .child = elem_ty.toIntern(), .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, @@ -4522,7 +4560,8 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const un_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok; const src = block.tokenOffset(un_tok.src_tok); // In case of GenericPoison, we don't actually have a type, so this will be @@ -4538,7 +4577,7 @@ fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr if (ty_operand.isGenericPoison()) return; if (ty_operand.optEuBaseType(mod).zigTypeTag(mod) != .Pointer) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(mod)}); + const msg = try sema.errMsg(src, 
"expected type '{}', found pointer", .{ty_operand.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "address-of operator always returns a pointer", .{}); break :msg msg; @@ -4551,7 +4590,8 @@ fn zirValidateArrayInitRefTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data; @@ -4565,7 +4605,7 @@ fn zirValidateArrayInitRefTy( .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice, .Many => { // Use array of correct length - const arr_ty = try mod.arrayType(.{ + const arr_ty = try pt.arrayType(.{ .len = extra.elem_count, .child = ptr_ty.childType(mod).toIntern(), .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, @@ -4593,7 +4633,8 @@ fn zirValidateArrayInitTy( inst: Zir.Inst.Index, is_result_ty: bool, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const ty_src: LazySrcLoc = if (is_result_ty) src else block.src(.{ .node_offset_init_ty = inst_data.src_node }); @@ -4615,7 +4656,8 @@ fn validateArrayInitTy( init_count: u32, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Array => { const array_len = ty.arrayLen(mod); @@ -4636,7 +4678,7 @@ fn validateArrayInitTy( return; }, .Struct => if (ty.isTuple(mod)) { - try ty.resolveFields(mod); + try ty.resolveFields(pt); const array_len = ty.arrayLen(mod); if (init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ @@ -4656,7 +4698,8 @@ fn zirValidateStructInitTy( inst: Zir.Inst.Index, is_result_ty: bool, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { @@ -4681,7 +4724,8 @@ fn zirValidatePtrStructInit( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const init_src = block.nodeOffset(validate_inst.src_node); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4716,7 +4760,8 @@ fn validateUnionInit( instrs: []const Zir.Inst.Index, union_ptr: Air.Inst.Ref, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; if (instrs.len != 1) { @@ -4814,7 +4859,7 @@ fn validateUnionInit( } const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const field_type = union_ty.unionFieldType(tag_val, mod).?; if (try sema.typeHasOnePossibleValue(field_type)) |field_only_value| { @@ -4848,7 +4893,7 @@ fn validateUnionInit( } block.instructions.shrinkRetainingCapacity(block_index); - const union_val = try mod.intern(.{ .un = .{ + const union_val = try pt.intern(.{ .un = .{ .ty = union_ty.toIntern(), 
.tag = tag_val.toIntern(), .val = val.toIntern(), @@ -4875,7 +4920,8 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -4914,7 +4960,7 @@ fn validateStructInit( if (block.is_comptime and (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null) { - try struct_ty.resolveLayout(mod); + try struct_ty.resolveLayout(pt); // In this case the only thing we need to do is evaluate the implicit // store instructions for default field values, and report any missing fields. // Avoid the cost of the extra machinery for detecting a comptime struct init value. @@ -4922,7 +4968,7 @@ fn validateStructInit( const i: u32 = @intCast(i_usize); if (field_ptr != .none) continue; - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.toIntern() == .unreachable_value) { const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse { @@ -4971,7 +5017,7 @@ fn validateStructInit( const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); // We collect the comptime field values in case the struct initialization // ends up being comptime-known. @@ -5094,7 +5140,7 @@ fn validateStructInit( for (block.instructions.items[first_block_index..]) |cur_inst| { while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) { const field_ty = struct_ty.structFieldType(field_indices[init_index], mod); - if (try field_ty.onePossibleValue(mod)) |_| continue; + if (try field_ty.onePossibleValue(pt)) |_| continue; field_ptr_ref = sema.inst_map.get(instrs[init_index]).?; } switch (air_tags[@intFromEnum(cur_inst)]) { @@ -5122,7 +5168,7 @@ fn validateStructInit( } block.instructions.shrinkRetainingCapacity(block_index); - const struct_val = try mod.intern(.{ .aggregate = .{ + const struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = field_values }, } }); @@ -5130,7 +5176,7 @@ fn validateStructInit( try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } - try struct_ty.resolveLayout(mod); + try struct_ty.resolveLayout(pt); // Our task is to insert `store` instructions for all the default field values. for (found_fields, 0..) |field_ptr, i| { @@ -5152,7 +5198,8 @@ fn zirValidatePtrArrayInit( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const init_src = block.nodeOffset(validate_inst.src_node); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -5175,7 +5222,7 @@ fn zirValidatePtrArrayInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - try array_ty.resolveStructFieldInits(mod); + try array_ty.resolveStructFieldInits(pt); var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern(); @@ -5218,7 +5265,7 @@ fn zirValidatePtrArrayInit( // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. 
if (array_ty.sentinel(mod)) |sentinel_val| { - const array_len_ref = try mod.intRef(Type.usize, array_len); + const array_len_ref = try pt.intRef(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); const sentinel = Air.internedToRef(sentinel_val.toIntern()); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); @@ -5244,8 +5291,8 @@ fn zirValidatePtrArrayInit( if (array_ty.isTuple(mod)) { if (array_ty.structFieldIsComptime(i, mod)) - try array_ty.resolveStructFieldInits(mod); - if (try array_ty.structFieldValueComptime(mod, i)) |opv| { + try array_ty.resolveStructFieldInits(pt); + if (try array_ty.structFieldValueComptime(pt, i)) |opv| { element_vals[i] = opv.toIntern(); continue; } @@ -5347,7 +5394,7 @@ fn zirValidatePtrArrayInit( } block.instructions.shrinkRetainingCapacity(block_index); - const array_val = try mod.intern(.{ .aggregate = .{ + const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); @@ -5357,18 +5404,19 @@ fn zirValidatePtrArrayInit( } fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) != .Pointer) { - return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)}); + return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(pt)}); } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, - .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}), - .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}), + .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(pt)}), + .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(pt)}), } if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { @@ -5386,7 +5434,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const msg = try sema.errMsg( src, "values of type '{}' must be comptime-known, but operand value is runtime-known", - .{elem_ty.fmt(mod)}, + .{elem_ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); @@ -5398,7 +5446,8 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ValidateDestructure, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -5414,7 +5463,7 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp if (!can_destructure) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "type '{}' cannot be destructured", .{operand_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "type '{}' cannot be destructured", 
.{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(destructure_src, msg, "result destructured here", .{}); break :msg msg; @@ -5441,7 +5490,8 @@ fn failWithBadMemberAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const kw_name = switch (agg_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", @@ -5451,12 +5501,12 @@ fn failWithBadMemberAccess( }; if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ - agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), + agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool), }); }; return sema.fail(block, field_src, "{s} '{}' has no member named '{}'", .{ - kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), + kw_name, agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool), }); } @@ -5468,8 +5518,8 @@ fn failWithBadStructFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.mod; - const gpa = sema.gpa; + const zcu = sema.pt.zcu; + const ip = &zcu.intern_pool; const decl = zcu.declPtr(struct_type.decl.unwrap().?); const fqn = try decl.fullyQualifiedName(zcu); @@ -5477,9 +5527,9 @@ fn failWithBadStructFieldAccess( const msg = try sema.errMsg( field_src, "no field named '{}' in struct '{}'", - .{ field_name.fmt(&zcu.intern_pool), fqn.fmt(&zcu.intern_pool) }, + .{ field_name.fmt(ip), fqn.fmt(ip) }, ); - errdefer msg.destroy(gpa); + errdefer msg.destroy(sema.gpa); try sema.errNote(struct_ty.srcLoc(zcu), msg, "struct declared here", .{}); break :msg msg; }; @@ -5494,7 +5544,8 @@ fn failWithBadUnionFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.mod; + const zcu = sema.pt.zcu; + const ip = &zcu.intern_pool; const gpa = sema.gpa; const decl = zcu.declPtr(union_obj.decl); @@ -5504,7 +5555,7 @@ fn failWithBadUnionFieldAccess( const msg = try sema.errMsg( field_src, "no field named '{}' in union '{}'", - .{ field_name.fmt(&zcu.intern_pool), fqn.fmt(&zcu.intern_pool) }, + .{ field_name.fmt(ip), fqn.fmt(ip) }, ); errdefer msg.destroy(gpa); try sema.errNote(union_ty.srcLoc(zcu), msg, "union declared here", .{}); @@ -5514,9 +5565,9 @@ fn failWithBadUnionFieldAccess( } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const mod = sema.mod; - const src_loc = decl_ty.srcLocOrNull(mod) orelse return; - const category = switch (decl_ty.zigTypeTag(mod)) { + const zcu = sema.pt.zcu; + const src_loc = decl_ty.srcLocOrNull(zcu) orelse return; + const category = switch (decl_ty.zigTypeTag(zcu)) { .Union => "union", .Struct => "struct", .Enum => "enum", @@ -5575,7 +5626,8 @@ fn storeToInferredAllocComptime( operand: Air.Inst.Ref, iac: *Air.Inst.Data.InferredAllocComptime, ) CompileError!void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const operand_ty = sema.typeOf(operand); // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl or a ComptimeAlloc. 
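// Aside: diagnostics formatting in the hunks above switches from `ty.fmt(mod)`
// to `ty.fmt(pt)` and from `val.fmtValue(mod, sema)` to `val.fmtValue(pt, sema)`;
// presumably rendering a type or value may itself need the per-thread intern
// context. Sketch under that assumption, not part of the patch:
//
//     return sema.fail(block, src, "expected '{}', found '{}'", .{
//         expected_ty.fmt(pt), actual_ty.fmt(pt),
//     });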
@@ -5584,7 +5636,7 @@ fn storeToInferredAllocComptime( .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known", }); }; - const alloc_ty = try zcu.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .alignment = iac.alignment, @@ -5592,7 +5644,7 @@ fn storeToInferredAllocComptime( }, }); if (iac.is_const and !operand_val.canMutateComptimeVarState(zcu)) { - iac.ptr = try zcu.intern(.{ .ptr = .{ + iac.ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .anon_decl = .{ .val = operand_val.toIntern(), @@ -5603,7 +5655,7 @@ fn storeToInferredAllocComptime( } else { const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment); sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() }; - iac.ptr = try zcu.intern(.{ .ptr = .{ + iac.ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = alloc_index }, .byte_offset = 0, @@ -5624,7 +5676,8 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[@intFromEnum(inst)].pl_node; @@ -5662,23 +5715,23 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code); return sema.addStrLit( - try sema.mod.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls), + try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls), bytes.len, ); } fn addNullTerminatedStrLit(sema: *Sema, string: InternPool.NullTerminatedString) CompileError!Air.Inst.Ref { - return sema.addStrLit(string.toString(), string.length(&sema.mod.intern_pool)); + return sema.addStrLit(string.toString(), string.length(&sema.pt.zcu.intern_pool)); } fn addStrLit(sema: *Sema, string: InternPool.String, len: u64) CompileError!Air.Inst.Ref { - const mod = sema.mod; - const array_ty = try mod.arrayType(.{ + const pt = sema.pt; + const array_ty = try pt.arrayType(.{ .len = len, .sentinel = .zero_u8, .child = .u8_type, }); - const val = try mod.intern(.{ .aggregate = .{ + const val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .bytes = string }, } }); @@ -5690,16 +5743,16 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { } fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { - const mod = sema.mod; - const ptr_ty = (try mod.ptrTypeSema(.{ - .child = mod.intern_pool.typeOf(val), + const pt = sema.pt; + const ptr_ty = (try pt.ptrTypeSema(.{ + .child = pt.zcu.intern_pool.typeOf(val), .flags = .{ .alignment = .none, .is_const = true, .address_space = .generic, }, })).toIntern(); - return mod.intern(.{ .ptr = .{ + return pt.intern(.{ .ptr = .{ .ty = ptr_ty, .base_addr = .{ .anon_decl = .{ .val = val, @@ -5715,7 +5768,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int; - return sema.mod.intRef(Type.comptime_int, int); + return sema.pt.intRef(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { @@ -5723,7 +5776,6 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[@intFromEnum(int.start)..][0..byte_count]; @@ -5734,7 +5786,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); - return Air.internedToRef((try mod.intValue_big(Type.comptime_int, .{ + return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{ .limbs = limbs, .positive = true, })).toIntern()); @@ -5743,7 +5795,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float; - return Air.internedToRef((try sema.mod.floatValue( + return Air.internedToRef((try sema.pt.floatValue( Type.comptime_float, number, )).toIntern()); @@ -5754,7 +5806,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); - return Air.internedToRef((try sema.mod.floatValue(Type.comptime_float, number)).toIntern()); + return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern()); } fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -5775,10 +5827,11 @@ fn zirCompileLog( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; var managed = mod.compile_log_text.toManaged(sema.gpa); - defer sema.mod.compile_log_text = managed.moveToUnmanaged(); + defer pt.zcu.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); @@ -5792,10 +5845,10 @@ fn zirCompileLog( const arg_ty = sema.typeOf(arg); if (try sema.resolveValueResolveLazy(arg)) |val| { try writer.print("@as({}, {})", .{ - arg_ty.fmt(mod), val.fmtValue(mod, sema), + arg_ty.fmt(pt), val.fmtValue(pt, sema), }); } else { - try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)}); + try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(pt)}); } } try writer.print("\n", .{}); @@ -5835,7 +5888,8 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); @@ -5906,7 +5960,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const comp = zcu.comp; const gpa = sema.gpa; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; @@ -6005,7 +6060,7 @@ fn 
zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - try zcu.ensureFileAnalyzed(result.file_index); + try pt.ensureFileAnalyzed(result.file_index); const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; return sema.analyzeDeclVal(parent_block, src, file_root_decl_index); } @@ -6147,7 +6202,8 @@ fn resolveAnalyzedBlock( defer tracy.end(); const gpa = sema.gpa; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); @@ -6258,7 +6314,7 @@ fn resolveAnalyzedBlock( const type_src = src; // TODO: better source location if (try sema.typeRequiresComptime(resolved_ty)) { const msg = msg: { - const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)}); + const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?; @@ -6353,7 +6409,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -6388,7 +6445,8 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -6421,7 +6479,8 @@ pub fn analyzeExport( exported_decl_index: InternPool.DeclIndex, ) !void { const gpa = sema.gpa; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (options.linkage == .internal) return; @@ -6433,7 +6492,7 @@ pub fn analyzeExport( if (!try sema.validateExternType(export_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, export_ty, .other); @@ -6460,7 +6519,8 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src = block.builtinCallArgSrc(extra.node, 0); const src = block.nodeOffset(extra.node); @@ -6502,7 +6562,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst } fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, 
extended.operand).data; const operand_src = block.builtinCallArgSrc(extra.node, 0); @@ -6628,7 +6689,8 @@ fn addDbgVar( ) CompileError!void { if (block.is_comptime or block.ownerModule().strip) return; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); const val_ty = switch (air_tag) { .dbg_var_ptr => operand_ty.childType(mod), @@ -6669,7 +6731,8 @@ fn addDbgVar( } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const src = block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( @@ -6682,7 +6745,8 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const src = block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( @@ -6695,7 +6759,8 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.DeclIndex { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace.toOptional(), name, false)) |decl_index| { @@ -6716,7 +6781,8 @@ fn lookupInNamespace( ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?InternPool.DeclIndex { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const namespace_index = opt_namespace_index.unwrap() orelse return null; const namespace = mod.namespacePtr(namespace_index); @@ -6811,7 +6877,8 @@ fn lookupInNamespace( } fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const func_val = (try sema.resolveValue(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { @@ -6827,18 +6894,19 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; if (block.is_comptime or block.is_typeof) { - const index_val = try mod.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len); + const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len); return Air.internedToRef(index_val.toIntern()); } if (!block.ownerModule().error_tracing) return .none; - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), @@ -6864,7 +6932,8 @@ fn popErrorReturnTrace( operand: Air.Inst.Ref, 
saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; @@ -6878,9 +6947,9 @@ fn popErrorReturnTrace( // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or // the result is comptime-known to be a non-error. Either way, pop unconditionally. - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); - const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); + const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); @@ -6904,9 +6973,9 @@ fn popErrorReturnTrace( defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); - const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); + const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); @@ -6947,7 +7016,8 @@ fn zirCall( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const callee_src = block.src(.{ .node_offset_call_func = inst_data.src_node }); const call_src = block.nodeOffset(inst_data.src_node); @@ -7031,8 +7101,8 @@ fn zirCall( // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. 
if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); @@ -7065,7 +7135,8 @@ fn checkCallArgumentCount( total_args: usize, member_fn: bool, ) !Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const func_ty = func_ty: { switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, @@ -7082,7 +7153,7 @@ fn checkCallArgumentCount( { const msg = msg: { const msg = try sema.errMsg(func_src, "cannot call optional type '{}'", .{ - callee_ty.fmt(mod), + callee_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(func_src, msg, "consider using '.?', 'orelse' or 'if'", .{}); @@ -7093,7 +7164,7 @@ fn checkCallArgumentCount( }, else => {}, } - return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)}); + return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(pt)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; @@ -7142,7 +7213,8 @@ fn callBuiltin( args: []const Air.Inst.Ref, operation: CallOperation, ) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { switch (callee_ty.zigTypeTag(mod)) { @@ -7155,7 +7227,7 @@ fn callBuiltin( }, else => {}, } - std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)}); + std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(pt)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; @@ -7261,7 +7333,8 @@ const CallArgsInfo = union(enum) { func_ty_info: InternPool.Key.FuncType, func_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const param_count = func_ty_info.param_types.len; const uncoerced_arg: Air.Inst.Ref = switch (cai) { inline .resolved, .call_builtin => |resolved| resolved.args[arg_index], @@ -7438,7 +7511,8 @@ fn analyzeCall( call_dbg_node: ?Zir.Inst.Index, operation: CallOperation, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const callee_ty = sema.typeOf(func); @@ -7741,10 +7815,10 @@ fn analyzeCall( const ies = try sema.arena.create(InferredErrorSet); ies.* = .{ .func = .none }; sema.fn_ret_ty_ies = ies; - sema.fn_ret_ty = Type.fromInterned((try ip.get(gpa, .{ .error_union_type = .{ + sema.fn_ret_ty = Type.fromInterned(try pt.intern(.{ .error_union_type = .{ .error_set_type = .adhoc_inferred_error_set_type, .payload_type = sema.fn_ret_ty.toIntern(), - } }))); + } })); } // This `res2` is here instead of directly breaking from `res` due to a stage1 @@ -7816,7 +7890,7 @@ fn analyzeCall( // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. 
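// Aside: memoization of comptime calls goes through the interner, so it too
// now uses `pt`: the (func, args, result) entry is created with `pt.intern`,
// mirroring the lines just below. Restated sketch, no additional behavior:
//
//     _ = try pt.intern(.{ .memoized_call = .{
//         .func = module_fn_index,
//         .arg_values = memoized_arg_values,
//         .result = result_transformed,
//     } });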
if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(mod)) { - _ = try mod.intern(.{ .memoized_call = .{ + _ = try pt.intern(.{ .memoized_call = .{ .func = module_fn_index, .arg_values = memoized_arg_values, .result = result_transformed, @@ -7921,7 +7995,8 @@ fn analyzeCall( } fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const target = mod.getTarget(); const backend = mod.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { @@ -7932,7 +8007,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index); if (!func_ty.eql(func_decl.typeOf(mod), mod)) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ - func_ty.fmt(mod), func_decl.typeOf(mod).fmt(mod), + func_ty.fmt(pt), func_decl.typeOf(mod).fmt(pt), }); } _ = try block.addUnOp(.ret, result); @@ -7954,7 +8029,7 @@ fn analyzeInlineCallArg( func_ty_info: InternPool.Key.FuncType, func_inst: Air.Inst.Ref, ) !?Air.Inst.Ref { - const mod = ics.sema.mod; + const mod = ics.sema.pt.zcu; const ip = &mod.intern_pool; const zir_tags = ics.callee().code.instructions.items(.tag); switch (zir_tags[@intFromEnum(inst)]) { @@ -8084,7 +8159,8 @@ fn instantiateGenericCall( call_tag: Air.Inst.Tag, call_dbg_node: ?Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const gpa = sema.gpa; const ip = &zcu.intern_pool; @@ -8127,7 +8203,7 @@ fn instantiateGenericCall( // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a // new, monomorphized function, with the comptime parameters elided. 
var child_sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = sema.arena, .code = fn_zir, @@ -8358,7 +8434,8 @@ fn instantiateGenericCall( } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const tuple = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| tuple, @@ -8373,9 +8450,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) } fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[@intFromEnum(inst)].int_type; - const ty = try mod.intType(int_type.signedness, int_type.bit_count); + const ty = try sema.pt.intType(int_type.signedness, int_type.bit_count); return Air.internedToRef(ty.toIntern()); } @@ -8383,22 +8459,24 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const child_type = try sema.resolveType(block, operand_src, inst_data.operand); if (child_type.zigTypeTag(mod) == .Opaque) { - return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)}); + return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(pt)}); } else if (child_type.zigTypeTag(mod) == .Null) { - return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)}); + return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(pt)}); } - const opt_type = try mod.optionalType(child_type.toIntern()); + const opt_type = try pt.optionalType(child_type.toIntern()); return Air.internedToRef(opt_type.toIntern()); } fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin; const maybe_wrapped_indexable_ty = sema.resolveType(block, LazySrcLoc.unneeded, bin.lhs) catch |err| switch (err) { // Since this is a ZIR instruction that returns a type, encountering @@ -8409,7 +8487,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil else => |e| return e, }; const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod); - try indexable_ty.resolveFields(mod); + try indexable_ty.resolveFields(pt); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod); @@ -8421,7 +8499,8 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) { error.GenericPoison => return .generic_poison_type, @@ -8439,7 +8518,8 @@ fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Ai } fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(un_node.src_node); const ptr_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) { @@ -8455,7 +8535,8 @@ fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const vec_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) { // Since this is a ZIR instruction that returns a type, encountering @@ -8466,13 +8547,12 @@ fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => |e| return e, }; if (!vec_ty.isVector(mod)) { - return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(mod)}); + return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(pt)}); } return Air.internedToRef(vec_ty.childType(mod).toIntern()); } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const len_src = block.builtinCallArgSrc(inst_data.src_node, 0); const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1); @@ -8482,7 +8562,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
})); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try mod.vectorType(.{ + const vector_type = try sema.pt.vectorType(.{ .len = len, .child = elem_type.toIntern(), }); @@ -8502,7 +8582,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }); const elem_type = try sema.resolveType(block, elem_src, extra.rhs); try sema.validateArrayElemType(block, elem_type, elem_src); - const array_ty = try sema.mod.arrayType(.{ + const array_ty = try sema.pt.arrayType(.{ .len = len, .child = elem_type.toIntern(), }); @@ -8529,7 +8609,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil const sentinel_val = try sema.resolveConstDefinedValue(block, sentinel_src, sentinel, .{ .needed_comptime_reason = "array sentinel value must be comptime-known", }); - const array_ty = try sema.mod.arrayType(.{ + const array_ty = try sema.pt.arrayType(.{ .len = len, .sentinel = sentinel_val.toIntern(), .child = elem_type.toIntern(), @@ -8539,9 +8619,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (elem_type.zigTypeTag(mod) == .Opaque) { - return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)}); + return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(pt)}); } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } @@ -8567,7 +8648,8 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -8577,40 +8659,41 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ - error_set.fmt(mod), + error_set.fmt(pt), }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); - const err_union_ty = try mod.errorUnionType(error_set, payload); + const err_union_ty = try pt.errorUnionType(error_set, payload); return Air.internedToRef(err_union_ty.toIntern()); } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ - payload_ty.fmt(mod), + payload_ty.fmt(pt), }); } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ - payload_ty.fmt(mod), + payload_ty.fmt(pt), }); } } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const mod = sema.mod; + const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; - const name = 
try mod.intern_pool.getOrPutString( + const name = try pt.zcu.intern_pool.getOrPutString( sema.gpa, inst_data.get(sema.code), .no_embedded_nulls, ); - _ = try mod.getErrorValue(name); + _ = try pt.zcu.getErrorValue(name); // Create an error set type with only this error value, and return the value. - const error_set_type = try mod.singleErrorSetType(name); - return Air.internedToRef((try mod.intern(.{ .err = .{ + const error_set_type = try pt.singleErrorSetType(name); + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, } }))); @@ -8620,21 +8703,22 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) { - return mod.undefRef(err_int_ty); + return pt.undefRef(err_int_ty); } const err_name = ip.indexToKey(val.toIntern()).err.name; - return Air.internedToRef((try mod.intValue( + return Air.internedToRef((try pt.intValue( err_int_ty, try mod.getErrorValue(err_name), )).toIntern()); @@ -8646,10 +8730,10 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD else => |err_set_ty_index| { const names = ip.indexToKey(err_set_ty_index).error_set_type.names; switch (names.len) { - 0 => return Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern()), + 0 => return Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()), 1 => { const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?); - return mod.intRef(err_int_ty, int); + return pt.intRef(err_int_ty, int); }, else => {}, } @@ -8664,19 +8748,20 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const uncasted_operand = try sema.resolveInst(extra.operand); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod)); + const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt)); if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - return Air.internedToRef((try mod.intern(.{ .err = .{ + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = .anyerror_type, .name = mod.global_error_set.keys()[int], } }))); @@ -8684,7 +8769,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { 
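// Illustrative aside, not part of the diff: a user-level sketch of the
// semantics zirErrorValue/zirIntFromError/zirErrorFromInt analyze. Error
// values are interned with nonzero integer codes in the global error set,
// and casting a code back to an error is guarded at runtime by the
// .invalid_error_code safety check visible in the hunk below.
const std = @import("std");

test "error values round-trip through their integer code" {
    const E = error{ Foo, Bar };
    const err: anyerror = E.Bar;
    const code = @intFromError(err); // nonzero index into the global error set
    try std.testing.expect(code != 0);
    try std.testing.expectEqual(err, @errorFromInt(code));
}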
const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern()); + const zero_val = Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bool_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, src, ok, .invalid_error_code); @@ -8702,7 +8787,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -8723,9 +8809,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag(mod) != .ErrorSet) - return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)}); + return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(pt)}); if (rhs_ty.zigTypeTag(mod) != .ErrorSet) - return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)}); + return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(pt)}); // Anything merged with anyerror is anyerror. if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { @@ -8758,16 +8844,18 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = inst_data.get(sema.code); - return Air.internedToRef((try mod.intern(.{ + return Air.internedToRef((try pt.intern(.{ .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name, .no_embedded_nulls), }))); } fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -8777,7 +8865,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { - try operand_ty.resolveFields(mod); + try operand_ty.resolveFields(pt); const tag_ty = operand_ty.unionTagType(mod) orelse { return sema.fail( block, @@ -8791,7 +8879,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); }, }; @@ -8802,20 +8890,20 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // https://github.com/ziglang/zig/issues/15909 if (enum_tag_ty.enumFieldCount(mod) == 0 and !enum_tag_ty.isNonexhaustiveEnum(mod)) { return sema.fail(block, operand_src, "cannot use @intFromEnum on empty enum '{}'", .{ - enum_tag_ty.fmt(mod), + enum_tag_ty.fmt(pt), }); } if (try 
sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { - return Air.internedToRef((try mod.getCoerced(opv, int_tag_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(opv, int_tag_ty)).toIntern()); } if (try sema.resolveValue(enum_tag)) |enum_tag_val| { if (enum_tag_val.isUndef(mod)) { - return mod.undefRef(int_tag_ty); + return pt.undefRef(int_tag_ty); } - const val = try enum_tag_val.intFromEnum(enum_tag_ty, mod); + const val = try enum_tag_val.intFromEnum(enum_tag_ty, pt); return Air.internedToRef(val.toIntern()); } @@ -8824,7 +8912,8 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -8833,7 +8922,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const operand = try sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); + return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(pt)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); @@ -8841,10 +8930,10 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (dest_ty.isNonexhaustiveEnum(mod)) { const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern()); } return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{ - int_val.fmtValue(mod, sema), dest_ty.fmt(mod), + int_val.fmtValue(pt, sema), dest_ty.fmt(pt), }); } if (int_val.isUndef(mod)) { @@ -8852,10 +8941,10 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } if (!(try sema.enumHasInt(dest_ty, int_val))) { return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{ - dest_ty.fmt(mod), int_val.fmtValue(mod, sema), + dest_ty.fmt(pt), int_val.fmtValue(pt, sema), }); } - return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern()); } if (dest_ty.intTagType(mod).zigTypeTag(mod) == .ComptimeInt) { @@ -8909,7 +8998,8 @@ fn analyzeOptionalPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag(zcu) == .Pointer); @@ -8919,7 +9009,7 @@ fn analyzeOptionalPayloadPtr( } const child_type = opt_type.optionalChild(zcu); - const child_pointer = try zcu.ptrTypeSema(.{ + const child_pointer = try pt.ptrTypeSema(.{ .child = child_type.toIntern(), .flags = .{ .is_const = optional_ptr_ty.isConstPtr(zcu), @@ -8932,8 +9022,8 @@ fn analyzeOptionalPayloadPtr( if (sema.isComptimeMutablePtr(ptr_val)) { // Set the optional to non-null at comptime. // If the payload is OPV, we must use that value instead of undef. 
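// Illustrative aside, not part of the diff: the language-level access that
// analyzeOptionalPayloadPtr models. `&opt.?` is a pointer into the optional's
// payload, so writes through it are visible in the optional itself; the
// initializing form handled below additionally sets a comptime-known optional
// to non-null before handing out the pointer.
const std = @import("std");

test "optional payload pointers alias the optional's payload" {
    var opt: ?u32 = 42;
    const payload: *u32 = &opt.?;
    payload.* += 1;
    try std.testing.expectEqual(@as(?u32, 43), opt);
}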
- const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try zcu.undefValue(child_type); - const opt_val = try zcu.intern(.{ .opt = .{ + const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try pt.undefValue(child_type); + const opt_val = try pt.intern(.{ .opt = .{ .ty = opt_type.toIntern(), .val = payload_val.toIntern(), } }); @@ -8943,13 +9033,13 @@ fn analyzeOptionalPayloadPtr( const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(zcu)) { return sema.fail(block, src, "unable to unwrap null", .{}); } - return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern()); } } @@ -8978,7 +9068,8 @@ fn zirOptionalPayload( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); @@ -8992,7 +9083,7 @@ fn zirOptionalPayload( // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); - break :t try mod.ptrTypeSema(.{ + break :t try pt.ptrTypeSema(.{ .child = ptr_info.child, .flags = .{ .alignment = ptr_info.flags.alignment, @@ -9030,7 +9121,8 @@ fn zirErrUnionPayload( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); @@ -9038,7 +9130,7 @@ fn zirErrUnionPayload( const err_union_ty = sema.typeOf(operand); if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(mod), + err_union_ty.fmt(pt), }); } return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false); @@ -9053,7 +9145,8 @@ fn analyzeErrUnionPayload( operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.getErrorName(mod).unwrap()) |name| { @@ -9098,19 +9191,20 @@ fn analyzeErrUnionPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(zcu) == .Pointer); if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.childType(zcu).fmt(zcu), + operand_ty.childType(zcu).fmt(pt), }); } const err_union_ty = operand_ty.childType(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const operand_pointer_ty = try zcu.ptrTypeSema(.{ + const operand_pointer_ty = try pt.ptrTypeSema(.{ .child = payload_ty.toIntern(), .flags = .{ .is_const = 
operand_ty.isConstPtr(zcu), @@ -9123,8 +9217,8 @@ fn analyzeErrUnionPayloadPtr( if (sema.isComptimeMutablePtr(ptr_val)) { // Set the error union to non-error at comptime. // If the payload is OPV, we must use that value instead of undef. - const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty); - const eu_val = try zcu.intern(.{ .error_union = .{ + const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); + const eu_val = try pt.intern(.{ .error_union = .{ .ty = err_union_ty.toIntern(), .val = .{ .payload = payload_val.toIntern() }, } }); @@ -9135,13 +9229,13 @@ fn analyzeErrUnionPayloadPtr( const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { if (val.getErrorName(zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } - return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern()); } } @@ -9175,18 +9269,19 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); } const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - return Air.internedToRef((try mod.intern(.{ .err = .{ + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = result_ty.toIntern(), .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, } }))); @@ -9208,13 +9303,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.childType(mod).fmt(mod), + operand_ty.childType(mod).fmt(pt), }); } @@ -9223,7 +9319,7 @@ fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { assert(val.getErrorName(mod) != .none); - return Air.internedToRef((try mod.intern(.{ .err = .{ + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = result_ty.toIntern(), .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, } }))); @@ -9240,10 +9336,11 @@ fn zirFunc( inst: Zir.Inst.Index, inferred_error_set: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra 
= sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node }); var extra_index = extra.end; @@ -9372,7 +9469,8 @@ fn handleExternLibName( lib_name: []const u8, ) CompileError!void { blk: { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const comp = mod.comp; const target = mod.getTarget(); log.debug("extern fn symbol expected in lib '{s}'", .{lib_name}); @@ -9485,7 +9583,8 @@ fn funcCommon( noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const target = mod.getTarget(); const ip = &mod.intern_pool; @@ -9539,13 +9638,13 @@ fn funcCommon( if (!param_ty.isValidParamType(mod)) { const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{ - opaque_str, param_ty.fmt(mod), + opaque_str, param_ty.fmt(pt), }); } if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ - param_ty.fmt(mod), @tagName(cc_resolved), + param_ty.fmt(pt), @tagName(cc_resolved), }); errdefer msg.destroy(sema.gpa); @@ -9559,7 +9658,7 @@ fn funcCommon( if (is_source_decl and requires_comptime and !param_is_comptime and has_body and !block.is_comptime) { const msg = msg: { const msg = try sema.errMsg(param_src, "parameter of type '{}' must be declared comptime", .{ - param_ty.fmt(mod), + param_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); @@ -9580,7 +9679,7 @@ fn funcCommon( const err_code_size = target.ptrBitWidth(); switch (i) { 0 => if (param_ty.zigTypeTag(mod) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}), - 1 => if (param_ty.bitSize(mod) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}), + 1 => if (param_ty.bitSize(pt) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}), else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}), } } else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}), @@ -9606,7 +9705,7 @@ fn funcCommon( if (inferred_error_set) { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); } - const func_index = try ip.getFuncInstance(gpa, .{ + const func_index = try ip.getFuncInstance(gpa, pt.tid, .{ .param_types = param_types, .noalias_bits = noalias_bits, .bare_return_type = bare_return_type.toIntern(), @@ -9655,7 +9754,7 @@ fn funcCommon( assert(has_body); if (!ret_poison) try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); - const func_index = try ip.getFuncDeclIes(gpa, .{ + const func_index = try ip.getFuncDeclIes(gpa, pt.tid, .{ .owner_decl = sema.owner_decl_index, .param_types = param_types, @@ -9695,7 +9794,7 @@ fn funcCommon( ); } - const func_ty = try ip.getFuncType(gpa, .{ + const func_ty = try 
ip.getFuncType(gpa, pt.tid, .{ .param_types = param_types, .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, @@ -9718,7 +9817,7 @@ fn funcCommon( if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, block.src(.{ .node_offset_lib_name = src_node_offset, }), lib_name); - const func_index = try ip.getExternFunc(gpa, .{ + const func_index = try ip.getExternFunc(gpa, pt.tid, .{ .ty = func_ty, .decl = sema.owner_decl_index, .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name, .no_embedded_nulls), @@ -9743,7 +9842,7 @@ fn funcCommon( } if (has_body) { - const func_index = try ip.getFuncDecl(gpa, .{ + const func_index = try ip.getFuncDecl(gpa, pt.tid, .{ .owner_decl = sema.owner_decl_index, .ty = func_ty, .cc = cc, @@ -9809,7 +9908,8 @@ fn finishFunc( is_generic: bool, final_is_generic: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const gpa = sema.gpa; const target = mod.getTarget(); @@ -9822,7 +9922,7 @@ fn finishFunc( if (!return_type.isValidReturnType(mod)) { const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{ - opaque_str, return_type.fmt(mod), + opaque_str, return_type.fmt(pt), }); } if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and @@ -9830,7 +9930,7 @@ fn finishFunc( { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ - return_type.fmt(mod), @tagName(cc_resolved), + return_type.fmt(pt), @tagName(cc_resolved), }); errdefer msg.destroy(gpa); @@ -9852,7 +9952,7 @@ fn finishFunc( const msg = try sema.errMsg( ret_ty_src, "function with comptime-only return type '{}' requires all parameters to be comptime", - .{return_type.fmt(mod)}, + .{return_type.fmt(pt)}, ); try sema.explainWhyTypeIsComptime(msg, ret_ty_src, return_type); @@ -9938,8 +10038,8 @@ fn finishFunc( if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can // lower this fn type. - const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try unresolved_stack_trace_ty.resolveFields(mod); + const unresolved_stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try unresolved_stack_trace_ty.resolveFields(pt); } return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty); @@ -10068,7 +10168,8 @@ fn analyzeAs( zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand = try sema.resolveInst(zir_operand); const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, @@ -10098,7 +10199,8 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const tracy = trace(@src()); defer tracy.end(); - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); @@ -10106,12 +10208,12 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const ptr_ty = operand_ty.scalarType(zcu); const is_vector = operand_ty.zigTypeTag(zcu) == .Vector; if (!ptr_ty.isPtrAtRuntime(zcu)) { - return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(zcu)}); + return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}); } const pointee_ty = ptr_ty.childType(zcu); if (try sema.typeRequiresComptime(ptr_ty)) { const msg = msg: { - const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(zcu)}); + const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, ptr_src, pointee_ty); break :msg msg; @@ -10121,32 +10223,32 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveValueIntable(operand)) |operand_val| ct: { if (!is_vector) { if (operand_val.isUndef(zcu)) { - return Air.internedToRef((try zcu.undefValue(Type.usize)).toIntern()); + return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern()); } - return Air.internedToRef((try zcu.intValue( + return Air.internedToRef((try pt.intValue( Type.usize, - (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?, + (try operand_val.getUnsignedIntAdvanced(pt, .sema)).?, )).toIntern()); } const len = operand_ty.vectorLen(zcu); - const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len }); + const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len }); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) |*new_elem, i| { - const ptr_val = try operand_val.elemValue(zcu, i); + const ptr_val = try operand_val.elemValue(pt, i); if (ptr_val.isUndef(zcu)) { - new_elem.* = (try zcu.undefValue(Type.usize)).toIntern(); + new_elem.* = (try pt.undefValue(Type.usize)).toIntern(); continue; } - const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse { + const addr = try ptr_val.getUnsignedIntAdvanced(pt, .sema) orelse { // A vector element wasn't an integer pointer. This is a runtime operation. break :ct; }; - new_elem.* = (try zcu.intValue( + new_elem.* = (try pt.intValue( Type.usize, addr, )).toIntern(); } - return Air.internedToRef(try zcu.intern(.{ .aggregate = .{ + return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); @@ -10157,10 +10259,10 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return block.addUnOp(.int_from_ptr, operand); } const len = operand_ty.vectorLen(zcu); - const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len }); + const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len }); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) 
|*new_elem, i| { - const idx_ref = try zcu.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addUnOp(.int_from_ptr, old_elem); } @@ -10171,7 +10273,8 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node }); @@ -10189,7 +10292,8 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node }); @@ -10207,7 +10311,8 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node }); @@ -10284,7 +10389,8 @@ fn intCast( operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); @@ -10307,7 +10413,7 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zeros = try sema.splat(operand_ty, try pt.intValue(operand_scalar_ty, 0)); const zero_inst = Air.internedToRef(zeros.toIntern()); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ @@ -10316,7 +10422,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern()); + const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern()); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -10339,7 +10445,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(pt, operand_scalar_ty); const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = Air.internedToRef(dest_max_val.toIntern()); @@ -10348,8 +10454,8 @@ fn intCast( // Reinterpret the sign-bit as part of the value. This will make // negative differences (`operand` > `dest_max`) appear too big. 
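// Illustrative aside, not part of the diff: the contract these range checks
// enforce. @intCast assumes the operand fits in the destination type; a value
// that does not fit fails the generated comparison and panics in
// safety-checked builds (or is a compile error when comptime-known).
const std = @import("std");

test "@intCast preserves values that fit in the destination" {
    var wide: u32 = 200;
    _ = &wide; // keep the value runtime-known
    const narrow: u8 = @intCast(wide);
    try std.testing.expectEqual(@as(u8, 200), narrow);
}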
- const unsigned_scalar_operand_ty = try mod.intType(.unsigned, actual_bits); - const unsigned_operand_ty = if (is_vector) try mod.vectorType(.{ + const unsigned_scalar_operand_ty = try pt.intType(.unsigned, actual_bits); + const unsigned_operand_ty = if (is_vector) try pt.vectorType(.{ .len = dest_ty.vectorLen(mod), .child = unsigned_scalar_operand_ty.toIntern(), }) else unsigned_scalar_operand_ty; @@ -10358,14 +10464,14 @@ fn intCast( // If the destination type is signed, then we need to double its // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const one_scalar = try mod.intValue(unsigned_scalar_operand_ty, 1); - const one = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ + const one_scalar = try pt.intValue(unsigned_scalar_operand_ty, 1); + const one = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = unsigned_operand_ty.toIntern(), .storage = .{ .repeated_elem = one_scalar.toIntern() }, - } }))) else one_scalar; - const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); + } })) else one_scalar; + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, pt); break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined); - } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); + } else try pt.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = Air.internedToRef(dest_range_val.toIntern()); const ok = if (is_vector) ok: { @@ -10405,7 +10511,7 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const scalar_zero = try pt.intValue(operand_scalar_ty, 0); const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = Air.internedToRef(zero_val.toIntern()); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); @@ -10418,7 +10524,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern()); + const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern()); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -10432,7 +10538,8 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -10457,14 +10564,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), + => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", 
.{operand_ty.fmt(pt)}), else => {}, } @@ -10475,11 +10582,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}), - .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}), + .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}), else => {}, } @@ -10494,7 +10601,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => unreachable, }; return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ - dest_ty.fmt(mod), container, + dest_ty.fmt(pt), container, }); }, @@ -10521,14 +10628,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}), + => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); + const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}), else => {}, } @@ -10538,11 +10645,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, .Pointer => { const msg = msg: { - const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); + const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(mod)}), - .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}), + .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}), + .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}), else => {}, } @@ -10557,7 +10664,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => unreachable, }; return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{ - operand_ty.fmt(mod), container, + operand_ty.fmt(pt), container, }); }, @@ -10575,7 +10682,8 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const 
operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -10599,7 +10707,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, src, "expected float or vector type, found '{}'", - .{dest_ty.fmt(mod)}, + .{dest_ty.fmt(pt)}, ), }; @@ -10609,21 +10717,21 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, operand_src, "expected float or vector type, found '{}'", - .{operand_ty.fmt(mod)}, + .{operand_ty.fmt(pt)}, ), } if (try sema.resolveValue(operand)) |operand_val| { if (!is_vector) { - return Air.internedToRef((try operand_val.floatCast(dest_ty, mod)).toIntern()); + return Air.internedToRef((try operand_val.floatCast(dest_ty, pt)).toIntern()); } const vec_len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(InternPool.Index, vec_len); for (new_elems, 0..) |*new_elem, i| { - const old_elem = try operand_val.elemValue(mod, i); - new_elem.* = (try old_elem.floatCast(dest_scalar_ty, mod)).toIntern(); + const old_elem = try operand_val.elemValue(pt, i); + new_elem.* = (try old_elem.floatCast(dest_scalar_ty, pt)).toIntern(); } - return Air.internedToRef(try mod.intern(.{ .aggregate = .{ + return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); @@ -10644,7 +10752,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const vec_len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len); for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem); } @@ -10681,10 +10789,9 @@ fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm; const array = try sema.resolveInst(inst_data.operand); - const elem_index = try mod.intRef(Type.usize, inst_data.idx); + const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx); return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false); } @@ -10692,7 +10799,8 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -10703,7 +10811,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const capture_src = block.src(.{ .for_capture_from_input = inst_data.src_node }); const msg = msg: { const msg = try sema.errMsg(capture_src, "pointer capture of non pointer type '{}'", .{ - indexable_ty.fmt(mod), + indexable_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); if (indexable_ty.isIndexable(mod)) { @@ -10734,12 +10842,13 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.ptr); - const elem_index = try sema.mod.intRef(Type.usize, extra.index); + const elem_index = try pt.intRef(Type.usize, extra.index); const array_ty = sema.typeOf(array_ptr).childType(mod); switch (array_ty.zigTypeTag(mod)) { .Array, .Vector => {}, @@ -10892,7 +11001,7 @@ const SwitchProngAnalysis = struct { inline_case_capture, ); - if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) { + if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) { // This prong should be unreachable! 
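// Illustrative aside, not part of the diff: the capture forms that
// SwitchProngAnalysis lowers. A switch on a tagged union can capture the
// active payload by value or, when the operand is addressable, by pointer.
const std = @import("std");

test "switch payload capture by pointer mutates the union in place" {
    const U = union(enum) { a: u32, b: f32 };
    var u: U = .{ .a = 1 };
    switch (u) {
        .a => |*payload| payload.* += 1,
        .b => {},
    }
    try std.testing.expectEqual(@as(u32, 2), u.a);
}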
return .unreachable_value; } @@ -10948,7 +11057,7 @@ const SwitchProngAnalysis = struct { inline_case_capture, ); - if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) { + if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) { // No need to analyze any further, the prong is unreachable return; } @@ -10968,7 +11077,8 @@ const SwitchProngAnalysis = struct { inline_case_capture: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const sema = spa.sema; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(spa.operand); if (operand_ty.zigTypeTag(mod) != .Union) { const tag_capture_src: LazySrcLoc = .{ @@ -10976,7 +11086,7 @@ const SwitchProngAnalysis = struct { .offset = .{ .switch_tag_capture = capture_src.offset.switch_capture }, }; return sema.fail(block, tag_capture_src, "cannot capture tag of non-union type '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); } assert(inline_case_capture != .none); @@ -10993,7 +11103,8 @@ const SwitchProngAnalysis = struct { inline_case_capture: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const sema = spa.sema; - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const zir_datas = sema.code.instructions.items(.data); @@ -11010,7 +11121,7 @@ const SwitchProngAnalysis = struct { const union_obj = zcu.typeToUnion(operand_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (capture_byref) { - const ptr_field_ty = try zcu.ptrTypeSema(.{ + const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !operand_ptr_ty.ptrIsMutable(zcu), @@ -11019,7 +11130,7 @@ const SwitchProngAnalysis = struct { }, }); if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { - return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern()); + return Air.internedToRef((try union_ptr.ptrField(field_index, pt)).toIntern()); } return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); } else { @@ -11078,7 +11189,7 @@ const SwitchProngAnalysis = struct { const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (dummy_captures, field_indices) |*dummy, field_idx| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); - dummy.* = try zcu.undefRef(field_ty); + dummy.* = try pt.undefRef(field_ty); } const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len); @@ -11113,7 +11224,7 @@ const SwitchProngAnalysis = struct { const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (field_indices, dummy_captures) |field_idx, *dummy| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); - const field_ptr_ty = try zcu.ptrTypeSema(.{ + const field_ptr_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = operand_ptr_info.flags.is_const, @@ -11122,7 +11233,7 @@ const SwitchProngAnalysis = struct { .alignment = union_obj.fieldAlign(ip, field_idx), }, }); - dummy.* = try zcu.undefRef(field_ptr_ty); + dummy.* = try pt.undefRef(field_ptr_ty); } const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len); for (case_srcs, 0..) 
|*case_src, i| { @@ -11148,9 +11259,9 @@ const SwitchProngAnalysis = struct { }; if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| { - if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty); - const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu); - return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); + if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty); + const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt); + return Air.internedToRef((try pt.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -11158,9 +11269,9 @@ const SwitchProngAnalysis = struct { } if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| { - if (operand_val.isUndef(zcu)) return zcu.undefRef(capture_ty); + if (operand_val.isUndef(zcu)) return pt.undefRef(capture_ty); const union_val = ip.indexToKey(operand_val.toIntern()).un; - if (Value.fromInterned(union_val.tag).isUndef(zcu)) return zcu.undefRef(capture_ty); + if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty); const uncoerced = Air.internedToRef(union_val.val); return sema.coerce(block, capture_ty, uncoerced, operand_src); } @@ -11304,7 +11415,7 @@ const SwitchProngAnalysis = struct { if (case_vals.len == 1) { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable; - const item_ty = try zcu.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?); + const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?); return sema.bitCast(block, item_ty, spa.operand, operand_src, null); } @@ -11314,7 +11425,7 @@ const SwitchProngAnalysis = struct { const err_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, err, undefined) catch unreachable; names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {}); } - const error_ty = try zcu.errorSetFromUnsortedNames(names.keys()); + const error_ty = try pt.errorSetFromUnsortedNames(names.keys()); return sema.bitCast(block, error_ty, spa.operand, operand_src, null); }, else => { @@ -11336,7 +11447,8 @@ fn switchCond( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { .Type, @@ -11353,7 +11465,7 @@ fn switchCond( .Enum, => { if (operand_ty.isSlice(mod)) { - return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}); + return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { return Air.internedToRef(opv.toIntern()); @@ -11362,7 +11474,7 @@ fn switchCond( }, .Union => { - try operand_ty.resolveFields(mod); + try operand_ty.resolveFields(pt); const enum_ty = operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(src, "switch on union with no attached enum", .{}); @@ -11388,7 +11500,7 @@ fn switchCond( .Vector, .Frame, .AnyFrame, - => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}), + => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}), } } @@ -11398,7 +11510,8 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const 
mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const switch_src = block.nodeOffset(inst_data.src_node); @@ -11489,7 +11602,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp if (operand_err_set.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, switch_src, "expected error union type, found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); } @@ -11571,7 +11684,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp if (operand_val.errorUnionIsPayload(mod)) { return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges); } else { - const err_val = Value.fromInterned(try mod.intern(.{ + const err_val = Value.fromInterned(try pt.intern(.{ .err = .{ .ty = operand_err_set_ty.toIntern(), .name = operand_val.getErrorName(mod).unwrap().?, @@ -11708,7 +11821,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); @@ -11783,7 +11897,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?LazySrcLoc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); - var range_set = RangeSet.init(gpa, mod); + var range_set = RangeSet.init(gpa, pt); var true_count: u8 = 0; var false_count: u8 = 0; @@ -11924,7 +12038,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r operand_ty.srcLoc(mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(mod)}, + .{operand_ty.fmt(pt)}, ); break :msg msg; }; @@ -12030,8 +12144,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - const min_int = try operand_ty.minInt(mod, operand_ty); - const max_int = try operand_ty.maxInt(mod, operand_ty); + const min_int = try operand_ty.minInt(pt, operand_ty); + const max_int = try operand_ty.maxInt(pt, operand_ty); if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( @@ -12136,7 +12250,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(mod)}, + .{operand_ty.fmt(pt)}, ); } @@ -12212,7 +12326,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }), } @@ -12386,7 +12500,8 @@ fn analyzeSwitchRuntimeBlock( cond_dbg_node_index: Zir.Inst.Index, allow_err_code_unwrap: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -12496,9 +12611,9 @@ fn analyzeSwitchRuntimeBlock( var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_first_ref, undefined) catch unreachable; const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_last_ref, undefined) catch unreachable; - while (item.compareScalar(.lte, item_last, operand_ty, mod)) : 
({ + while (item.compareScalar(.lte, item_last, operand_ty, pt)) : ({ // Previous validation has resolved any possible lazy values. - item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) { + item = sema.intAddScalar(item, try pt.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) { error.Overflow => unreachable, else => |e| return e, }; @@ -12537,7 +12652,7 @@ fn analyzeSwitchRuntimeBlock( cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); - if (item.compareScalar(.eq, item_last, operand_ty, mod)) break; + if (item.compareScalar(.eq, item_last, operand_ty, pt)) break; } } @@ -12744,14 +12859,14 @@ fn analyzeSwitchRuntimeBlock( .Enum => { if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); } for (seen_enum_fields, 0..) |f, i| { if (f != null) continue; cases_len += 1; - const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(i)); + const item_val = try pt.enumValueFieldIndex(operand_ty, @intCast(i)); const item_ref = Air.internedToRef(item_val.toIntern()); case_block.instructions.shrinkRetainingCapacity(0); @@ -12793,7 +12908,7 @@ fn analyzeSwitchRuntimeBlock( .ErrorSet => { if (operand_ty.isAnyError(mod)) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }); } const error_names = operand_ty.errorSetNames(mod); @@ -12802,7 +12917,7 @@ fn analyzeSwitchRuntimeBlock( if (seen_errors.contains(error_name)) continue; cases_len += 1; - const item_val = try mod.intern(.{ .err = .{ + const item_val = try pt.intern(.{ .err = .{ .ty = operand_ty.toIntern(), .name = error_name, } }); @@ -12930,7 +13045,7 @@ fn analyzeSwitchRuntimeBlock( } }, else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }), }; @@ -13051,7 +13166,7 @@ fn resolveSwitchComptime( const item = case_vals.items[scalar_i]; const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) { if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand); return spa.resolveProngComptime( child_block, @@ -13088,7 +13203,7 @@ fn resolveSwitchComptime( for (items) |item| { // Validation above ensured these will succeed. 
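// ============================================================================
// Editorial aside — not part of the patch. Every hunk in this stretch applies
// the same mechanical rewrite: `const mod = sema.mod;` becomes
// `const pt = sema.pt; const mod = pt.zcu;`, read-only type queries keep
// taking `mod`, and anything that creates or interns a value (`intValue`,
// `intern`, `undefRef`, `fmt`, ...) moves to the `Zcu.PerThread` handle.
// A minimal sketch of that shape, using hypothetical stand-in types rather
// than the compiler's real ones:
//
// const std = @import("std");
//
// const Zcu = struct {
//     /// Shared, read-mostly state (type table, target info, ...).
//     int_bits: u16 = 32,
//
//     const PerThread = struct {
//         zcu: *Zcu,
//         tid: usize,
//
//         /// Value creation goes through the per-thread handle so each
//         /// thread can intern into its own shard.
//         fn intValue(pt: PerThread, x: i64) i64 {
//             _ = pt.tid; // a real implementation would pick a shard by tid
//             return x;
//         }
//     };
// };
//
// fn analyze(pt: Zcu.PerThread) i64 {
//     const mod = pt.zcu; // read-only queries still use the Zcu directly
//     return pt.intValue(mod.int_bits); // value creation uses `pt`
// }
//
// pub fn main() void {
//     var zcu: Zcu = .{};
//     std.debug.print("{d}\n", .{analyze(.{ .zcu = &zcu, .tid = 0 })});
// }
// ===================================================== end editorial aside ==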
const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) { if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand); return spa.resolveProngComptime( child_block, @@ -13162,7 +13277,7 @@ fn resolveSwitchComptime( } const RangeSetUnhandledIterator = struct { - mod: *Module, + pt: Zcu.PerThread, cur: ?InternPool.Index, max: InternPool.Index, range_i: usize, @@ -13172,13 +13287,13 @@ const RangeSetUnhandledIterator = struct { const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128); fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { - const mod = sema.mod; - const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type; + const pt = sema.pt; + const int_type = pt.zcu.intern_pool.indexToKey(ty.toIntern()).int_type; const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits); return .{ - .mod = mod, - .cur = (try ty.minInt(mod, ty)).toIntern(), - .max = (try ty.maxInt(mod, ty)).toIntern(), + .pt = pt, + .cur = (try ty.minInt(pt, ty)).toIntern(), + .max = (try ty.maxInt(pt, ty)).toIntern(), .range_i = 0, .ranges = range_set.ranges.items, .limbs = if (needed_limbs > preallocated_limbs) @@ -13190,13 +13305,13 @@ const RangeSetUnhandledIterator = struct { fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index { if (val == it.max) return null; - const int = it.mod.intern_pool.indexToKey(val).int; + const int = it.pt.zcu.intern_pool.indexToKey(val).int; switch (int.storage) { inline .u64, .i64 => |val_int| { const next_int = @addWithOverflow(val_int, 1); if (next_int[1] == 0) - return (try it.mod.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern(); + return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern(); }, .big_int => {}, .lazy_align, .lazy_size => unreachable, @@ -13212,7 +13327,7 @@ const RangeSetUnhandledIterator = struct { ); result_bigint.addScalar(val_bigint, 1); - return (try it.mod.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern(); + return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern(); } fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index { @@ -13274,7 +13389,8 @@ fn validateErrSetSwitch( has_else: bool, ) CompileError!?Type { const gpa = sema.gpa; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const src_node_offset = inst_data.src_node; @@ -13426,7 +13542,7 @@ fn validateErrSetSwitch( } // No need to keep the hash map metadata correct; here we // extract the (sorted) keys only. 
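// ============================================================================
// Editorial aside — not part of the patch. `RangeSetUnhandledIterator.addOne`
// above is a two-tier increment: try the machine-word fast path with
// @addWithOverflow, and fall back to big-integer arithmetic only when the
// word overflows. The same idea, self-contained (u128 stands in for the
// big-int fallback):
//
// const std = @import("std");
//
// fn addOne(val: u64) u128 {
//     const sum = @addWithOverflow(val, 1);
//     if (sum[1] == 0) return sum[0]; // fast path: still fits in u64
//     return @as(u128, val) + 1; // slow path: widen, like the big-int branch
// }
//
// test "addOne fast and slow paths" {
//     try std.testing.expectEqual(@as(u128, 8), addOne(7));
//     try std.testing.expectEqual(@as(u128, 1) << 64, addOne(std.math.maxInt(u64)));
// }
// ===================================================== end editorial aside ==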
- return try mod.errorSetFromUnsortedNames(names.keys()); + return try pt.errorSetFromUnsortedNames(names.keys()); }, } return null; @@ -13441,7 +13557,6 @@ fn validateSwitchRange( operand_ty: Type, item_src: LazySrcLoc, ) CompileError![2]Air.Inst.Ref { - const mod = sema.mod; const first_src: LazySrcLoc = .{ .base_node_inst = item_src.base_node_inst, .offset = .{ .switch_case_item_range_first = item_src.offset.switch_case_item }, @@ -13452,7 +13567,7 @@ fn validateSwitchRange( }; const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, first_src); const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, last_src); - if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, mod)) { + if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, sema.pt)) { return sema.fail(block, item_src, "range start value is greater than the end value", .{}); } const maybe_prev_src = try range_set.add(first.val, last.val, item_src); @@ -13483,7 +13598,7 @@ fn validateSwitchItemEnum( operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const ip = &sema.mod.intern_pool; + const ip = &sema.pt.zcu.intern_pool; const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); const int = ip.indexToKey(item.val).enum_tag.int; const field_index = ip.loadEnumType(ip.typeOf(item.val)).tagValueIndex(ip, int) orelse { @@ -13505,9 +13620,8 @@ fn validateSwitchItemError( operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const ip = &sema.mod.intern_pool; const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); - const error_name = ip.indexToKey(item.val).err.name; + const error_name = sema.pt.zcu.intern_pool.indexToKey(item.val).err.name; const maybe_prev_src = if (try seen_errors.fetchPut(error_name, item_src)) |prev| prev.value else @@ -13593,7 +13707,7 @@ fn validateSwitchNoRange( const msg = try sema.errMsg( operand_src, "ranges not allowed when switching on type '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(sema.pt)}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( @@ -13615,7 +13729,8 @@ fn maybeErrorUnwrap( operand_src: LazySrcLoc, allow_err_code_inst: bool, ) !bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false; const tags = sema.code.instructions.items(.tag); @@ -13654,7 +13769,7 @@ fn maybeErrorUnwrap( return true; } - const panic_fn = try mod.getBuiltin("panicUnwrapError"); + const panic_fn = try pt.getBuiltin("panicUnwrapError"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [2]Air.Inst.Ref = .{ err_return_trace, operand }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13664,7 +13779,7 @@ fn maybeErrorUnwrap( const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const msg_inst = try sema.resolveInst(inst_data.operand); - const panic_fn = try mod.getBuiltin("panic"); + const panic_fn = try pt.getBuiltin("panic"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13680,7 +13795,8 @@ fn maybeErrorUnwrap( } fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void { - const mod 
= sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const index = cond.toIndex() orelse return; if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) return; @@ -13713,14 +13829,15 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I const src = block.nodeOffset(inst_data.src_node); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.getErrorName(sema.mod).unwrap()) |name| { + if (val.getErrorName(sema.pt.zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } } } fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -13729,7 +13846,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{ .needed_comptime_reason = "field name must be comptime-known", }); - try ty.resolveFields(mod); + try ty.resolveFields(pt); const ip = &mod.intern_pool; const has_field = hf: { @@ -13764,14 +13881,15 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => {}, } return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ - ty.fmt(mod), + ty.fmt(pt), }); }; return if (has_field) .bool_true else .bool_false; } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -13804,7 +13922,8 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const operand_src = block.tokenOffset(inst_data.src_tok); const operand = inst_data.get(sema.code); @@ -13824,7 +13943,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; - try zcu.ensureFileAnalyzed(result.file_index); + try pt.ensureFileAnalyzed(result.file_index); const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; return sema.analyzeDeclVal(block, operand_src, file_root_decl_index); } @@ -13833,7 +13952,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const name = try sema.resolveConstString(block, operand_src, inst_data.operand, .{ @@ -13844,7 +13963,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, operand_src, "file path name cannot be empty", .{}); } - const val = mod.embedFile(block.getFileScope(mod), name, operand_src) catch |err| switch (err) { + const val = pt.embedFile(block.getFileScope(pt.zcu), name, operand_src) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, @@ -13859,7 +13978,8 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try mod.intern_pool.getOrPutString( sema.gpa, @@ -13867,8 +13987,8 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R .no_embedded_nulls, ); _ = try mod.getErrorValue(name); - const error_set_type = try mod.singleErrorSetType(name); - return Air.internedToRef((try mod.intern(.{ .err = .{ + const error_set_type = try pt.singleErrorSetType(name); + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, } }))); @@ -13883,7 +14003,8 @@ fn zirShl( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -13906,53 +14027,53 @@ fn zirShl( if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(sema.typeOf(lhs)); + return pt.undefRef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. 
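// ============================================================================
// Editorial aside — not part of the patch. `zirShl` opens with two cheap
// comptime early-outs: an undef shift amount makes the whole result undef,
// and a shift amount of zero returns the LHS untouched (the identity below),
// ahead of the width and negativity checks in the lines that follow.
//
// const std = @import("std");
//
// test "shift by zero is the identity" {
//     const lhs: u8 = 0b1010_0101;
//     const amt: u3 = 0; // u8 shift amounts are 3 bits wide (log2(8))
//     try std.testing.expectEqual(lhs, lhs << amt);
// }
// ===================================================== end editorial aside ==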
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { - const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); + const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(mod, i); - if (rhs_elem.compareHetero(.gte, bit_value, mod)) { + const rhs_elem = try rhs_val.elemValue(pt, i); + if (rhs_elem.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - rhs_elem.fmtValue(mod, sema), + rhs_elem.fmtValue(pt, sema), i, - scalar_ty.fmt(mod), + scalar_ty.fmt(pt), }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { + } else if (rhs_val.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ - rhs_val.fmtValue(mod, sema), - scalar_ty.fmt(mod), + rhs_val.fmtValue(pt, sema), + scalar_ty.fmt(pt), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(mod, i); - if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { + const rhs_elem = try rhs_val.elemValue(pt, i); + if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_elem.fmtValue(mod, sema), + rhs_elem.fmtValue(pt, sema), i, }); } } - } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { + } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ - rhs_val.fmtValue(mod, sema), + rhs_val.fmtValue(pt, sema), }); } } const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { - if (lhs_val.isUndef(mod)) return mod.undefRef(lhs_ty); + if (lhs_val.isUndef(mod)) return pt.undefRef(lhs_ty); const rhs_val = maybe_rhs_val orelse { if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); @@ -13960,17 +14081,17 @@ fn zirShl( break :rs rhs_src; }; const val = if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) - try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena, pt) else switch (air_tag) { .shl_exact => val: { - const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod); - if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) { + const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, pt); + if (shifted.overflow_bit.compareAllWithZero(.eq, pt)) { break :val shifted.wrapped_result; } return sema.fail(block, src, "operation caused overflow", .{}); }, - .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod), - .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod), + .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, pt), + .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, pt), else => unreachable, }; return Air.internedToRef(val.toIntern()); @@ -13981,7 +14102,7 @@ fn zirShl( if (rhs_is_comptime_int or scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits) { - const max_int = Air.internedToRef((try 
lhs_ty.maxInt(mod, lhs_ty)).toIntern()); + const max_int = Air.internedToRef((try lhs_ty.maxInt(pt, lhs_ty)).toIntern()); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); } else { @@ -13993,7 +14114,7 @@ fn zirShl( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count); + const bit_count_val = try pt.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern()); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); @@ -14034,7 +14155,7 @@ fn zirShl( }) else ov_bit; - const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern()); + const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, src, no_ov, .shl_overflow); @@ -14053,7 +14174,8 @@ fn zirShr( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -14071,61 +14193,61 @@ fn zirShr( const runtime_src = if (maybe_rhs_val) |rhs_val| rs: { if (rhs_val.isUndef(mod)) { - return mod.undefRef(lhs_ty); + return pt.undefRef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { - const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); + const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(mod, i); - if (rhs_elem.compareHetero(.gte, bit_value, mod)) { + const rhs_elem = try rhs_val.elemValue(pt, i); + if (rhs_elem.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - rhs_elem.fmtValue(mod, sema), + rhs_elem.fmtValue(pt, sema), i, - scalar_ty.fmt(mod), + scalar_ty.fmt(pt), }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { + } else if (rhs_val.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ - rhs_val.fmtValue(mod, sema), - scalar_ty.fmt(mod), + rhs_val.fmtValue(pt, sema), + scalar_ty.fmt(pt), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(mod, i); - if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { + const rhs_elem = try rhs_val.elemValue(pt, i); + if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(mod), 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_elem.fmtValue(mod, sema), + rhs_elem.fmtValue(pt, sema), i, }); } } - } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 
0), mod)) { + } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ - rhs_val.fmtValue(mod, sema), + rhs_val.fmtValue(pt, sema), }); } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(lhs_ty); + return pt.undefRef(lhs_ty); } if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. - const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod); - if (!(try truncated.compareAllWithZeroSema(.eq, mod))) { + const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, pt); + if (!(try truncated.compareAllWithZeroSema(.eq, pt))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } - const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod); + const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else { break :rs lhs_src; @@ -14141,7 +14263,7 @@ fn zirShr( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count); + const bit_count_val = try pt.intValue(rhs_ty.scalarType(mod), bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern()); @@ -14188,7 +14310,8 @@ fn zirBitwise( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -14220,9 +14343,9 @@ fn zirBitwise( if (try sema.resolveValueIntable(casted_lhs)) |lhs_val| { if (try sema.resolveValueIntable(casted_rhs)) |rhs_val| { const result_val = switch (air_tag) { - .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod), - .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod), - .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod), + .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, pt), + .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, pt), + .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, pt), else => unreachable, }; return Air.internedToRef(result_val.toIntern()); @@ -14242,7 +14365,8 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); @@ -14253,26 +14377,26 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
if (scalar_type.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ - operand_type.fmt(mod), + operand_type.fmt(pt), }); } if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) { - return mod.undefRef(operand_type); + return pt.undefRef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, pt)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_type.toIntern(), .storage = .{ .elems = elems }, } }))); } else { - const result_val = try val.bitwiseNot(operand_type, sema.arena, mod); + const result_val = try val.bitwiseNot(operand_type, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); } } @@ -14288,7 +14412,8 @@ fn analyzeTupleCat( lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const src = block.nodeOffset(src_node); @@ -14344,14 +14469,14 @@ fn analyzeTupleCat( break :rs runtime_src; }; - const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{ + const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try mod.intern(.{ .aggregate = .{ + const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); @@ -14386,7 +14511,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -14406,11 +14532,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: { if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined); - return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); + return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)}); }; const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse { assert(!rhs_is_tuple); - return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)}); + return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(pt)}); }; const resolved_elem_ty = t: { @@ -14472,7 +14598,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), }; - const result_ty = try mod.arrayType(.{ + const result_ty = try pt.arrayType(.{ .len = result_len, .sentinel = if (res_sent_val) |v| v.toIntern() else .none, .child = resolved_elem_ty.toIntern(), @@ -14512,7 +14638,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileError!Ai while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(pt, lhs_elem_i) else elem_default_val; const elem_val_inst = Air.internedToRef(elem_val.toIntern()); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, @@ -14525,7 +14651,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(pt, rhs_elem_i) else elem_default_val; const elem_val_inst = Air.internedToRef(elem_val.toIntern()); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, @@ -14535,7 +14661,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined); element_vals[elem_i] = coerced_elem_val.toIntern(); } - return sema.addConstantMaybeRef(try mod.intern(.{ .aggregate = .{ + return sema.addConstantMaybeRef(try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }), ptr_addrspace != null); @@ -14545,19 +14671,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try mod.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try mod.ptrTypeSema(.{ + const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = resolved_elem_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { - const elem_index = try mod.intRef(Type.usize, elem_i); + const elem_index = try pt.intRef(Type.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, @@ -14568,8 +14694,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const elem_index = try mod.intRef(Type.usize, elem_i); - const rhs_index = try mod.intRef(Type.usize, rhs_elem_i); + const elem_index = try pt.intRef(Type.usize, elem_i); + const rhs_index = try pt.intRef(Type.usize, rhs_elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, @@ -14579,9 +14705,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.storePtr2(block, src, elem_ptr, src, init, 
operand_src, .store); } if (res_sent_val) |sent_val| { - const elem_index = try mod.intRef(Type.usize, result_len); + const elem_index = try pt.intRef(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); - const init = Air.internedToRef((try mod.getCoerced(sent_val, lhs_info.elem_type)).toIntern()); + const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } @@ -14592,7 +14718,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai { var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { - const index = try mod.intRef(Type.usize, elem_i); + const index = try pt.intRef(Type.usize, elem_i); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = elem_i, @@ -14602,7 +14728,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const index = try mod.intRef(Type.usize, rhs_elem_i); + const index = try pt.intRef(Type.usize, rhs_elem_i); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = @intCast(rhs_elem_i), @@ -14616,7 +14742,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { .Array => return operand_ty.arrayInfo(mod), @@ -14633,7 +14760,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins .none => null, else => Value.fromInterned(ptr_info.sentinel), }, - .len = try val.sliceLen(mod), + .len = try val.sliceLen(pt), }; }, .One => { @@ -14666,7 +14793,8 @@ fn analyzeTupleMul( operand: Air.Inst.Ref, factor: usize, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); const src = block.nodeOffset(src_node); const len_src = block.src(.{ .node_offset_bin_rhs = src_node }); @@ -14702,14 +14830,14 @@ fn analyzeTupleMul( break :rs runtime_src; }; - const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{ + const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try mod.intern(.{ .aggregate = .{ + const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); @@ -14739,7 +14867,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ArrayMul, inst_data.payload_index).data; const uncoerced_lhs = try sema.resolveInst(extra.lhs); @@ -14762,12 +14891,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_len = uncoerced_lhs_ty.structFieldCount(mod); const lhs_dest_ty = switch (res_ty.zigTypeTag(mod)) { else => break :no_coerce, - .Array => try mod.arrayType(.{ + .Array => try 
pt.arrayType(.{ .child = res_ty.childType(mod).toIntern(), .len = lhs_len, .sentinel = if (res_ty.sentinel(mod)) |s| s.toIntern() else .none, }), - .Vector => try mod.vectorType(.{ + .Vector => try pt.vectorType(.{ .child = res_ty.childType(mod).toIntern(), .len = lhs_len, }), @@ -14796,7 +14925,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Analyze the lhs first, to catch the case that someone tried to do exponentiation const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { - const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); + const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { @@ -14818,7 +14947,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, rhs_src, "operation results in overflow", .{}); const result_len = try sema.usizeCast(block, src, result_len_u64); - const result_ty = try mod.arrayType(.{ + const result_ty = try pt.arrayType(.{ .len = result_len, .sentinel = if (lhs_info.sentinel) |s| s.toIntern() else .none, .child = lhs_info.elem_type.toIntern(), @@ -14839,8 +14968,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. if (lhs_len == 1 and lhs_info.sentinel == null) { - const elem_val = try lhs_sub_val.elemValue(mod, 0); - break :v try mod.intern(.{ .aggregate = .{ + const elem_val = try lhs_sub_val.elemValue(pt, 0); + break :v try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .repeated_elem = elem_val.toIntern() }, } }); @@ -14851,12 +14980,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { - const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); + const elem_val = try lhs_sub_val.elemValue(pt, lhs_i); element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } - break :v try mod.intern(.{ .aggregate = .{ + break :v try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); @@ -14870,17 +14999,17 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // to get the same elem values. const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len); for (lhs_vals, 0..) 
|*lhs_val, idx| { - const idx_ref = try mod.intRef(Type.usize, idx); + const idx_ref = try pt.intRef(Type.usize, idx); lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false); } if (ptr_addrspace) |ptr_as| { - const alloc_ty = try mod.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try mod.ptrTypeSema(.{ + const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = lhs_info.elem_type.toIntern(), .flags = .{ .address_space = ptr_as }, }); @@ -14888,14 +15017,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var elem_i: usize = 0; while (elem_i < result_len) { for (lhs_vals) |lhs_val| { - const elem_index = try mod.intRef(Type.usize, elem_i); + const elem_index = try pt.intRef(Type.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - const elem_index = try mod.intRef(Type.usize, result_len); + const elem_index = try pt.intRef(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef(sent_val.toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); @@ -14912,7 +15041,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = src; @@ -14926,25 +15056,26 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .Int, .ComptimeInt, .Float, .ComptimeFloat => false, else => true, }) { - return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}); + return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}); } if (rhs_scalar_ty.isAnyFloat()) { // We handle float negation here to ensure negative zero is represented in the bits. if (try sema.resolveValue(rhs)) |rhs_val| { - if (rhs_val.isUndef(mod)) return mod.undefRef(rhs_ty); - return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern()); + if (rhs_val.isUndef(mod)) return pt.undefRef(rhs_ty); + return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(if (block.float_mode == .optimized) .neg_optimized else .neg, rhs); } - const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern()); + const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern()); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = src; @@ -14956,10 +15087,10 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => {}, - else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}), + else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}), } - const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern()); + const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern()); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -14985,7 +15116,8 @@ fn zirArithmetic( } fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15026,13 +15158,13 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins // If lhs % rhs is 0, it doesn't matter. const lhs_val = maybe_lhs_val orelse unreachable; const rhs_val = maybe_rhs_val orelse unreachable; - const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable; - if (!rem.compareAllWithZero(.eq, mod)) { + const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt) catch unreachable; + if (!rem.compareAllWithZero(.eq, pt)) { return sema.fail( block, src, "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'", - .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(mod, sema) }, + .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt), rem.fmtValue(pt, sema) }, ); } } @@ -15068,10 +15200,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); @@ -15083,7 +15215,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15097,25 +15229,25 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { - return mod.undefRef(resolved_type); + if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { + return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if 
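// ============================================================================
// Editorial aside — not part of the patch. The remainder check in `zirDiv`
// above rejects mixed comptime-int/comptime-float division unless both
// coercion directions agree, i.e. unless the remainder is zero:
//
// const std = @import("std");
//
// comptime {
//     // Remainder 0: coercing either way yields 4.0, so this is accepted.
//     std.debug.assert(8 / 2.0 == 4.0);
//     // 7 / 2.0 would be a compile error — "ambiguous coercion of division
//     // operands ...; non-zero remainder ..." — because 7/2 is 3 as ints
//     // but 3.5 as floats.
// }
// ===================================================== end editorial aside ==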
(maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; - const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return Air.internedToRef(res.toIntern()); } else { - return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else { break :rs rhs_src; @@ -15138,7 +15270,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins block, src, "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact", - .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) }, + .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt) }, ); } break :blk Air.Inst.Tag.div_trunc; @@ -15150,7 +15282,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15204,10 +15337,10 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); @@ -15219,7 +15352,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15227,22 +15360,22 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (is_int) { - const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod); - if (!(modulus_val.compareAllWithZero(.eq, mod))) { + const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt); + if (!(modulus_val.compareAllWithZero(.eq, pt))) { return sema.fail(block, src, "exact division produced remainder", .{}); } var overflow_idx: ?usize = null; - const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return Air.internedToRef(res.toIntern()); } else { - const modulus_val = try 
lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod); - if (!(modulus_val.compareAllWithZero(.eq, mod))) { + const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt); + if (!(modulus_val.compareAllWithZero(.eq, pt))) { return sema.fail(block, src, "exact division produced remainder", .{}); } - return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs rhs_src; } else break :rs lhs_src; @@ -15286,8 +15419,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; if (resolved_type.zigTypeTag(mod) == .Vector) { @@ -15315,7 +15448,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15371,10 +15505,10 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. 
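// ============================================================================
// Editorial aside — not part of the patch. `zirDivExact` above folds
// comptime operands by checking the remainder first; a nonzero remainder is
// the "exact division produced remainder" error:
//
// const std = @import("std");
//
// comptime {
//     std.debug.assert(@divExact(10, 5) == 2); // remainder 0: accepted
//     // @divExact(10, 3) would fail to compile here with
//     // "exact division produced remainder" (the check in the hunk above).
// }
// ===================================================== end editorial aside ==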
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); @@ -15386,7 +15520,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15395,20 +15529,20 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { - return mod.undefRef(resolved_type); + if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { + return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { - return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else { - return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs rhs_src; } else break :rs lhs_src; @@ -15425,7 +15559,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15481,10 +15616,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. 
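// ============================================================================
// Editorial aside — not part of the patch. `zirDivFloor` above and
// `zirDivTrunc` below fold to `intDivFloor` and `intDiv` respectively; the
// two differ only for negative quotients, where floor rounds toward -inf
// and trunc rounds toward zero:
//
// const std = @import("std");
//
// test "divFloor vs divTrunc on negative quotients" {
//     try std.testing.expectEqual(@as(i32, -2), @divFloor(@as(i32, -5), 3));
//     try std.testing.expectEqual(@as(i32, -1), @divTrunc(@as(i32, -5), 3));
// }
// ===================================================== end editorial aside ==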
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); @@ -15496,7 +15631,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } @@ -15504,25 +15639,25 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { - return mod.undefRef(resolved_type); + if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { + return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; - const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return Air.internedToRef(res.toIntern()); } else { - return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs rhs_src; } else break :rs lhs_src; @@ -15550,7 +15685,8 @@ fn addDivIntOverflowSafety( casted_rhs: Air.Inst.Ref, is_int: bool, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (!is_int) return; // If the LHS is unsigned, it cannot cause overflow. @@ -15561,19 +15697,19 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(mod, resolved_type); - const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); + const min_int = try resolved_type.minInt(pt, resolved_type); + const neg_one_scalar = try pt.intValue(lhs_scalar_ty, -1); const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. if (maybe_lhs_val) |lhs_val| { - if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; + if (try lhs_val.compareAll(.neq, min_int, resolved_type, pt)) return; } // If the RHS is comptime-known to not be equal to -1, no overflow is possible. if (maybe_rhs_val) |rhs_val| { - if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; + if (try rhs_val.compareAll(.neq, neg_one, resolved_type, pt)) return; } var ok: Air.Inst.Ref = .none; @@ -15634,11 +15770,12 @@ fn addDivByZeroSafety( // emitted above. 
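// ============================================================================
// Editorial aside — not part of the patch. `addDivIntOverflowSafety` above
// relies on two's-complement signed division having exactly one overflowing
// case, minInt / -1, so comptime knowledge that either operand rules that
// out lets it skip the runtime check entirely:
//
// const std = @import("std");
//
// test "the single overflowing case of signed division" {
//     const min: i8 = std.math.minInt(i8); // -128
//     // -128 / -1 == +128 does not fit in i8; widening first shows the
//     // value the safety check protects against.
//     try std.testing.expectEqual(@as(i16, 128), @divTrunc(@as(i16, min), -1));
// }
// ===================================================== end editorial aside ==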
if (maybe_rhs_val != null) return; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const scalar_zero = if (is_int) - try mod.intValue(resolved_type.scalarType(mod), 0) + try pt.intValue(resolved_type.scalarType(mod), 0) else - try mod.floatValue(resolved_type.scalarType(mod), 0.0); + try pt.floatValue(resolved_type.scalarType(mod), 0.0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = Air.internedToRef(zero_val.toIntern()); @@ -15666,7 +15803,8 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst } fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15721,16 +15859,16 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ + const zero_val = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = resolved_type.toIntern(), .storage = .{ .repeated_elem = scalar_zero.toIntern() }, - } }))) else scalar_zero; + } })) else scalar_zero; return Air.internedToRef(zero_val.toIntern()); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -15740,18 +15878,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val); // If this answer could possibly be different by doing `intMod`, // we must emit a compile error. Otherwise, it's OK. - if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and - !(try rem_result.compareAllWithZeroSema(.eq, mod))) + if (!(try lhs_val.compareAllWithZeroSema(.gte, pt)) and + !(try rem_result.compareAllWithZeroSema(.eq, pt))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } @@ -15769,17 +15907,17 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) { + if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } - return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } @@ -15804,31 +15942,32 @@ fn intRem( lhs: Value, rhs: Value, ) CompileError!Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); scalar.* = (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } return sema.intRemScalar(lhs, rhs, ty); } fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value { - const mod = sema.mod; + const pt = sema.pt; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs_q = try sema.arena.alloc( math.big.Limb, lhs_bigint.limbs.len, @@ -15846,11 +15985,12 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return mod.intValue_big(scalar_ty, result_r.toConst()); + return pt.intValue_big(scalar_ty, result_r.toConst()); } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15904,11 +16044,11 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { - return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } break :rs lhs_src; } else { @@ -15920,16 +16060,16 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { - return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs rhs_src; } else break :rs lhs_src; }; @@ -15945,7 +16085,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); @@ -15999,7 +16140,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, 
rhs_src); } if (maybe_lhs_val) |lhs_val| { @@ -16015,16 +16156,16 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { - return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs rhs_src; } else break :rs lhs_src; }; @@ -16059,7 +16200,8 @@ fn zirOverflowArithmetic( const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); @@ -16081,7 +16223,7 @@ fn zirOverflowArithmetic( const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src); if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) { - return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)}); + return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(pt)}); } const maybe_lhs_val = try sema.resolveValue(lhs); @@ -16095,19 +16237,19 @@ fn zirOverflowArithmetic( wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { - const zero_bit = try mod.intValue(Type.u1, 0); + const zero_bit = try pt.intValue(Type.u1, 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored // to the result, even if it is undefined. // Otherwise, if either of the arguments is undefined, undefined is returned. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } @@ -16128,7 +16270,7 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; - } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + } else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { @@ -16144,10 +16286,10 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occurred. // If either of the arguments is one, the result is the other and no overflow occurred. // Otherwise, if either of the arguments is undefined, both results are undefined.
- const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); + const scalar_one = try pt.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; @@ -16157,7 +16299,7 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; @@ -16171,7 +16313,7 @@ fn zirOverflowArithmetic( break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } - const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod); + const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, pt); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } @@ -16181,12 +16323,12 @@ fn zirOverflowArithmetic( // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. // Otherwise, if either of the arguments is undefined, both results are undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } @@ -16196,7 +16338,7 @@ fn zirOverflowArithmetic( break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } - const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod); + const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, pt); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } @@ -16235,7 +16377,7 @@ fn zirOverflowArithmetic( } if (result.inst == .none) { - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = tuple_ty.toIntern(), .storage = .{ .elems = &.{ result.wrapped.toIntern(), @@ -16251,9 +16393,10 @@ fn zirOverflowArithmetic( } fn splat(sema: *Sema, ty: Type, val: Value) !Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) != .Vector) return val; - const repeated = try mod.intern(.{ .aggregate = .{ + const repeated = try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = val.toIntern() }, } }); @@ -16261,16 +16404,17 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value { } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - const ov_ty = if
(ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{ + const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .u1_type, }) else Type.u1; const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; - const tuple_ty = try ip.getAnonStructType(mod.gpa, .{ + const tuple_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{ .types = &types, .values = &values, .names = &.{}, @@ -16290,7 +16434,8 @@ fn analyzeArithmetic( rhs_src: LazySrcLoc, want_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); @@ -16337,7 +16482,7 @@ fn analyzeArithmetic( // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } @@ -16346,10 +16491,10 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } @@ -16359,7 +16504,7 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { @@ -16371,7 +16516,7 @@ fn analyzeArithmetic( } return Air.internedToRef(sum.toIntern()); } else { - return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ rhs_src, air_tag, .add_safe }; } else break :rs .{ lhs_src, air_tag, .add_safe }; @@ -16381,15 +16526,15 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16402,26 +16547,26 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined) else - try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod); + try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ @@ -16448,10 +16593,10 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } @@ -16461,7 +16606,7 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { @@ -16473,7 +16618,7 @@ fn analyzeArithmetic( } return Air.internedToRef(diff.toIntern()); } else { - return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ rhs_src, air_tag, .sub_safe }; } else break :rs .{ lhs_src, air_tag, .sub_safe }; @@ -16484,15 +16629,15 @@ fn analyzeArithmetic( // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return Air.internedToRef((try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern()); @@ -16505,21 +16650,21 @@ fn analyzeArithmetic( // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { const val = if (scalar_tag == .ComptimeInt) try sema.intSub(lhs_val, rhs_val, resolved_type, undefined) else - try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod); + try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ rhs_src, .sub_sat, .sub_sat }; @@ -16540,13 +16685,13 @@ fn analyzeArithmetic( // the result is nan. 
// If either of the operands are nan, the result is nan. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -16554,13 +16699,13 @@ fn analyzeArithmetic( if (lhs_val.isNan(mod)) { return Air.internedToRef(lhs_val.toIntern()); } - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) lz: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } if (rhs_val.isInf(mod)) { - return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern()); + return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern()); } } else if (resolved_type.isAnyFloat()) { break :lz; @@ -16579,16 +16724,16 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) rz: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isInf(mod)) { - return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern()); + return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern()); } } else if (resolved_type.isAnyFloat()) { break :rz; @@ -16604,18 +16749,18 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } } if (is_int) { var overflow_idx: ?usize = null; - const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } return Air.internedToRef(product.toIntern()); } else { - return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ lhs_src, air_tag, .mul_safe }; } else break :rs .{ rhs_src, air_tag, .mul_safe }; @@ -16626,18 +16771,18 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16648,9 +16793,9 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16659,9 +16804,9 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod)).toIntern()); + return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs .{ lhs_src, .mul_wrap, .mul_wrap }; } else break :rs .{ rhs_src, .mul_wrap, .mul_wrap }; }, @@ -16671,18 +16816,18 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), - .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16693,9 +16838,9 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { + if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16704,13 +16849,13 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { - return mod.undefRef(resolved_type); + return pt.undefRef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod) + try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, pt) else - try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod); + try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ lhs_src, .mul_sat, .mul_sat }; @@ -16758,7 +16903,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern()); + const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, src, no_ov, .integer_overflow); @@ -16782,7 +16927,8 @@ fn analyzePtrArithmetic( // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const opt_ptr_val = try sema.resolveValue(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); @@ -16800,7 +16946,7 @@ fn analyzePtrArithmetic( // it being a multiple of the type size. 
const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod)); + const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(pt)); break :a elem_size * off_int; } else elem_size; @@ -16813,7 +16959,7 @@ fn analyzePtrArithmetic( )); assert(new_align != .none); - break :t try mod.ptrTypeSema(.{ + break :t try pt.ptrTypeSema(.{ .child = ptr_info.child, .sentinel = ptr_info.sentinel, .flags = .{ @@ -16830,16 +16976,16 @@ fn analyzePtrArithmetic( const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { - if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty); + if (ptr_val.isUndef(mod)) return pt.undefRef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod)); + const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(pt)); if (offset_int == 0) return ptr; if (air_tag == .ptr_sub) { const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } else { - const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty); + const new_ptr_val = try pt.getCoerced(try ptr_val.ptrElem(offset_int, pt), new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } } else break :rs offset_src; @@ -16879,6 +17025,8 @@ fn zirAsm( const tracy = trace(@src()); defer tracy.end(); + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src = block.nodeOffset(extra.data.src_node); const ret_ty_src = block.src(.{ .node_offset_asm_ret_ty = extra.data.src_node }); @@ -16910,7 +17058,7 @@ fn zirAsm( if (is_volatile) { return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{}); } - try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source); + try mod.addGlobalAssembly(sema.owner_decl_index, asm_source); return .void_value; } @@ -16959,7 +17107,6 @@ fn zirAsm( const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc(ConstraintName, inputs_len); - const mod = sema.mod; for (args, 0..) 
|*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); @@ -17049,7 +17196,8 @@ fn zirCmpEq( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = block.nodeOffset(inst_data.src_node); @@ -17077,7 +17225,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; - return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)}); + return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(pt)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { @@ -17092,7 +17240,7 @@ fn zirCmpEq( if (try sema.resolveValue(lhs)) |lval| { if (try sema.resolveValue(rhs)) |rval| { if (lval.isUndef(mod) or rval.isUndef(mod)) { - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); } const lkey = mod.intern_pool.indexToKey(lval.toIntern()); const rkey = mod.intern_pool.indexToKey(rval.toIntern()); @@ -17128,14 +17276,15 @@ fn analyzeCmpUnionTag( tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const union_ty = sema.typeOf(un); - try union_ty.resolveFields(mod); + try union_ty.resolveFields(pt); const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); - try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)}); + try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(pt)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -17146,7 +17295,7 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveValue(coerced_tag)) |enum_val| { - if (enum_val.isUndef(mod)) return mod.undefRef(Type.bool); + if (enum_val.isUndef(mod)) return pt.undefRef(Type.bool); const field_ty = union_ty.unionFieldType(enum_val, mod).?; if (field_ty.zigTypeTag(mod) == .NoReturn) { return .bool_false; @@ -17187,7 +17336,8 @@ fn analyzeCmp( rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) { @@ -17215,7 +17365,7 @@ fn analyzeCmp( const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ - compareOperatorName(op), resolved_type.fmt(mod), + compareOperatorName(op), resolved_type.fmt(pt), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -17244,13 +17394,14 @@ fn cmpSelf( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try 
sema.resolveValue(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef(mod)) return mod.undefRef(Type.bool); + if (lhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (try sema.resolveValue(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool); + if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); @@ -17273,7 +17424,7 @@ fn cmpSelf( // bool eq/neq more efficiently. if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveValue(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool); + if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } @@ -17310,24 +17461,24 @@ fn runtimeBoolCmp( } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(pt)}), .Type, .EnumLiteral, .ComptimeFloat, .ComptimeInt, .Void, - => return mod.intRef(Type.comptime_int, 0), + => return pt.intRef(Type.comptime_int, 0), .Bool, .Int, @@ -17345,12 +17496,13 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
.AnyFrame, => {}, } - const val = try ty.lazyAbiSize(mod); + const val = try ty.lazyAbiSize(pt); return Air.internedToRef(val.toIntern()); } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -17360,14 +17512,14 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(pt)}), .Type, .EnumLiteral, .ComptimeFloat, .ComptimeInt, .Void, - => return mod.intRef(Type.comptime_int, 0), + => return pt.intRef(Type.comptime_int, 0), .Bool, .Int, @@ -17385,8 +17537,8 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema); - return mod.intRef(Type.comptime_int, bit_size); + const bit_size = try operand_ty.bitSizeAdvanced(pt, .sema); + return pt.intRef(Type.comptime_int, bit_size); } fn zirThis( @@ -17394,14 +17546,16 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const this_decl_index = mod.namespacePtr(block.namespace).decl_index; const src = block.nodeOffset(@bitCast(extended.operand)); return sema.analyzeDeclVal(block, src, this_decl_index); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const captures = mod.namespacePtr(block.namespace).getType(mod).getCaptures(mod); @@ -17489,7 +17643,7 @@ fn zirRetAddr( _ = extended; if (block.is_comptime) { // TODO: we could give a meaningful lazy value here. 
#14938 - return sema.mod.intRef(Type.usize, 0); + return sema.pt.intRef(Type.usize, 0); } else { return block.addNoOp(.ret_addr); } @@ -17514,7 +17668,8 @@ fn zirBuiltinSrc( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index); const ip = &mod.intern_pool; @@ -17522,43 +17677,43 @@ fn zirBuiltinSrc( const func_name_val = v: { const func_name_len = fn_owner_decl.name.length(ip); - const array_ty = try ip.get(gpa, .{ .array_type = .{ + const array_ty = try pt.intern(.{ .array_type = .{ .len = func_name_len, .sentinel = .zero_u8, .child = .u8_type, } }); - break :v try ip.get(gpa, .{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try ip.get(gpa, .{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .orig_ty = .slice_const_u8_sentinel_0_type, - .val = try ip.get(gpa, .{ .aggregate = .{ + .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ .bytes = fn_owner_decl.name.toString() }, } }), } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, func_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(), } }); }; const file_name_val = v: { // The compiler must not call realpath anywhere. const file_name = try fn_owner_decl.getFileScope(mod).fullPath(sema.arena); - const array_ty = try ip.get(gpa, .{ .array_type = .{ + const array_ty = try pt.intern(.{ .array_type = .{ .len = file_name.len, .sentinel = .zero_u8, .child = .u8_type, } }); - break :v try ip.get(gpa, .{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try ip.get(gpa, .{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .orig_ty = .slice_const_u8_sentinel_0_type, - .val = try ip.get(gpa, .{ .aggregate = .{ + .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ .bytes = try ip.getOrPutString(gpa, file_name, .maybe_embedded_nulls), @@ -17567,35 +17722,36 @@ fn zirBuiltinSrc( } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, file_name.len)).toIntern(), + .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(), } }); }; - const src_loc_ty = try mod.getBuiltinType("SourceLocation"); + const src_loc_ty = try pt.getBuiltinType("SourceLocation"); const fields = .{ // file: [:0]const u8, file_name_val, // fn_name: [:0]const u8, func_name_val, // line: u32, - (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), + (try pt.intValue(Type.u32, extra.line + 1)).toIntern(), // column: u32, - (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), + (try pt.intValue(Type.u32, extra.column + 1)).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = src_loc_ty.toIntern(), .storage = .{ .elems = &fields }, } }))); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty = try sema.resolveType(block, src, inst_data.operand); - const 
type_info_ty = try mod.getBuiltinType("Type"); + const type_info_ty = try pt.getBuiltinType("Type"); const type_info_tag_ty = type_info_ty.unionTagType(mod).?; if (ty.typeDeclInst(mod)) |type_decl_inst| { @@ -17612,9 +17768,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Undefined, .Null, .EnumLiteral, - => |type_info_tag| return Air.internedToRef((try mod.intern(.{ .un = .{ + => |type_info_tag| return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(), + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(), .val = .void_value, } }))), .Fn => { @@ -17643,8 +17799,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (param_vals, 0..) |*param_val, i| { const param_ty = func_ty_info.param_types.get(ip)[i]; const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try ip.get(gpa, .{ .opt = .{ - .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + const param_ty_val = try pt.intern(.{ .opt = .{ + .ty = try pt.intern(.{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -17661,22 +17817,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: ?type, param_ty_val, }; - param_val.* = try mod.intern(.{ .aggregate = .{ + param_val.* = try pt.intern(.{ .aggregate = .{ .ty = param_info_ty.toIntern(), .storage = .{ .elems = ¶m_fields }, } }); } const args_val = v: { - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = param_vals.len, .child = param_info_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .elems = param_vals }, } }); - const slice_ty = (try mod.ptrTypeSema(.{ + const slice_ty = (try pt.ptrTypeSema(.{ .child = param_info_ty.toIntern(), .flags = .{ .size = .Slice, @@ -17684,9 +17840,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .anon_decl = .{ .orig_ty = manyptr_ty, @@ -17694,23 +17850,23 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), + .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(), } }); }; - const ret_ty_opt = try mod.intern(.{ .opt = .{ - .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + const ret_ty_opt = try pt.intern(.{ .opt = .{ + .ty = try pt.intern(.{ .opt_type = .type_type }), .val = if (func_ty_info.return_type == .generic_poison_type) .none else func_ty_info.return_type, } }); - const callconv_ty = try mod.getBuiltinType("CallingConvention"); + const callconv_ty = try pt.getBuiltinType("CallingConvention"); const field_values = .{ // calling_convention: CallingConvention, - (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(), + (try pt.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(), // is_generic: bool, 
Value.makeBool(func_ty_info.is_generic).toIntern(), // is_var_args: bool, @@ -17720,10 +17876,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // args: []const Fn.Param, args_val, }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = fn_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17740,18 +17896,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const int_info_decl = mod.declPtr(int_info_decl_index); const int_info_ty = int_info_decl.val.toType(); - const signedness_ty = try mod.getBuiltinType("Signedness"); + const signedness_ty = try pt.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = .{ // signedness: Signedness, - (try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(), + (try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(), // bits: u16, - (try mod.intValue(Type.u16, info.bits)).toIntern(), + (try pt.intValue(Type.u16, info.bits)).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = int_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17770,12 +17926,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_vals = .{ // bits: u16, - (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), + (try pt.intValue(Type.u16, ty.bitSize(pt))).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = float_info_ty.toIntern(), .storage = .{ .elems = &field_vals }, } }), @@ -17784,16 +17940,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.flags.alignment.toByteUnits()) |alignment| - try mod.intValue(Type.comptime_int, alignment) + try pt.intValue(Type.comptime_int, alignment) else - try Type.fromInterned(info.child).lazyAbiAlignment(mod); + try Type.fromInterned(info.child).lazyAbiAlignment(pt); - const addrspace_ty = try mod.getBuiltinType("AddressSpace"); + const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const pointer_ty = t: { const decl_index = (try sema.namespaceLookup( block, src, - (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), + (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "Pointer", 
.no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -17814,7 +17970,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // size: Size, - (try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(), + (try pt.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(), // is_const: bool, Value.makeBool(info.flags.is_const).toIntern(), // is_volatile: bool, @@ -17822,7 +17978,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // alignment: comptime_int, alignment.toIntern(), // address_space: AddressSpace - (try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).toIntern(), + (try pt.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).toIntern(), // child: type, info.child, // is_allowzero: bool, @@ -17833,10 +17989,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => Value.fromInterned(info.sentinel), })).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = pointer_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17858,16 +18014,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(mod); const field_values = .{ // len: comptime_int, - (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + (try pt.intValue(Type.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), // sentinel: ?*const anyopaque, (try sema.optRefValue(info.sentinel)).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = array_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17889,14 +18045,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(mod); const field_values = .{ // len: comptime_int, - (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + (try pt.intValue(Type.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = vector_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17919,10 +18075,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // child: 
type, ty.optionalChild(mod).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = optional_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -17954,18 +18110,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const error_name = names.get(ip)[error_index]; const error_name_len = error_name.length(ip); const error_name_val = v: { - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = error_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = error_name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -17973,7 +18129,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, error_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(), } }); }; @@ -17981,7 +18137,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: [:0]const u8, error_name_val, }; - field_val.* = try mod.intern(.{ .aggregate = .{ + field_val.* = try pt.intern(.{ .aggregate = .{ .ty = error_field_ty.toIntern(), .storage = .{ .elems = &error_field_fields }, } }); @@ -17992,27 +18148,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; // Build our ?[]const Error value - const slice_errors_ty = try mod.ptrTypeSema(.{ + const slice_errors_ty = try pt.ptrTypeSema(.{ .child = error_field_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, }); - const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); + const opt_slice_errors_ty = try pt.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { - const array_errors_ty = try mod.arrayType(.{ + const array_errors_ty = try pt.arrayType(.{ .len = vals.len, .child = error_field_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_errors_ty.toIntern(), .storage = .{ .elems = vals }, } }); const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(mod).toIntern(); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = slice_errors_ty.toIntern(), - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_errors_ty, .base_addr = .{ .anon_decl = .{ .orig_ty = manyptr_errors_ty, @@ -18020,18 +18176,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, vals.len)).toIntern(), + .len = (try pt.intValue(Type.usize, 
vals.len)).toIntern(), } }); } else .none; - const errors_val = try mod.intern(.{ .opt = .{ + const errors_val = try pt.intern(.{ .opt = .{ .ty = opt_slice_errors_ty.toIntern(), .val = errors_payload_val, } }); // Construct Type{ .ErrorSet = errors_val } - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(), + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(), .val = errors_val, } }))); }, @@ -18054,10 +18210,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // payload: type, ty.errorUnionPayload(mod).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = error_union_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -18082,30 +18238,31 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (enum_field_vals, 0..) |*field_val, tag_index| { const enum_type = ip.loadEnumType(ty.toIntern()); const value_val = if (enum_type.values.len > 0) - try mod.intern_pool.getCoercedInts( + try ip.getCoercedInts( mod.gpa, - mod.intern_pool.indexToKey(enum_type.values.get(ip)[tag_index]).int, + pt.tid, + ip.indexToKey(enum_type.values.get(ip)[tag_index]).int, .comptime_int_type, ) else - (try mod.intValue(Type.comptime_int, tag_index)).toIntern(); + (try pt.intValue(Type.comptime_int, tag_index)).toIntern(); // TODO: write something like getCoercedInts to avoid needing to dupe const name_val = v: { const tag_name = enum_type.names.get(ip)[tag_index]; const tag_name_len = tag_name.length(ip); - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = tag_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = tag_name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -18113,7 +18270,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, tag_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(), } }); }; @@ -18123,22 +18280,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value: comptime_int, value_val, }; - field_val.* = try mod.intern(.{ .aggregate = .{ + field_val.* = try pt.intern(.{ .aggregate = .{ .ty = enum_field_ty.toIntern(), .storage = .{ .elems = &enum_field_fields }, } }); } const fields_val = v: { - const fields_array_ty = try mod.arrayType(.{ + const fields_array_ty = try pt.arrayType(.{ .len = enum_field_vals.len, .child = 
enum_field_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } }); - const slice_ty = (try mod.ptrTypeSema(.{ + const slice_ty = (try pt.ptrTypeSema(.{ .child = enum_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18146,9 +18303,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -18156,7 +18313,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(), + .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(), } }); }; @@ -18184,10 +18341,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_exhaustive: bool, is_exhaustive.toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = type_enum_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -18218,7 +18375,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t union_field_ty_decl.val.toType(); }; - try ty.resolveLayout(mod); // Getting alignment requires type layout + try ty.resolveLayout(pt); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); const layout = union_obj.getLayout(ip); @@ -18230,18 +18387,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { const field_name = tag_type.names.get(ip)[field_index]; const field_name_len = field_name.length(ip); - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -18249,12 +18406,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; const alignment = switch (layout) { - .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), + .auto, .@"extern" => try 
pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), .@"packed" => .none, }; @@ -18265,24 +18422,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; - field_val.* = try mod.intern(.{ .aggregate = .{ + field_val.* = try pt.intern(.{ .aggregate = .{ .ty = union_field_ty.toIntern(), .storage = .{ .elems = &union_field_fields }, } }); } const fields_val = v: { - const array_fields_ty = try mod.arrayType(.{ + const array_fields_ty = try pt.arrayType(.{ .len = union_field_vals.len, .child = union_field_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } }); - const slice_ty = (try mod.ptrTypeSema(.{ + const slice_ty = (try pt.ptrTypeSema(.{ .child = union_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18290,9 +18447,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .anon_decl = .{ .orig_ty = manyptr_ty, @@ -18300,14 +18457,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), + .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).toIntern(), + const enum_tag_ty_val = try pt.intern(.{ .opt = .{ + .ty = (try pt.optionalType(.type_type)).toIntern(), .val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, } }); @@ -18315,7 +18472,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), + (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18325,7 +18482,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), + (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -18334,10 +18491,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, 
@intFromEnum(std.builtin.TypeId.Union))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = type_union_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -18368,7 +18525,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t struct_field_ty_decl.val.toType(); }; - try ty.resolveLayout(mod); // Getting alignment requires type layout + try ty.resolveLayout(pt); // Getting alignment requires type layout var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); @@ -18385,18 +18542,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -18404,11 +18561,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; - try Type.fromInterned(field_ty).resolveLayout(mod); + try Type.fromInterned(field_ty).resolveLayout(pt); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null; @@ -18423,9 +18580,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(pt).toByteUnits() orelse 0)).toIntern(), }; - struct_field_val.* = try mod.intern(.{ .aggregate = .{ + struct_field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); @@ -18437,7 +18594,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); - try ty.resolveStructFieldInits(mod); + try ty.resolveStructFieldInits(pt); for (struct_field_vals, 0..) 
|*field_val, field_index| { const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| @@ -18449,18 +18606,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_init = struct_type.fieldInit(ip, field_index); const field_is_comptime = struct_type.fieldIsComptime(ip, field_index); const name_val = v: { - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .val = new_decl_val, @@ -18468,7 +18625,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; @@ -18476,7 +18633,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try mod.structFieldAlignmentAdvanced( + else => try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, @@ -18494,9 +18651,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(field_is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; - field_val.* = try mod.intern(.{ .aggregate = .{ + field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); @@ -18504,15 +18661,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } const fields_val = v: { - const array_fields_ty = try mod.arrayType(.{ + const array_fields_ty = try pt.arrayType(.{ .len = struct_field_vals.len, .child = struct_field_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } }); - const slice_ty = (try mod.ptrTypeSema(.{ + const slice_ty = (try pt.ptrTypeSema(.{ .child = struct_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18520,9 +18677,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .anon_decl = .{ .orig_ty = manyptr_ty, @@ -18530,14 +18687,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, 
struct_field_vals.len)).toIntern(), + .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); - const backing_integer_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).toIntern(), + const backing_integer_val = try pt.intern(.{ .opt = .{ + .ty = (try pt.optionalType(.type_type)).toIntern(), .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: { assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod)); break :val packed_struct.backingIntType(ip).*; @@ -18548,7 +18705,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), + (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18560,7 +18717,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), + (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -18570,10 +18727,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_tuple: bool, Value.makeBool(ty.isTuple(mod)).toIntern(), }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = type_struct_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -18592,17 +18749,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t type_opaque_ty_decl.val.toType(); }; - try ty.resolveFields(mod); + try ty.resolveFields(pt); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); const field_values = .{ // decls: []const Declaration, decls_val, }; - return Air.internedToRef((try mod.intern(.{ .un = .{ + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(), - .val = try mod.intern(.{ .aggregate = .{ + .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(), + .val = try pt.intern(.{ .aggregate = .{ .ty = type_opaque_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), @@ -18620,7 +18777,8 @@ fn typeInfoDecls( type_info_ty: Type, opt_namespace: InternPool.OptionalNamespaceIndex, ) CompileError!InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const declaration_ty = t: { @@ -18643,15 +18801,15 @@ fn typeInfoDecls( try sema.typeInfoNamespaceDecls(block, opt_namespace, declaration_ty, &decl_vals, &seen_namespaces); - const array_decl_ty = try mod.arrayType(.{ + const array_decl_ty = try pt.arrayType(.{ .len = 
decl_vals.items.len, .child = declaration_ty.toIntern(), }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } }); - const slice_ty = (try mod.ptrTypeSema(.{ + const slice_ty = (try pt.ptrTypeSema(.{ .child = declaration_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18659,9 +18817,9 @@ fn typeInfoDecls( }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); - return try mod.intern(.{ .slice = .{ + return try pt.intern(.{ .slice = .{ .ty = slice_ty, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .anon_decl = .{ .orig_ty = manyptr_ty, @@ -18669,7 +18827,7 @@ fn typeInfoDecls( } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), + .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(), } }); } @@ -18681,7 +18839,8 @@ fn typeInfoNamespaceDecls( decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return; @@ -18703,18 +18862,18 @@ fn typeInfoNamespaceDecls( if (decl.kind != .named) continue; const name_val = v: { const decl_name_len = decl.name.length(ip); - const new_decl_ty = try mod.arrayType(.{ + const new_decl_ty = try pt.arrayType(.{ .len = decl_name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try mod.intern(.{ .aggregate = .{ + const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = decl.name.toString() }, } }); - break :v try mod.intern(.{ .slice = .{ + break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .anon_decl = .{ .orig_ty = .slice_const_u8_sentinel_0_type, @@ -18722,7 +18881,7 @@ fn typeInfoNamespaceDecls( } }, .byte_offset = 0, } }), - .len = (try mod.intValue(Type.usize, decl_name_len)).toIntern(), + .len = (try pt.intValue(Type.usize, decl_name_len)).toIntern(), } }); }; @@ -18730,7 +18889,7 @@ fn typeInfoNamespaceDecls( //name: [:0]const u8, name_val, }; - try decl_vals.append(try mod.intern(.{ .aggregate = .{ + try decl_vals.append(try pt.intern(.{ .aggregate = .{ .ty = declaration_ty.toIntern(), .storage = .{ .elems = &fields }, } })); @@ -18782,11 +18941,12 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { - const bits = operand.bitSize(mod); + const bits = operand.bitSize(pt); const count = if (bits == 0) 0 else blk: { @@ -18797,12 +18957,12 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi } break :blk count; }; - return mod.intType(.unsigned, count); + return pt.intType(.unsigned, count); }, .Vector => { const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); - return mod.vectorType(.{ + return pt.vectorType(.{ .len = operand.vectorLen(mod), .child = 
log2_elem_ty.toIntern(), }); @@ -18813,7 +18973,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi block, src, "bit shifting operation expected integer type, found '{}'", - .{operand.fmt(mod)}, + .{operand.fmt(pt)}, ); } @@ -18865,7 +19025,8 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); @@ -18874,7 +19035,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveValue(operand)) |val| { return if (val.isUndef(mod)) - mod.undefRef(Type.bool) + pt.undefRef(Type.bool) else if (val.toBool()) .bool_false else .bool_true; } try sema.requireRuntimeBlock(block, src, null); @@ -18890,7 +19051,8 @@ fn zirBoolBr( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const datas = sema.code.instructions.items(.data); @@ -19006,7 +19168,8 @@ fn finishCondBr( } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, .Pointer => if (ty.isPtrLikeOptional(mod)) return, @@ -19038,7 +19201,8 @@ fn zirIsNonNullPtr( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ptr = try sema.resolveInst(inst_data.operand); @@ -19051,11 +19215,12 @@ fn zirIsNonNullPtr( } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ - ty.fmt(mod), + ty.fmt(pt), }), } } @@ -19075,7 +19240,8 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ptr = try sema.resolveInst(inst_data.operand); @@ -19102,7 +19268,8 @@ fn zirCondbr( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const cond_src = parent_block.src(.{ .node_offset_if_cond = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -19177,10 +19344,11 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
const body = sema.code.bodySlice(extra.end, extra.data.body_len); const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(mod), + err_union_ty.fmt(pt), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -19225,10 +19393,11 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(mod), + err_union_ty.fmt(pt), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -19251,7 +19420,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); - const res_ty = try mod.ptrTypeSema(.{ + const res_ty = try pt.ptrTypeSema(.{ .child = err_union_ty.errorUnionPayload(mod).toIntern(), .flags = .{ .is_const = ptr_info.flags.is_const, @@ -19366,7 +19535,8 @@ fn zirRetErrValue( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const src = block.tokenOffset(inst_data.src_tok); const err_name = try mod.intern_pool.getOrPutString( @@ -19376,8 +19546,8 @@ fn zirRetErrValue( ); _ = try mod.getErrorValue(err_name); // Return the error code from the function. 
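Nearly every function in these hunks gains the same two-line preamble in place of the old `const mod = sema.mod;`. A minimal sketch of the resulting access pattern, assuming `Sema` now carries a `pt: Zcu.PerThread` whose `zcu` field is the former `*Module` (`exampleUse` is a hypothetical name for illustration only):

    fn exampleUse(sema: *Sema, operand_ty: Type) !void {
        const pt = sema.pt; // per-thread handle: interning and value/type creation
        const mod = pt.zcu; // shared Zcu: read-only queries are unchanged
        const one = try pt.intValue(Type.u1, 1); // was: mod.intValue(Type.u1, 1)
        _ = one;
        switch (operand_ty.zigTypeTag(mod)) { // type classification still takes the Zcu
            else => {},
        }
    }

Error formatting threads `pt` as well, since printing a type needs the per-thread state: `ty.fmt(mod)` becomes `ty.fmt(pt)` throughout these hunks.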
- const error_set_type = try mod.singleErrorSetType(err_name); - const result_inst = Air.internedToRef((try mod.intern(.{ .err = .{ + const error_set_type = try pt.singleErrorSetType(err_name); + const result_inst = Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = err_name, } }))); @@ -19392,7 +19562,8 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok; const r_brace_src = block.tokenOffset(inst_data.src_tok); if (block.inlining == null and sema.func_is_naked) { @@ -19412,7 +19583,7 @@ fn zirRetImplicit( if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(mod), + sema.fn_ret_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -19422,7 +19593,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(mod), + sema.fn_ret_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -19474,7 +19645,7 @@ fn retWithErrTracing( ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); @@ -19484,11 +19655,11 @@ fn retWithErrTracing( else => true, }; const gpa = sema.gpa; - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); - const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); + const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const return_err_fn = try mod.getBuiltin("returnError"); + const return_err_fn = try pt.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; if (!need_check) { @@ -19524,12 +19695,14 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; return fn_ret_ty.isError(mod) and mod.comp.config.any_error_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].save_err_ret_index; if (!block.ownerModule().error_tracing) return; @@ -19559,7 +19732,8 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_ const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: { var block = start_block; @@ -19597,7 +19771,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_ if (is_non_error) return; const saved_index_val = try sema.resolveDefinedValue(start_block, src, saved_index); - const saved_index_int = saved_index_val.?.toUnsignedInt(mod); + const saved_index_int = 
saved_index_val.?.toUnsignedInt(pt); assert(saved_index_int <= sema.comptime_err_ret_trace.items.len); sema.comptime_err_ret_trace.items.len = @intCast(saved_index_int); return; @@ -19612,7 +19786,8 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_ } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern(); @@ -19632,7 +19807,8 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !void { const arena = sema.arena; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; switch (op_ty.zigTypeTag(mod)) { .ErrorSet => try ies.addErrorSet(op_ty, ip, arena), @@ -19651,7 +19827,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } @@ -19691,7 +19868,7 @@ fn analyzeRet( return sema.failWithOwnedErrorMsg(block, msg); } - try sema.fn_ret_ty.resolveLayout(mod); + try sema.fn_ret_ty.resolveLayout(pt); try sema.validateRuntimeValue(block, operand_src, operand); @@ -19718,7 +19895,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src = block.src(.{ .node_offset_ptr_elem = extra.data.src_node }); @@ -19773,7 +19951,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?; + const align_bytes = (try val.getUnsignedIntAdvanced(pt, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; @@ -19804,13 +19982,13 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (host_size != 0) { if (bit_offset >= host_size * 8) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} starts {} bits after the end of a {} byte host integer", .{ - elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size, + elem_ty.fmt(pt), bit_offset, bit_offset - host_size * 8, host_size, }); } - const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema); + const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ - elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, + elem_ty.fmt(pt), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, }); } } @@ -19824,7 +20002,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } else if (inst_data.size == .C) { if (!try 
sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); + const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src, elem_ty, .other); @@ -19841,14 +20019,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (host_size != 0 and !try sema.validatePackedType(elem_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(mod)}); + const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, elem_ty_src, elem_ty); break :msg msg; }); } - const ty = try mod.ptrTypeSema(.{ + const ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ @@ -19875,7 +20053,8 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src = block.nodeOffset(inst_data.src_node); const ty_src = block.src(.{ .node_offset_init_ty = inst_data.src_node }); const obj_ty = try sema.resolveType(block, ty_src, inst_data.operand); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), @@ -19890,7 +20069,8 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { @@ -19905,7 +20085,7 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is break :ty ptr_ty.childType(mod); } // To make `&.{}` a `[:s]T`, the init should be a `[0:s]T`. - break :ty try mod.arrayType(.{ + break :ty try pt.arrayType(.{ .len = 0, .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .child = ptr_ty.childType(mod).toIntern(), @@ -19936,10 +20116,11 @@ fn structInitEmpty( dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. - try struct_ty.resolveFields(mod); + try struct_ty.resolveFields(pt); // The init values to use for the struct instance. 
const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); @@ -19950,7 +20131,8 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { if (obj_ty.zigTypeTag(mod) == .Array) { @@ -19959,21 +20141,22 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = obj_ty.toIntern(), .storage = .{ .elems = &.{} }, } }))); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const field_src = block.builtinCallArgSrc(inst_data.src_node, 1); const init_src = block.builtinCallArgSrc(inst_data.src_node, 2); const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); - if (union_ty.zigTypeTag(sema.mod) != .Union) { - return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(sema.mod)}); + if (union_ty.zigTypeTag(pt.zcu) != .Union) { + return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(pt)}); } const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{ .needed_comptime_reason = "name of field being initialized must be comptime-known", @@ -19992,7 +20175,8 @@ fn unionInit( field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]); @@ -20000,8 +20184,8 @@ fn unionInit( if (try sema.resolveValue(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); - return Air.internedToRef((try mod.intern(.{ .un = .{ + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); + return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = union_ty.toIntern(), .tag = tag_val.toIntern(), .val = init_val.toIntern(), @@ -20025,7 +20209,8 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = block.nodeOffset(inst_data.src_node); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node; @@ -20038,7 +20223,7 @@ fn zirStructInit( else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); - try resolved_ty.resolveLayout(mod); + try resolved_ty.resolveLayout(pt); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. 
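The `Type` resolution helpers migrate the same way: operations that can intern new values (`resolveFields`, `resolveLayout`, `resolveStructFieldInits`) now take the per-thread handle rather than the `*Module`, as does the comptime-field query `structFieldValueComptime`. A rough sketch of the call shape around a struct init (`resolveForInit` is a hypothetical helper, not part of the patch):

    fn resolveForInit(sema: *Sema, struct_ty: Type) !void {
        const pt = sema.pt;
        // Resolution may add entries to the intern pool, so it routes through `pt`:
        try struct_ty.resolveFields(pt); // was: struct_ty.resolveFields(mod)
        try struct_ty.resolveLayout(pt); // was: struct_ty.resolveLayout(mod)
        try struct_ty.resolveStructFieldInits(pt);
    }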
@@ -20079,8 +20264,8 @@ fn zirStructInit( const field_ty = resolved_ty.structFieldType(field_index, mod); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { - try resolved_ty.resolveStructFieldInits(mod); - if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { + try resolved_ty.resolveStructFieldInits(pt); + if (try resolved_ty.structFieldValueComptime(pt, field_index)) |default_value| { const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", @@ -20112,7 +20297,7 @@ fn zirStructInit( ); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); - const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]); if (field_ty.zigTypeTag(mod) == .NoReturn) { @@ -20132,11 +20317,11 @@ fn zirStructInit( const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src); if (try sema.resolveValue(init_inst)) |val| { - const struct_val = Value.fromInterned((try mod.intern(.{ .un = .{ + const struct_val = Value.fromInterned(try pt.intern(.{ .un = .{ .ty = resolved_ty.toIntern(), .tag = tag_val.toIntern(), .val = val.toIntern(), - } }))); + } })); const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src); const final_val = (try sema.resolveValue(final_val_inst)).?; return sema.addConstantMaybeRef(final_val.toIntern(), is_ref); @@ -20152,7 +20337,7 @@ fn zirStructInit( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try mod.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20182,7 +20367,8 @@ fn finishStructInit( result_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; var root_msg: ?*Module.ErrorMsg = null; @@ -20242,7 +20428,7 @@ fn finishStructInit( continue; } - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { @@ -20289,7 +20475,7 @@ fn finishStructInit( for (elems, field_inits) |*elem, field_init| { elem.* = (sema.resolveValue(field_init) catch unreachable).?.toIntern(); } - const struct_val = try mod.intern(.{ .aggregate = .{ + const struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = elems }, } }); @@ -20312,9 +20498,9 @@ fn finishStructInit( } if (is_ref) { - try struct_ty.resolveLayout(mod); - const target = sema.mod.getTarget(); - const alloc_ty = try mod.ptrTypeSema(.{ + try struct_ty.resolveLayout(pt); + const target = mod.getTarget(); + const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20334,7 +20520,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); const struct_val = try 
block.addAggregateInit(struct_ty, field_inits); return sema.coerce(block, result_ty, struct_val, init_src); } @@ -20364,7 +20550,8 @@ fn structInitAnon( extra_end: usize, is_ref: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const zir_datas = sema.code.instructions.items(.data); @@ -20422,14 +20609,14 @@ fn structInitAnon( break :rs runtime_index; }; - const tuple_ty = try ip.getAnonStructType(gpa, .{ + const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ .names = names, .types = types, .values = values, }); const runtime_index = opt_runtime_index orelse { - const tuple_val = try mod.intern(.{ .aggregate = .{ + const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); @@ -20443,7 +20630,7 @@ fn structInitAnon( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try mod.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20457,7 +20644,7 @@ fn structInitAnon( }; extra_index = item.end; - const field_ptr_ty = try mod.ptrTypeSema(.{ + const field_ptr_ty = try pt.ptrTypeSema(.{ .child = field_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20491,7 +20678,8 @@ fn zirArrayInit( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); @@ -20550,8 +20738,8 @@ fn zirArrayInit( dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); if (is_tuple) { if (array_ty.structFieldIsComptime(i, mod)) - try array_ty.resolveStructFieldInits(mod); - if (try array_ty.structFieldValueComptime(mod, i)) |field_val| { + try array_ty.resolveStructFieldInits(pt); + if (try array_ty.structFieldValueComptime(pt, i)) |field_val| { const init_val = try sema.resolveValue(dest.*) orelse { return sema.failWithNeededComptime(block, elem_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", @@ -20581,7 +20769,7 @@ fn zirArrayInit( // We checked that all args are comptime above. val.* = (sema.resolveValue(arg) catch unreachable).?.toIntern(); } - const arr_val = try mod.intern(.{ .aggregate = .{ + const arr_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .elems = elem_vals }, } }); @@ -20597,7 +20785,7 @@ fn zirArrayInit( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try mod.ptrTypeSema(.{ + const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20606,27 +20794,27 @@ fn zirArrayInit( if (is_tuple) { for (resolved_args, 0..) 
|arg, i| { - const elem_ptr_ty = try mod.ptrTypeSema(.{ + const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = array_ty.structFieldType(i, mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); - const index = try mod.intRef(Type.usize, i); + const index = try pt.intRef(Type.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try mod.ptrTypeSema(.{ + const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = array_ty.elemType2(mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); for (resolved_args, 0..) |arg, i| { - const index = try mod.intRef(Type.usize, i); + const index = try pt.intRef(Type.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } @@ -20656,7 +20844,8 @@ fn arrayInitAnon( operands: []const Zir.Inst.Ref, is_ref: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -20689,14 +20878,14 @@ fn arrayInitAnon( break :rs runtime_src; }; - const tuple_ty = try ip.getAnonStructType(gpa, .{ + const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try mod.intern(.{ .aggregate = .{ + const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); @@ -20706,15 +20895,15 @@ fn arrayInitAnon( try sema.requireRuntimeBlock(block, src, runtime_src); if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try mod.ptrTypeSema(.{ + const target = sema.pt.zcu.getTarget(); + const alloc_ty = try pt.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) 
|operand, i_usize| { const i: u32 = @intCast(i_usize); - const field_ptr_ty = try mod.ptrTypeSema(.{ + const field_ptr_ty = try pt.ptrTypeSema(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20752,7 +20941,8 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; @@ -20780,11 +20970,12 @@ fn fieldType( field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; var cur_ty = aggregate_ty; while (true) { - try cur_ty.resolveFields(mod); + try cur_ty.resolveFields(pt); switch (cur_ty.zigTypeTag(mod)) { .Struct => switch (ip.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { @@ -20823,7 +21014,7 @@ fn fieldType( else => {}, } return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{ - cur_ty.fmt(sema.mod), + cur_ty.fmt(pt), }); } } @@ -20833,12 +21024,13 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); - const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); - const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); + const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); + const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); if (sema.owner_func_index != .none and ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and @@ -20846,7 +21038,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } - return Air.internedToRef((try mod.intern(.{ .opt = .{ + return Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), .val = .none, } }))); @@ -20862,19 +21054,20 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, operand_src, inst_data.operand); - if (ty.isNoReturn(mod)) { - return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); + if (ty.isNoReturn(pt.zcu)) { + return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(pt)}); } - const val = try ty.lazyAbiAlignment(mod); + const val = try ty.lazyAbiAlignment(pt); return Air.internedToRef(val.toIntern()); } fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const 
inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); @@ -20886,25 +21079,25 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } if (try sema.resolveValue(operand)) |val| { if (!is_vector) { - if (val.isUndef(mod)) return mod.undefRef(Type.u1); - if (val.toBool()) return Air.internedToRef((try mod.intValue(Type.u1, 1)).toIntern()); - return Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern()); + if (val.isUndef(mod)) return pt.undefRef(Type.u1); + if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern()); + return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); } const len = operand_ty.vectorLen(mod); - const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len }); - if (val.isUndef(mod)) return mod.undefRef(dest_ty); + const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len }); + if (val.isUndef(mod)) return pt.undefRef(dest_ty); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) |*new_elem, i| { - const old_elem = try val.elemValue(mod, i); + const old_elem = try val.elemValue(pt, i); const new_val = if (old_elem.isUndef(mod)) - try mod.undefValue(Type.u1) + try pt.undefValue(Type.u1) else if (old_elem.toBool()) - try mod.intValue(Type.u1, 1) + try pt.intValue(Type.u1, 1) else - try mod.intValue(Type.u1, 0); + try pt.intValue(Type.u1, 0); new_elem.* = new_val.toIntern(); } - return Air.internedToRef(try mod.intern(.{ .aggregate = .{ + return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); @@ -20913,10 +21106,10 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return block.addUnOp(.int_from_bool, operand); } const len = operand_ty.vectorLen(mod); - const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len }); + const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len }); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) 
|*new_elem, i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addUnOp(.int_from_bool, old_elem); } @@ -20930,7 +21123,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; + const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addNullTerminatedStrLit(err_name); } @@ -20944,7 +21137,8 @@ fn zirAbs( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -20953,12 +21147,12 @@ fn zirAbs( const result_ty = switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => operand_ty, - .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(mod) else return operand, + .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(pt) else return operand, else => return sema.fail( block, operand_src, "expected integer, float, or vector of either integers or floats, found '{}'", - .{operand_ty.fmt(mod)}, + .{operand_ty.fmt(pt)}, ), }; @@ -20972,30 +21166,31 @@ fn maybeConstantUnaryMath( sema: *Sema, operand: Air.Inst.Ref, result_ty: Type, - comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value, + comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value, ) CompileError!?Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (result_ty.zigTypeTag(mod)) { .Vector => if (try sema.resolveValue(operand)) |val| { const scalar_ty = result_ty.scalarType(mod); const vec_len = result_ty.vectorLen(mod); if (val.isUndef(mod)) - return try mod.undefRef(result_ty); + return try pt.undefRef(result_ty); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + elem.* = (try eval(elem_val, scalar_ty, sema.arena, pt)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); }, else => if (try sema.resolveValue(operand)) |operand_val| { if (operand_val.isUndef(mod)) - return try mod.undefRef(result_ty); - const result_val = try eval(operand_val, result_ty, sema.arena, sema.mod); + return try pt.undefRef(result_ty); + const result_val = try eval(operand_val, result_ty, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); }, } @@ -21007,12 +21202,13 @@ fn zirUnaryMath( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value, + comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -21025,7 +21221,7 @@ fn zirUnaryMath( block, operand_src, "expected vector of floats or float type, found '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(pt)}, ), } @@ -21041,10 +21237,11 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - try operand_ty.resolveLayout(mod); + try operand_ty.resolveLayout(pt); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); @@ -21053,9 +21250,9 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, .Enum => operand_ty, .Union => operand_ty.unionTagType(mod) orelse - return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(sema.mod)}), + return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(pt)}), else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }), }; if (enum_ty.enumFieldCount(mod) == 0) { @@ -21063,7 +21260,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air // it prevents a crash. 
// https://github.com/ziglang/zig/issues/15909 return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{ - enum_ty.fmt(mod), + enum_ty.fmt(pt), }); } const enum_decl_index = enum_ty.getOwnerDecl(mod); @@ -21072,7 +21269,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { const msg = msg: { const msg = try sema.errMsg(src, "no field with value '{}' in enum '{}'", .{ - val.fmtValue(sema.mod, sema), mod.declPtr(enum_decl_index).name.fmt(ip), + val.fmtValue(pt, sema), mod.declPtr(enum_decl_index).name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.errNote(enum_ty.srcLoc(mod), msg, "declared here", .{}); @@ -21085,7 +21282,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.addNullTerminatedStrLit(field_name); } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) { + if (block.wantSafety() and mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, casted_operand); try sema.addSafetyCheck(block, src, ok, .invalid_enum_value); } @@ -21101,7 +21298,8 @@ fn zirReify( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small); @@ -21120,7 +21318,7 @@ fn zirReify( }, }, }; - const type_info_ty = try mod.getBuiltinType("Type"); + const type_info_ty = try pt.getBuiltinType("Type"); const uncasted_operand = try sema.resolveInst(extra.operand); const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{ @@ -21145,36 +21343,36 @@ fn zirReify( .Int => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const signedness_val = try Value.fromInterned(union_val.val).fieldValue( - mod, + pt, struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness", .no_embedded_nulls)).?, ); const bits_val = try Value.fromInterned(union_val.val).fieldValue( - mod, + pt, struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls)).?, ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); - const ty = try mod.intType(signedness, bits); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); + const ty = try pt.intType(signedness, bits); return Air.internedToRef(ty.toIntern()); }, .Vector => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), ).?); - const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod)); + const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, 
src, child_ty); - const ty = try mod.vectorType(.{ + const ty = try pt.vectorType(.{ .len = len, .child = child_ty.toIntern(), }); @@ -21182,12 +21380,12 @@ fn zirReify( }, .Float => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), ).?); - const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -21200,35 +21398,35 @@ fn zirReify( }, .Pointer => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "size", .no_embedded_nulls), ).?); - const is_const_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_const", .no_embedded_nulls), ).?); - const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_volatile", .no_embedded_nulls), ).?); - const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "alignment", .no_embedded_nulls), ).?); - const address_space_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "address_space", .no_embedded_nulls), ).?); - const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_allowzero", .no_embedded_nulls), ).?); - const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), ).?); @@ -21237,7 +21435,7 @@ fn zirReify( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?; + const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(pt, .sema)).?; if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int}); } @@ -21245,7 +21443,7 @@ fn zirReify( const elem_ty = child_val.toType(); if (abi_align != .none) { - try 
elem_ty.resolveLayout(mod); + try elem_ty.resolveLayout(pt); } const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); @@ -21256,7 +21454,7 @@ fn zirReify( return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } const sentinel_ptr_val = sentinel_val.optionalValue(mod).?; - const ptr_ty = try mod.singleMutPtrType(elem_ty); + const ptr_ty = try pt.singleMutPtrType(elem_ty); const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; break :s sent_val.toIntern(); } @@ -21274,7 +21472,7 @@ fn zirReify( } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, elem_ty, .other); @@ -21289,7 +21487,7 @@ fn zirReify( } } - const ty = try mod.ptrTypeSema(.{ + const ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = actual_sentinel, .flags = .{ @@ -21305,27 +21503,27 @@ fn zirReify( }, .Array => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), ).?); - const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), ).?); - const len = try len_val.toUnsignedIntSema(mod); + const len = try len_val.toUnsignedIntSema(pt); const child_ty = child_val.toType(); const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { - const ptr_ty = try mod.singleMutPtrType(child_ty); + const ptr_ty = try pt.singleMutPtrType(child_ty); break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; - const ty = try mod.arrayType(.{ + const ty = try pt.arrayType(.{ .len = len, .sentinel = if (sentinel) |s| s.toIntern() else .none, .child = child_ty.toIntern(), @@ -21334,23 +21532,23 @@ fn zirReify( }, .Optional => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); const child_ty = child_val.toType(); - const ty = try mod.optionalType(child_ty.toIntern()); + const ty = try pt.optionalType(child_ty.toIntern()); return Air.internedToRef(ty.toIntern()); }, .ErrorUnion => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "error_set", 
.no_embedded_nulls), ).?); - const payload_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "payload", .no_embedded_nulls), ).?); @@ -21362,7 +21560,7 @@ fn zirReify( return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } - const ty = try mod.errorUnionType(error_set_ty, payload_ty); + const ty = try pt.errorUnionType(error_set_ty, payload_ty); return Air.internedToRef(ty.toIntern()); }, .ErrorSet => { @@ -21377,9 +21575,9 @@ fn zirReify( var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { - const elem_val = try names_val.elemValue(mod, i); + const elem_val = try names_val.elemValue(pt, i); const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); - const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), ).?); @@ -21396,28 +21594,28 @@ fn zirReify( } } - const ty = try mod.errorSetFromUnsortedNames(names.keys()); + const ty = try pt.errorSetFromUnsortedNames(names.keys()); return Air.internedToRef(ty.toIntern()); }, .Struct => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), ).?); - const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "backing_integer", .no_embedded_nulls), ).?); - const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), ).?); - const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_tuple", .no_embedded_nulls), ).?); @@ -21425,7 +21623,7 @@ fn zirReify( const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls - if (try decls_val.sliceLen(mod) > 0) { + if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } @@ -21441,24 +21639,24 @@ fn zirReify( }, .Enum => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), ).?); - const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const 
fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), ).?); - const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(mod) > 0) { + if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } @@ -21470,17 +21668,17 @@ fn zirReify( }, .Opaque => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); // Decls - if (try decls_val.sliceLen(mod) > 0) { + if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified opaque must have no decls", .{}); } - const wip_ty = switch (try ip.getOpaqueType(gpa, .{ + const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, .{ .has_namespace = false, .key = .{ .reified = .{ .zir_index = try block.trackZir(inst), @@ -21501,30 +21699,30 @@ fn zirReify( mod.declPtr(new_decl_index).owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); }, .Union => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), ).?); - const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), ).?); - const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), ).?); - const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(mod) > 0) { + if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21537,23 +21735,23 @@ fn zirReify( }, .Fn => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); - const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const calling_convention_val = try 
Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "calling_convention", .no_embedded_nulls), ).?); - const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), ).?); - const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_var_args", .no_embedded_nulls), ).?); - const return_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "return_type", .no_embedded_nulls), ).?); - const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex( + const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "params", .no_embedded_nulls), ).?); @@ -21581,17 +21779,17 @@ fn zirReify( var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { - const elem_val = try params_val.elemValue(mod, i); + const elem_val = try params_val.elemValue(pt, i); const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); - const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), ).?); - const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "is_noalias", .no_embedded_nulls), ).?); - const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, "type", .no_embedded_nulls), ).?); @@ -21613,7 +21811,7 @@ fn zirReify( } } - const ty = try mod.funcType(.{ + const ty = try pt.funcType(.{ .param_types = param_types, .noalias_bits = noalias_bits, .return_type = return_type.toIntern(), @@ -21636,7 +21834,8 @@ fn reifyEnum( fields_val: Value, name_strategy: Zir.Inst.NameStrategy, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -21656,10 +21855,10 @@ fn reifyEnum( std.hash.autoHash(&hasher, fields_len); for (0..fields_len) |field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1)); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "enum field name must be comptime-known", @@ -21671,7 +21870,7 @@ fn reifyEnum( }); } - const wip_ty = switch (try ip.getEnumType(gpa, .{ + const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, 
.{ .has_namespace = false, .has_values = true, .tag_mode = if (is_exhaustive) .explicit else .nonexhaustive, @@ -21704,10 +21903,10 @@ fn reifyEnum( wip_ty.setTagTy(ip, tag_ty.toIntern()); for (0..fields_len) |field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1)); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1)); // Don't pass a reason; first loop acts as an assertion that this is valid. const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); @@ -21716,12 +21915,12 @@ fn reifyEnum( // TODO: better source location return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{ field_name.fmt(ip), - field_value_val.fmtValue(mod, sema), - tag_ty.fmt(mod), + field_value_val.fmtValue(pt, sema), + tag_ty.fmt(pt), }); } - const coerced_field_val = try mod.getCoerced(field_value_val, tag_ty); + const coerced_field_val = try pt.getCoerced(field_value_val, tag_ty); if (wip_ty.nextField(ip, field_name, coerced_field_val.toIntern())) |conflict| { return sema.failWithOwnedErrorMsg(block, switch (conflict.kind) { .name => msg: { @@ -21732,7 +21931,7 @@ fn reifyEnum( break :msg msg; }, .value => msg: { - const msg = try sema.errMsg(src, "enum tag value {} already taken", .{field_value_val.fmtValue(mod, sema)}); + const msg = try sema.errMsg(src, "enum tag value {} already taken", .{field_value_val.fmtValue(pt, sema)}); errdefer msg.destroy(gpa); _ = conflict.prev_field_idx; // TODO: this note is incorrect try sema.errNote(src, msg, "other enum tag value here", .{}); @@ -21742,11 +21941,11 @@ fn reifyEnum( } } - if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(mod)) { + if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(pt)) { return sema.fail(block, src, "non-exhaustive enum specified every value", .{}); } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.index); } @@ -21760,7 +21959,8 @@ fn reifyUnion( fields_val: Value, name_strategy: Zir.Inst.NameStrategy, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -21782,11 +21982,11 @@ fn reifyUnion( var any_aligns = false; for (0..fields_len) |field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_type_val = try field_info.fieldValue(mod, 1); - const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 2)); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); + const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 2)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "union field name must be comptime-known", @@ -21798,12 +21998,12 @@ fn reifyUnion( field_align_val.toIntern(), }); - if (field_align_val.toUnsignedInt(mod) != 0) { + if (field_align_val.toUnsignedInt(pt) != 0) { any_aligns = 
true; } } - const wip_ty = switch (try ip.getUnionType(gpa, .{ + const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{ .flags = .{ .layout = layout, .status = .none, @@ -21861,10 +22061,10 @@ fn reifyUnion( var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len); for (field_types, 0..) |*field_ty, field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_type_val = try field_info.fieldValue(mod, 1); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); // Don't pass a reason; first loop acts as an assertion that this is valid. const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); @@ -21872,7 +22072,7 @@ fn reifyUnion( const enum_index = enum_tag_ty.enumFieldIndex(field_name, mod) orelse { // TODO: better source location return sema.fail(block, src, "no field named '{}' in enum '{}'", .{ - field_name.fmt(ip), enum_tag_ty.fmt(mod), + field_name.fmt(ip), enum_tag_ty.fmt(pt), }); }; if (seen_tags.isSet(enum_index)) { @@ -21883,7 +22083,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); + const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -21913,10 +22113,10 @@ fn reifyUnion( try field_names.ensureTotalCapacity(sema.arena, fields_len); for (field_types, 0..) |*field_ty, field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_type_val = try field_info.fieldValue(mod, 1); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); // Don't pass a reason; first loop acts as an assertion that this is valid. 
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); @@ -21928,7 +22128,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); + const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -21955,7 +22155,7 @@ fn reifyUnion( } if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .union_field); @@ -21965,7 +22165,7 @@ fn reifyUnion( }); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotPacked(msg, src, field_ty); @@ -21984,7 +22184,7 @@ fn reifyUnion( loaded_union.tagTypePtr(ip).* = enum_tag_ty; loaded_union.flagsPtr(ip).status = .have_field_types; - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); @@ -22001,7 +22201,8 @@ fn reifyStruct( name_strategy: Zir.Inst.NameStrategy, is_tuple: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -22026,20 +22227,20 @@ fn reifyStruct( var any_aligned_fields = false; for (0..fields_len) |field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_type_val = try field_info.fieldValue(mod, 1); - const field_default_value_val = try field_info.fieldValue(mod, 2); - const field_is_comptime_val = try field_info.fieldValue(mod, 3); - const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 4)); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); + const field_default_value_val = try field_info.fieldValue(pt, 2); + const field_is_comptime_val = try field_info.fieldValue(pt, 3); + const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 4)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "struct field name must be comptime-known", }); const field_is_comptime = field_is_comptime_val.toBool(); const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(mod)) |ptr_val| d: { - const ptr_ty = try mod.singleConstPtrType(field_type_val.toType()); + const ptr_ty = try 
pt.singleConstPtrType(field_type_val.toType()); // We need to do this deref here, so we won't check for this error case later on. const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime( block, @@ -22060,14 +22261,14 @@ fn reifyStruct( if (field_is_comptime) any_comptime_fields = true; if (field_default_value != .none) any_default_inits = true; - switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) { + switch (try field_alignment_val.orderAgainstZeroAdvanced(pt, .sema)) { .eq => {}, .gt => any_aligned_fields = true, .lt => unreachable, } } - const wip_ty = switch (try ip.getStructType(gpa, .{ + const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ .layout = layout, .fields_len = fields_len, .known_non_opv = false, @@ -22107,13 +22308,13 @@ fn reifyStruct( const struct_type = ip.loadStructType(wip_ty.index); for (0..fields_len) |field_idx| { - const field_info = try fields_val.elemValue(mod, field_idx); + const field_info = try fields_val.elemValue(pt, field_idx); - const field_name_val = try field_info.fieldValue(mod, 0); - const field_type_val = try field_info.fieldValue(mod, 1); - const field_default_value_val = try field_info.fieldValue(mod, 2); - const field_is_comptime_val = try field_info.fieldValue(mod, 3); - const field_alignment_val = try field_info.fieldValue(mod, 4); + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); + const field_default_value_val = try field_info.fieldValue(pt, 2); + const field_is_comptime_val = try field_info.fieldValue(pt, 3); + const field_alignment_val = try field_info.fieldValue(pt, 4); const field_ty = field_type_val.toType(); // Don't pass a reason; first loop acts as an assertion that this is valid. @@ -22143,7 +22344,7 @@ fn reifyStruct( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const byte_align = try field_alignment_val.toUnsignedIntSema(mod); + const byte_align = try field_alignment_val.toUnsignedIntSema(pt); if (byte_align == 0) { if (layout != .@"packed") { struct_type.field_aligns.get(ip)[field_idx] = .none; @@ -22168,7 +22369,7 @@ fn reifyStruct( const field_default: InternPool.Index = d: { if (!any_default_inits) break :d .none; const ptr_val = field_default_value_val.optionalValue(mod) orelse break :d .none; - const ptr_ty = try mod.singleConstPtrType(field_ty); + const ptr_ty = try pt.singleConstPtrType(field_ty); // Asserted comptime-dereferencable above. const val = (try sema.pointerDeref(block, src, ptr_val, ptr_ty)).?; // We already resolved this for deduplication, so we may as well do it now. 
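The hunks above and below all apply the same mechanical migration: `Sema` call sites stop reaching for the whole compilation via `sema.mod` and instead go through the per-thread handle `sema.pt` (a `Zcu.PerThread`), keeping `pt.zcu` around for read-only queries. A minimal sketch of the pattern as a hypothetical handler — `exampleZirHandler` is illustrative only and not part of this patch; the `sema.pt`/`pt.zcu` split and the callee signatures (`zigTypeTag`, `fmt`, `intValue`, `nodeOffset`, `fail`) are the ones visible in the surrounding hunks:

    fn exampleZirHandler(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
        // Interning and value construction now go through the per-thread state...
        const pt = sema.pt;
        // ...while read-only queries on compilation-wide state still take the Zcu.
        const mod = pt.zcu;

        const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
        const operand = try sema.resolveInst(inst_data.operand);
        const operand_ty = sema.typeOf(operand);

        // Pure type queries keep taking `mod`; anything that may intern a new
        // value or format a type against the intern pool now takes `pt`.
        if (operand_ty.zigTypeTag(mod) != .Int) {
            const src = block.nodeOffset(inst_data.src_node);
            return sema.fail(block, src, "expected integer type, found '{}'", .{operand_ty.fmt(pt)});
        }
        return Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern());
    }
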
@@ -22204,7 +22405,7 @@ fn reifyStruct( } if (layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .struct_field); @@ -22214,7 +22415,7 @@ fn reifyStruct( }); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotPacked(msg, src, field_ty); @@ -22229,7 +22430,7 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); - field_ty.resolveLayout(mod) catch |err| switch (err) { + field_ty.resolveLayout(pt) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.errNote(src, msg, "while checking a field of this struct", .{}); @@ -22237,7 +22438,7 @@ fn reifyStruct( }, else => return err, }; - fields_bit_sum += field_ty.bitSize(mod); + fields_bit_sum += field_ty.bitSize(pt); } if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| { @@ -22245,20 +22446,21 @@ fn reifyStruct( try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } else { - const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); + const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } } - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { - const va_list_ty = try sema.mod.getBuiltinType("VaList"); - const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); + const pt = sema.pt; + const va_list_ty = try pt.getBuiltinType("VaList"); + const va_list_ptr = try pt.singleMutPtrType(va_list_ty); const inst = try sema.resolveInst(zir_ref); return sema.coerce(block, va_list_ptr, inst, src); @@ -22275,7 +22477,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C if (!try sema.validateExternType(arg_ty, .param_ty)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, arg_ty, .param_ty); @@ -22296,7 +22498,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, 
va_list_src, extra.operand); - const va_list_ty = try sema.mod.getBuiltinType("VaList"); + const va_list_ty = try sema.pt.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref); @@ -22316,7 +22518,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); - const va_list_ty = try sema.mod.getBuiltinType("VaList"); + const va_list_ty = try sema.pt.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addInst(.{ .tag = .c_va_start, @@ -22325,14 +22527,15 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) } fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, ty_src, inst_data.operand); - const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(mod)}, .no_embedded_nulls); + const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); return sema.addNullTerminatedStrLit(type_name); } @@ -22349,7 +22552,8 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -22380,23 +22584,23 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (dest_scalar_ty.intInfo(mod).bits == 0) { if (!is_vector) { if (block.wantSafety()) { - const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern())); + const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try pt.floatValue(operand_ty, 0.0)).toIntern())); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } - return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern()); + return Air.internedToRef((try pt.intValue(dest_ty, 0)).toIntern()); } if (block.wantSafety()) { const len = dest_ty.vectorLen(mod); for (0..len) |i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref); - const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern())); + const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern())); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } } - return Air.internedToRef(try mod.intern(.{ .aggregate = .{ + return Air.internedToRef(try pt.intern(.{ 
.aggregate = .{ .ty = dest_ty.toIntern(), - .storage = .{ .repeated_elem = (try mod.intValue(dest_scalar_ty, 0)).toIntern() }, + .storage = .{ .repeated_elem = (try pt.intValue(dest_scalar_ty, 0)).toIntern() }, } })); } if (!is_vector) { @@ -22404,8 +22608,8 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (block.wantSafety()) { const back = try block.addTyOp(.float_from_int, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern())); - const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern())); + const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern())); + const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern())); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } @@ -22414,14 +22618,14 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const len = dest_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem); if (block.wantSafety()) { const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result); const diff = try block.addBinOp(.sub, old_elem, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern())); - const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern())); + const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 1.0)).toIntern())); + const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, -1.0)).toIntern())); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } @@ -22431,7 +22635,8 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -22450,7 +22655,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro _ = try sema.checkIntType(block, 
operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { - const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema); + const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, pt, .sema); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, .{ @@ -22465,7 +22670,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem); } @@ -22473,7 +22678,8 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); @@ -22489,7 +22695,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const is_vector = dest_ty.zigTypeTag(mod) == .Vector; const operand_ty = if (is_vector) operand_ty: { const len = dest_ty.vectorLen(mod); - break :operand_ty try mod.vectorType(.{ .child = .usize_type, .len = len }); + break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len }); } else Type.usize; const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src); @@ -22498,11 +22704,11 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkPtrType(block, src, ptr_ty, true); const elem_ty = ptr_ty.elemType2(mod); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(pt, .sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { - const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "slice length cannot be inferred from address", .{}); break :msg msg; @@ -22518,18 +22724,18 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const len = dest_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) 
|*new_elem, i| { - const elem = try val.elemValue(mod, i); + const elem = try val.elemValue(pt, i); const ptr_val = try sema.ptrFromIntVal(block, operand_src, elem, ptr_ty, ptr_align); new_elem.* = ptr_val.toIntern(); } - return Air.internedToRef(try mod.intern(.{ .aggregate = .{ + return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } if (try sema.typeRequiresComptime(ptr_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, src, ptr_ty); @@ -22545,7 +22751,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } if (ptr_align.compare(.gt, .@"1")) { const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1; - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); + const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); @@ -22557,7 +22763,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const len = dest_ty.vectorLen(mod); if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { for (0..len) |i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize); @@ -22565,7 +22771,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } if (ptr_align.compare(.gt, .@"1")) { const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1; - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); + const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); @@ -22575,7 +22781,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) 
|*new_elem, i| { - const idx_ref = try mod.intRef(Type.usize, i); + const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); new_elem.* = try block.addBitCast(ptr_ty, old_elem); } @@ -22590,31 +22796,33 @@ fn ptrFromIntVal( ptr_ty: Type, ptr_align: Alignment, ) !Value { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; if (operand_val.isUndef(zcu)) { if (ptr_ty.isAllowzeroPtr(zcu) and ptr_align == .@"1") { - return zcu.undefValue(ptr_ty); + return pt.undefValue(ptr_ty); } return sema.failWithUseOfUndef(block, operand_src); } - const addr = try operand_val.toUnsignedIntSema(zcu); + const addr = try operand_val.toUnsignedIntSema(pt); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) - return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)}); + return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(pt)}); if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) - return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(zcu)}); + return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)}); return switch (ptr_ty.zigTypeTag(zcu)) { - .Optional => Value.fromInterned((try zcu.intern(.{ .opt = .{ + .Optional => Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = ptr_ty.toIntern(), - .val = if (addr == 0) .none else (try zcu.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(), - } }))), - .Pointer => try zcu.ptrIntValue(ptr_ty, addr), + .val = if (addr == 0) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(), + } })), + .Pointer => try pt.ptrIntValue(ptr_ty, addr), else => unreachable, }; } fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = block.nodeOffset(extra.node); @@ -22642,8 +22850,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData errdefer msg.destroy(sema.gpa); const dest_ty = base_dest_ty.errorUnionPayload(mod); const operand_ty = base_operand_ty.errorUnionPayload(mod); - try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(mod)}); - try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(mod)}); + try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(pt)}); + try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(pt)}); try addDeclaredHereNote(sema, msg, dest_ty); try addDeclaredHereNote(sema, msg, operand_ty); break :msg msg; @@ -22684,7 +22892,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData }; if (disjoint and dest_tag != .ErrorUnion) { return sema.fail(block, src, "error sets '{}' and '{}' have no common errors", .{ - operand_ty.fmt(sema.mod), dest_ty.fmt(sema.mod), + operand_ty.fmt(pt), dest_ty.fmt(pt), }); } @@ -22700,24 +22908,24 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData } if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) { return sema.fail(block, src, "'error.{}' not a member of error set '{}'", .{ - error_name.fmt(ip), dest_ty.fmt(sema.mod), + error_name.fmt(ip), dest_ty.fmt(pt), }); } } - return Air.internedToRef((try mod.getCoerced(val, 
base_dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(val, base_dest_ty)).toIntern()); } try sema.requireRuntimeBlock(block, src, operand_src); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); if (block.wantSafety() and !dest_ty.isAnyError(mod) and dest_ty.toIntern() != .adhoc_inferred_error_set_type and - sema.mod.backendSupportsFeature(.error_set_has_value)) + mod.backendSupportsFeature(.error_set_has_value)) { if (dest_tag == .ErrorUnion) { const err_code = try sema.analyzeErrUnionCode(block, operand_src, operand); const err_int = try block.addBitCast(err_int_ty, err_code); - const zero_err = try mod.intRef(try mod.errorIntType(), 0); + const zero_err = try pt.intRef(try pt.errorIntType(), 0); const is_zero = try block.addBinOp(.cmp_eq, err_int, zero_err); if (disjoint) { @@ -22786,7 +22994,8 @@ fn ptrCastFull( dest_ty: Type, operation: []const u8, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); try sema.checkPtrType(block, src, dest_ty, true); @@ -22795,8 +23004,8 @@ fn ptrCastFull( const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - try Type.fromInterned(src_info.child).resolveLayout(mod); - try Type.fromInterned(dest_info.child).resolveLayout(mod); + try Type.fromInterned(src_info.child).resolveLayout(pt); + try Type.fromInterned(dest_info.child).resolveLayout(pt); const src_slice_like = src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array); @@ -22810,12 +23019,12 @@ fn ptrCastFull( if (dest_info.flags.size == .Slice) { const src_elem_size = switch (src_info.flags.size) { - .Slice => Type.fromInterned(src_info.child).abiSize(mod), + .Slice => Type.fromInterned(src_info.child).abiSize(pt), // pointer to array - .One => Type.fromInterned(src_info.child).childType(mod).abiSize(mod), + .One => Type.fromInterned(src_info.child).childType(mod).abiSize(pt), else => unreachable, }; - const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(mod); + const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(pt); if (src_elem_size != dest_elem_size) { return sema.fail(block, src, "TODO: implement {s} between slices changing the length", .{operation}); } @@ -22867,8 +23076,7 @@ fn ptrCastFull( if (imc_res == .ok) break :check_child; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer element type '{}' cannot coerce into element type '{}'", .{ - src_child.fmt(mod), - dest_child.fmt(mod), + src_child.fmt(pt), dest_child.fmt(pt), }); errdefer msg.destroy(sema.gpa); try imc_res.report(sema, src, msg); @@ -22881,26 +23089,26 @@ fn ptrCastFull( if (dest_info.sentinel == .none) break :check_sent; if (src_info.flags.size == .C) break :check_sent; if (src_info.sentinel != .none) { - const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child); + const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child); if (dest_info.sentinel == coerced_sent) break :check_sent; } if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) { // [*]nT -> []T const arr_ty = Type.fromInterned(src_info.child); if (arr_ty.sentinel(mod)) |src_sentinel| { - const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child); + const coerced_sent = try 
mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child); if (dest_info.sentinel == coerced_sent) break :check_sent; } } return sema.failWithOwnedErrorMsg(block, msg: { const msg = if (src_info.sentinel == .none) blk: { break :blk try sema.errMsg(src, "destination pointer requires '{}' sentinel", .{ - Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema), + Value.fromInterned(dest_info.sentinel).fmtValue(pt, sema), }); } else blk: { break :blk try sema.errMsg(src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{ - Value.fromInterned(src_info.sentinel).fmtValue(mod, sema), - Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema), + Value.fromInterned(src_info.sentinel).fmtValue(pt, sema), + Value.fromInterned(dest_info.sentinel).fmtValue(pt, sema), }); }; errdefer msg.destroy(sema.gpa); @@ -22941,8 +23149,8 @@ fn ptrCastFull( return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "'{}' could have null values which are illegal in type '{}'", .{ - operand_ty.fmt(mod), - dest_ty.fmt(mod), + operand_ty.fmt(pt), + dest_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @ptrCast to assert the pointer is not null", .{}); @@ -22956,12 +23164,12 @@ fn ptrCastFull( const src_align = if (src_info.flags.alignment != .none) src_info.flags.alignment else - Type.fromInterned(src_info.child).abiAlignment(mod); + Type.fromInterned(src_info.child).abiAlignment(pt); const dest_align = if (dest_info.flags.alignment != .none) dest_info.flags.alignment else - Type.fromInterned(dest_info.child).abiAlignment(mod); + Type.fromInterned(dest_info.child).abiAlignment(pt); if (!flags.align_cast) { if (dest_align.compare(.gt, src_align)) { @@ -22969,10 +23177,10 @@ fn ptrCastFull( const msg = try sema.errMsg(src, "{s} increases pointer alignment", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(mod), src_align.toByteUnits() orelse 0, + operand_ty.fmt(pt), src_align.toByteUnits() orelse 0, }); try sema.errNote(src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(mod), dest_align.toByteUnits() orelse 0, + dest_ty.fmt(pt), dest_align.toByteUnits() orelse 0, }); try sema.errNote(src, msg, "use @alignCast to assert pointer alignment", .{}); break :msg msg; @@ -22986,10 +23194,10 @@ fn ptrCastFull( const msg = try sema.errMsg(src, "{s} changes pointer address space", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_src, msg, "'{}' has address space '{s}'", .{ - operand_ty.fmt(mod), @tagName(src_info.flags.address_space), + operand_ty.fmt(pt), @tagName(src_info.flags.address_space), }); try sema.errNote(src, msg, "'{}' has address space '{s}'", .{ - dest_ty.fmt(mod), @tagName(dest_info.flags.address_space), + dest_ty.fmt(pt), @tagName(dest_info.flags.address_space), }); try sema.errNote(src, msg, "use @addrSpaceCast to cast pointer address space", .{}); break :msg msg; @@ -23044,9 +23252,9 @@ fn ptrCastFull( // Only convert to a many-pointer at first var info = dest_info; info.flags.size = .Many; - const ty = try mod.ptrTypeSema(info); + const ty = try pt.ptrTypeSema(info); if (dest_ty.zigTypeTag(mod) == .Optional) { - break :blk try mod.optionalType(ty.toIntern()); + break :blk try pt.optionalType(ty.toIntern()); } else { break :blk ty; } @@ -23059,10 +23267,10 @@ fn ptrCastFull( return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) { - return 
sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)}); } if (dest_align.compare(.gt, src_align)) { - if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| { + if (try ptr_val.getUnsignedIntAdvanced(pt, .sema)) |addr| { if (!dest_align.check(addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, @@ -23072,12 +23280,12 @@ fn ptrCastFull( } } if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { - if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty); - const arr_len = try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod)); + if (ptr_val.isUndef(mod)) return pt.undefRef(dest_ty); + const arr_len = try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod)); const ptr_val_key = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; - return Air.internedToRef((try mod.intern(.{ .slice = .{ + return Air.internedToRef((try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), - .ptr = try mod.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = dest_ty.slicePtrFieldType(mod).toIntern(), .base_addr = ptr_val_key.base_addr, .byte_offset = ptr_val_key.byte_offset, @@ -23086,7 +23294,7 @@ fn ptrCastFull( } }))); } else { assert(dest_ptr_ty.eql(dest_ty, mod)); - return Air.internedToRef((try mod.getCoerced(ptr_val, dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(ptr_val, dest_ty)).toIntern()); } } } @@ -23112,7 +23320,7 @@ fn ptrCastFull( try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child))) { const align_bytes_minus_1 = dest_align.toByteUnits().? - 1; - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); + const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); @@ -23129,9 +23337,9 @@ fn ptrCastFull( // We can't change address spaces with a bitcast, so this requires two instructions var intermediate_info = src_info; intermediate_info.flags.address_space = dest_info.flags.address_space; - const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info); + const intermediate_ptr_ty = try pt.ptrTypeSema(intermediate_info); const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { - break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); + break :blk try pt.optionalType(intermediate_ptr_ty.toIntern()); } else intermediate_ptr_ty; const intermediate = try block.addInst(.{ .tag = .addrspace_cast, @@ -23152,7 +23360,7 @@ fn ptrCastFull( if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { // We have to construct a slice using the operand's child's array length // Note that we know from the check at the start of the function that operand_ty is slice-like - const arr_len = Air.internedToRef((try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern()); + const arr_len = Air.internedToRef((try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern()); return block.addInst(.{ .tag = .slice, .data = .{ .ty_pl = .{ @@ -23171,7 +23379,8 @@ fn ptrCastFull( } fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: 
Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?; const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small))); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; @@ -23186,15 +23395,15 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = blk: { - const dest_ty = try mod.ptrTypeSema(ptr_info); + const dest_ty = try pt.ptrTypeSema(ptr_info); if (operand_ty.zigTypeTag(mod) == .Optional) { - break :blk try mod.optionalType(dest_ty.toIntern()); + break :blk try pt.optionalType(dest_ty.toIntern()); } break :blk dest_ty; }; if (try sema.resolveValue(operand)) |operand_val| { - return Air.internedToRef((try mod.getCoerced(operand_val, dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(operand_val, dest_ty)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -23204,7 +23413,8 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -23218,7 +23428,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector; if (operand_is_vector != dest_is_vector) { - return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }); + return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) }); } if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { @@ -23239,7 +23449,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ - @tagName(dest_info.signedness), operand_ty.fmt(mod), + @tagName(dest_info.signedness), operand_ty.fmt(pt), }); } if (operand_info.bits < dest_info.bits) { @@ -23247,7 +23457,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const msg = try sema.errMsg( src, "destination type '{}' has more bits than source type '{}'", - .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, + .{ dest_ty.fmt(pt), operand_ty.fmt(pt) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "destination type has {d} bits", .{ @@ -23263,20 +23473,20 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveValueIntable(operand)) |val| { - if (val.isUndef(mod)) return mod.undefRef(dest_ty); + if (val.isUndef(mod)) return pt.undefRef(dest_ty); if (!dest_is_vector) { - return Air.internedToRef((try mod.getCoerced( - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), + return Air.internedToRef((try pt.getCoerced( + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, pt), dest_ty, )).toIntern()); } const elems = try 
sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod); - elem.* = (try mod.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern(); + const elem_val = try val.elemValue(pt, i); + const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, pt); + elem.* = (try pt.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -23291,9 +23501,10 @@ fn zirBitCount( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64, + comptime comptimeOp: fn (val: Value, ty: Type, pt: Zcu.PerThread) u64, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -23306,25 +23517,25 @@ fn zirBitCount( return Air.internedToRef(val.toIntern()); } - const result_scalar_ty = try mod.smallestUnsignedInt(bits); + const result_scalar_ty = try pt.smallestUnsignedInt(bits); switch (operand_ty.zigTypeTag(mod)) { .Vector => { const vec_len = operand_ty.vectorLen(mod); - const result_ty = try mod.vectorType(.{ + const result_ty = try pt.vectorType(.{ .len = vec_len, .child = result_scalar_ty.toIntern(), }); if (try sema.resolveValue(operand)) |val| { - if (val.isUndef(mod)) return mod.undefRef(result_ty); + if (val.isUndef(mod)) return pt.undefRef(result_ty); const elems = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(mod, i); - const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern(); + const elem_val = try val.elemValue(pt, i); + const count = comptimeOp(elem_val, scalar_ty, pt); + elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -23335,8 +23546,8 @@ fn zirBitCount( }, .Int => { if (try sema.resolveValueResolveLazy(operand)) |val| { - if (val.isUndef(mod)) return mod.undefRef(result_scalar_ty); - return mod.intRef(result_scalar_ty, comptimeOp(val, operand_ty, mod)); + if (val.isUndef(mod)) return pt.undefRef(result_scalar_ty); + return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, pt)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); @@ -23347,7 +23558,8 @@ fn zirBitCount( } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -23360,7 +23572,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", - .{ scalar_ty.fmt(mod), bits }, + .{ scalar_ty.fmt(pt), bits }, ); } @@ -23371,8 +23583,8 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveValue(operand)) |val| { - if (val.isUndef(mod)) return mod.undefRef(operand_ty); - const result_val = try val.byteSwap(operand_ty, mod, sema.arena); + if (val.isUndef(mod)) return pt.undefRef(operand_ty); + const result_val = try val.byteSwap(operand_ty, pt, sema.arena); return Air.internedToRef(result_val.toIntern()); } else operand_src; @@ -23382,15 +23594,15 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Vector => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) - return mod.undefRef(operand_ty); + return pt.undefRef(operand_ty); const vec_len = operand_ty.vectorLen(mod); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - elem.* = (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).toIntern(); + const elem_val = try val.elemValue(pt, i); + elem.* = (try elem_val.byteSwap(scalar_ty, pt, sema.arena)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -23415,12 +23627,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
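 // Comptime path below: bit-reverse each element (or the scalar) via Value.bitReverse; a runtime operand falls through to a single AIR op.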
return Air.internedToRef(val.toIntern()); } - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveValue(operand)) |val| { - if (val.isUndef(mod)) return mod.undefRef(operand_ty); - const result_val = try val.bitReverse(operand_ty, mod, sema.arena); + if (val.isUndef(mod)) return pt.undefRef(operand_ty); + const result_val = try val.bitReverse(operand_ty, pt, sema.arena); return Air.internedToRef(result_val.toIntern()); } else operand_src; @@ -23430,15 +23643,15 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! .Vector => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) - return mod.undefRef(operand_ty); + return pt.undefRef(operand_ty); const vec_len = operand_ty.vectorLen(mod); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - elem.* = (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).toIntern(); + const elem_val = try val.elemValue(pt, i); + elem.* = (try elem_val.bitReverse(scalar_ty, pt, sema.arena)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -23453,13 +23666,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); - return sema.mod.intRef(Type.comptime_int, offset); + return sema.pt.intRef(Type.comptime_int, offset); } fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); // TODO reminder to make this a compile error for packed structs - return sema.mod.intRef(Type.comptime_int, offset / 8); + return sema.pt.intRef(Type.comptime_int, offset / 8); } fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 { @@ -23474,12 +23687,13 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 .needed_comptime_reason = "name of field must be comptime-known", }); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - try ty.resolveLayout(mod); + try ty.resolveLayout(pt); switch (ty.zigTypeTag(mod)) { .Struct => {}, - else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}), + else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(pt)}), } const field_index = if (ty.isTuple(mod)) blk: { @@ -23502,28 +23716,30 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 return bit_sum; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - bit_sum += field_ty.bitSize(mod); + bit_sum += field_ty.bitSize(pt); } else unreachable; }, - else => return ty.structFieldOffset(field_index, mod) * 8, + else => return ty.structFieldOffset(field_index, pt) * 8, } } fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, - else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}), + else => 
return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(pt)}), } } /// Returns `true` if the type was a comptime_int. fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, - else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}), + else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(pt)}), } } @@ -23533,7 +23749,8 @@ fn checkInvalidPtrArithmetic( src: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (try ty.zigTypeTagOrPoison(mod)) { .Pointer => switch (ty.ptrSize(mod)) { .One, .Slice => return, @@ -23573,7 +23790,8 @@ fn checkPtrOperand( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { @@ -23581,7 +23799,7 @@ fn checkPtrOperand( const msg = try sema.errMsg( ty_src, "expected pointer, found '{}'", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); @@ -23594,7 +23812,7 @@ fn checkPtrOperand( .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}); } fn checkPtrType( @@ -23604,7 +23822,8 @@ fn checkPtrType( ty: Type, allow_slice: bool, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Pointer => if (allow_slice or !ty.isSlice(mod)) return, .Fn => { @@ -23612,7 +23831,7 @@ fn checkPtrType( const msg = try sema.errMsg( ty_src, "expected pointer type, found '{}'", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); @@ -23625,7 +23844,7 @@ fn checkPtrType( .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}); } fn checkVectorElemType( @@ -23634,13 +23853,14 @@ fn checkVectorElemType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Int, .Float, .Bool => return, .Optional, .Pointer => if (ty.isPtrAtRuntime(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)}); + return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(pt)}); } fn checkFloatType( @@ -23649,10 +23869,11 @@ fn checkFloatType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, - else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}), + else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(pt)}), } } @@ -23662,14 +23883,15 @@ fn checkNumericType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod 
= pt.zcu; switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, - else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}), + else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(pt)}), } } @@ -23683,7 +23905,8 @@ fn checkAtomicPtrOperand( ptr_src: LazySrcLoc, ptr_const: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -23703,7 +23926,7 @@ fn checkAtomicPtrOperand( block, elem_ty_src, "expected bool, integer, float, enum, or pointer type; found '{}'", - .{elem_ty.fmt(mod)}, + .{elem_ty.fmt(pt)}, ), }; @@ -23719,7 +23942,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); + const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -23729,7 +23952,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero; wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile; - const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); + const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -23754,7 +23977,8 @@ fn checkIntOrVector( operand: Air.Inst.Ref, operand_src: LazySrcLoc, ) CompileError!Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, @@ -23763,12 +23987,12 @@ fn checkIntOrVector( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(mod), + elem_ty.fmt(pt), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }), } } @@ -23779,7 +24003,8 @@ fn checkIntOrVectorAllowComptime( operand_ty: Type, operand_src: LazySrcLoc, ) CompileError!Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { @@ -23787,12 +24012,12 @@ fn checkIntOrVectorAllowComptime( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(mod), + elem_ty.fmt(pt), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(mod), + operand_ty.fmt(pt), }), } } @@ -23819,7 +24044,8 @@ fn checkSimdBinOp( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); @@ -23851,7 +24077,8 @@ fn checkVectorizableBinaryOperands( lhs_src: 
LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return; @@ -23881,7 +24108,7 @@ fn checkVectorizableBinaryOperands( } else { const msg = msg: { const msg = try sema.errMsg(src, "mixed scalar and vector operands: '{}' and '{}'", .{ - lhs_ty.fmt(mod), rhs_ty.fmt(mod), + lhs_ty.fmt(pt), rhs_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); if (lhs_is_vector) { @@ -23903,10 +24130,11 @@ fn resolveExportOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!Module.Export.Options { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; - const export_options_ty = try mod.getBuiltinType("ExportOptions"); + const export_options_ty = try pt.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -23969,12 +24197,12 @@ fn resolveBuiltinEnum( comptime name: []const u8, reason: NeededComptimeReason, ) CompileError!@field(std.builtin, name) { - const mod = sema.mod; - const ty = try mod.getBuiltinType(name); + const pt = sema.pt; + const ty = try pt.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); - return mod.toEnum(@field(std.builtin, name), val); + return pt.zcu.toEnum(@field(std.builtin, name), val); } fn resolveAtomicOrder( @@ -24003,7 +24231,8 @@ fn zirCmpxchg( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data; const air_tag: Air.Inst.Tag = switch (extended.small) { 0 => .cmpxchg_weak, @@ -24026,7 +24255,7 @@ fn zirCmpxchg( block, elem_ty_src, "expected bool, integer, enum, or pointer type; found '{}'", - .{elem_ty.fmt(mod)}, + .{elem_ty.fmt(pt)}, ); } const uncasted_ptr = try sema.resolveInst(extra.ptr); @@ -24052,11 +24281,11 @@ fn zirCmpxchg( return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{}); } - const result_ty = try mod.optionalType(elem_ty.toIntern()); + const result_ty = try pt.optionalType(elem_ty.toIntern()); // special case zero bit types if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { - return Air.internedToRef((try mod.intern(.{ .opt = .{ + return Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = result_ty.toIntern(), .val = .none, } }))); @@ -24068,11 +24297,11 @@ fn zirCmpxchg( if (expected_val.isUndef(mod) or new_val.isUndef(mod)) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well - return mod.undefRef(result_ty); + return pt.undefRef(result_ty); } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; - const result_val = try mod.intern(.{ .opt = .{ + const result_val = try pt.intern(.{ .opt = .{ .ty = result_ty.toIntern(), .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: { try sema.storePtr(block, src, ptr, new_value); @@ -24103,17 +24332,18 @@ fn zirCmpxchg( } fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const scalar_src = block.builtinCallArgSrc(inst_data.src_node, 0); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat"); - if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(mod)}); + if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(pt)}); - if (!dest_ty.hasRuntimeBits(mod)) { - const empty_aggregate = try mod.intern(.{ .aggregate = .{ + if (!dest_ty.hasRuntimeBits(pt)) { + const empty_aggregate = try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = &[_]InternPool.Index{} }, } }); @@ -24124,7 +24354,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const scalar_ty = dest_ty.childType(mod); const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src); if (try sema.resolveValue(scalar)) |scalar_val| { - if (scalar_val.isUndef(mod)) return mod.undefRef(dest_ty); + if (scalar_val.isUndef(mod)) return pt.undefRef(dest_ty); return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern()); } @@ -24142,10 +24372,11 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (operand_ty.zigTypeTag(mod) != .Vector) { - return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)}); + return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)}); } const scalar_ty = operand_ty.childType(mod); @@ -24155,13 +24386,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Bool => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(mod), + @tagName(operation), operand_ty.fmt(pt), }), }, .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Float => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(mod), + @tagName(operation), operand_ty.fmt(pt), }), }, } @@ -24174,20 +24405,20 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
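 // Comptime path below: fold the vector elements left to right with the requested @reduce operation.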
} if (try sema.resolveValue(operand)) |operand_val| { - if (operand_val.isUndef(mod)) return mod.undefRef(scalar_ty); + if (operand_val.isUndef(mod)) return pt.undefRef(scalar_ty); - var accum: Value = try operand_val.elemValue(mod, 0); + var accum: Value = try operand_val.elemValue(pt, 0); var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = try operand_val.elemValue(mod, i); + const elem_val = try operand_val.elemValue(pt, i); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), - .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), - .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod), - .Min => accum = accum.numberMin(elem_val, mod), - .Max => accum = accum.numberMax(elem_val, mod), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, pt), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, pt), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, pt), + .Min => accum = accum.numberMin(elem_val, pt), + .Max => accum = accum.numberMax(elem_val, pt), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), - .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod), + .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, pt), } } return Air.internedToRef(accum.toIntern()); @@ -24204,7 +24435,8 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data; const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); @@ -24219,9 +24451,9 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(mask).arrayLen(mod), - else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), + else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(pt)}), }; - mask_ty = try mod.vectorType(.{ + mask_ty = try pt.vectorType(.{ .len = @intCast(mask_len), .child = .i32_type, }); @@ -24242,51 +24474,51 @@ fn analyzeShuffle( mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; const a_src = block.builtinCallArgSrc(src_node, 1); const b_src = block.builtinCallArgSrc(src_node, 2); const mask_src = block.builtinCallArgSrc(src_node, 3); var a = a_arg; var b = b_arg; - const res_ty = try mod.vectorType(.{ + const res_ty = try pt.vectorType(.{ .len = mask_len, .child = elem_ty.toIntern(), }); - const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { - .Array, .Vector => sema.typeOf(a).arrayLen(mod), + const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(pt.zcu)) { + .Array, .Vector => sema.typeOf(a).arrayLen(pt.zcu), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ - elem_ty.fmt(sema.mod), - sema.typeOf(a).fmt(sema.mod), + elem_ty.fmt(pt), + sema.typeOf(a).fmt(pt), }), }; - const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) { - .Array, .Vector => sema.typeOf(b).arrayLen(mod), + const 
maybe_b_len = switch (sema.typeOf(b).zigTypeTag(pt.zcu)) { + .Array, .Vector => sema.typeOf(b).arrayLen(pt.zcu), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ - elem_ty.fmt(sema.mod), - sema.typeOf(b).fmt(sema.mod), + elem_ty.fmt(pt), + sema.typeOf(b).fmt(pt), }), }; if (maybe_a_len == null and maybe_b_len == null) { - return mod.undefRef(res_ty); + return pt.undefRef(res_ty); } const a_len: u32 = @intCast(maybe_a_len orelse maybe_b_len.?); const b_len: u32 = @intCast(maybe_b_len orelse a_len); - const a_ty = try mod.vectorType(.{ + const a_ty = try pt.vectorType(.{ .len = a_len, .child = elem_ty.toIntern(), }); - const b_ty = try mod.vectorType(.{ + const b_ty = try pt.vectorType(.{ .len = b_len, .child = elem_ty.toIntern(), }); - if (maybe_a_len == null) a = try mod.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); - if (maybe_b_len == null) b = try mod.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src); + if (maybe_a_len == null) a = try pt.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); + if (maybe_b_len == null) b = try pt.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src); const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){ .{ a_len, a_src, a_ty }, @@ -24294,10 +24526,10 @@ fn analyzeShuffle( }; for (0..@intCast(mask_len)) |i| { - const elem = try mask.elemValue(sema.mod, i); - if (elem.isUndef(mod)) continue; + const elem = try mask.elemValue(pt, i); + if (elem.isUndef(pt.zcu)) continue; const elem_resolved = try sema.resolveLazyValue(elem); - const int = elem_resolved.toSignedInt(mod); + const int = elem_resolved.toSignedInt(pt); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { @@ -24314,7 +24546,7 @@ fn analyzeShuffle( try sema.errNote(operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{ unsigned, - operand_info[chosen][2].fmt(sema.mod), + operand_info[chosen][2].fmt(pt), }); if (chosen == 0) { @@ -24331,16 +24563,16 @@ fn analyzeShuffle( if (try sema.resolveValue(b)) |b_val| { const values = try sema.arena.alloc(InternPool.Index, mask_len); for (values, 0..) 
|*value, i| { - const mask_elem_val = try mask.elemValue(sema.mod, i); - if (mask_elem_val.isUndef(mod)) { - value.* = try mod.intern(.{ .undef = elem_ty.toIntern() }); + const mask_elem_val = try mask.elemValue(pt, i); + if (mask_elem_val.isUndef(pt.zcu)) { + value.* = try pt.intern(.{ .undef = elem_ty.toIntern() }); continue; } - const int = mask_elem_val.toSignedInt(mod); + const int = mask_elem_val.toSignedInt(pt); const unsigned: u32 = @intCast(if (int >= 0) int else ~int); - values[i] = (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).toIntern(); + values[i] = (try (if (int >= 0) a_val else b_val).elemValue(pt, unsigned)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = res_ty.toIntern(), .storage = .{ .elems = values }, } }))); @@ -24359,21 +24591,21 @@ fn analyzeShuffle( const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); for (@intCast(0)..@intCast(min_len)) |i| { - expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); + expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern(); } for (@intCast(min_len)..@intCast(max_len)) |i| { - expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); + expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern(); } - const expand_mask = try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(), + const expand_mask = try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(), .storage = .{ .elems = expand_mask_values }, } }); if (a_len < b_len) { - const undef = try mod.undefRef(a_ty); + const undef = try pt.undefRef(a_ty); a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len)); } else { - const undef = try mod.undefRef(b_ty); + const undef = try pt.undefRef(b_ty); b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len)); } } @@ -24393,7 +24625,8 @@ fn analyzeShuffle( } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data; const src = block.nodeOffset(extra.node); @@ -24409,17 +24642,17 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(mod), - else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), + else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(pt)}), }; const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64)); - const bool_vec_ty = try mod.vectorType(.{ + const bool_vec_ty = try pt.vectorType(.{ .len = vec_len, .child = .bool_type, }); const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src); - const vec_ty = try mod.vectorType(.{ + const vec_ty = try pt.vectorType(.{ .len = vec_len, .child = elem_ty.toIntern(), }); @@ -24431,23 +24664,23 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const maybe_b = try sema.resolveValue(b); const runtime_src = if (maybe_pred) |pred_val| rs: 
{ - if (pred_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (pred_val.isUndef(mod)) return pt.undefRef(vec_ty); if (maybe_a) |a_val| { - if (a_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (a_val.isUndef(mod)) return pt.undefRef(vec_ty); if (maybe_b) |b_val| { - if (b_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); const elems = try sema.gpa.alloc(InternPool.Index, vec_len); defer sema.gpa.free(elems); for (elems, 0..) |*elem, i| { - const pred_elem_val = try pred_val.elemValue(mod, i); + const pred_elem_val = try pred_val.elemValue(pt, i); const should_choose_a = pred_elem_val.toBool(); - elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).toIntern(); + elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(pt, i)).toIntern(); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = vec_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -24456,16 +24689,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C } } else { if (maybe_b) |b_val| { - if (b_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); } break :rs a_src; } } else rs: { if (maybe_a) |a_val| { - if (a_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (a_val.isUndef(mod)) return pt.undefRef(vec_ty); } if (maybe_b) |b_val| { - if (b_val.isUndef(mod)) return mod.undefRef(vec_ty); + if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); } break :rs pred_src; }; @@ -24531,7 +24764,8 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -24588,12 +24822,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), - .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod), - .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod), - .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod), - .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod), - .Max => stored_val.numberMax (operand_val, mod), - .Min => stored_val.numberMin (operand_val, mod), + .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, pt), + .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, pt), + .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, pt), + .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, pt), + .Max => stored_val.numberMax (operand_val, pt), + .Min => stored_val.numberMin (operand_val, pt), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty); @@ -24669,36 +24903,37 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
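 // Resolve all three operands at comptime when possible; the first runtime-known operand becomes the runtime source below.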
const maybe_mulend1 = try sema.resolveValue(mulend1); const maybe_mulend2 = try sema.resolveValue(mulend2); const maybe_addend = try sema.resolveValue(addend); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, - else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(pt)}), } const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef(mod)) return mod.undefRef(ty); + if (mulend2_val.isUndef(mod)) return pt.undefRef(ty); if (maybe_addend) |addend_val| { - if (addend_val.isUndef(mod)) return mod.undefRef(ty); - const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); + if (addend_val.isUndef(mod)) return pt.undefRef(ty); + const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); } else { break :rs addend_src; } } else { if (maybe_addend) |addend_val| { - if (addend_val.isUndef(mod)) return mod.undefRef(ty); + if (addend_val.isUndef(mod)) return pt.undefRef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef(mod)) return mod.undefRef(ty); + if (mulend2_val.isUndef(mod)) return pt.undefRef(ty); } if (maybe_addend) |addend_val| { - if (addend_val.isUndef(mod)) return mod.undefRef(ty); + if (addend_val.isUndef(mod)) return pt.undefRef(ty); } break :rs mulend1_src; }; @@ -24720,7 +24955,8 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const modifier_src = block.builtinCallArgSrc(inst_data.src_node, 0); const func_src = block.builtinCallArgSrc(inst_data.src_node, 1); @@ -24730,7 +24966,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data; const func = try sema.resolveInst(extra.callee); - const modifier_ty = try mod.getBuiltinType("CallModifier"); + const modifier_ty = try pt.getBuiltinType("CallModifier"); const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{ @@ -24783,7 +25019,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args_ty = sema.typeOf(args); if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { - return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); + return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(pt)}); } const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); @@ -24812,7 +25048,8 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const extra = 
sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data; @@ -24827,14 +25064,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins try sema.checkPtrType(block, inst_src, parent_ptr_ty, true); const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); if (parent_ptr_info.flags.size != .One) { - return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(zcu)}); + return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)}); } const parent_ty = Type.fromInterned(parent_ptr_info.child); switch (parent_ty.zigTypeTag(zcu)) { .Struct, .Union => {}, - else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}), + else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}), } - try parent_ty.resolveLayout(zcu); + try parent_ty.resolveLayout(pt); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", @@ -24865,7 +25102,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_parent_ptr_info: InternPool.Key.PtrType = .{ .child = parent_ty.toIntern(), .flags = .{ - .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), + .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24877,7 +25114,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ - .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), + .alignment = try field_ptr_ty.ptrAlignmentAdvanced(pt, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24888,13 +25125,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced( + if (zcu.typeToStruct(parent_ty)) |struct_obj| try pt.structFieldAlignmentAdvanced( struct_obj.fieldAlign(ip, field_index), field_ty, struct_obj.layout, .sema, ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) + try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) else actual_field_ptr_info.flags.alignment, ); @@ -24903,7 +25140,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 }; }, .@"extern" => { - const field_offset = parent_ty.structFieldOffset(field_index, zcu); + const field_offset = parent_ty.structFieldOffset(field_index, pt); actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0) Alignment.fromLog2Units(@ctz(field_offset)) else @@ -24914,7 +25151,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, .@"packed" => { const byte_offset = std.math.divExact(u32, @abs(@as(i32, 
actual_parent_ptr_info.packed_offset.bit_offset) + - (if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) - + (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) - actual_field_ptr_info.packed_offset.bit_offset), 8) catch return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{}); actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0) @@ -24924,16 +25161,16 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, } - const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info); + const actual_field_ptr_ty = try pt.ptrTypeSema(actual_field_ptr_info); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src); - const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info); + const actual_parent_ptr_ty = try pt.ptrTypeSema(actual_parent_ptr_info); const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: { switch (parent_ty.zigTypeTag(zcu)) { .Struct => switch (parent_ty.containerLayout(zcu)) { .auto => {}, .@"extern" => { - const byte_offset = parent_ty.structFieldOffset(field_index, zcu); + const byte_offset = parent_ty.structFieldOffset(field_index, pt); const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); }, @@ -24941,7 +25178,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins // Logic lifted from type computation above - I'm just assuming it's correct. // `catch unreachable` since error case handled above. const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) + - zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) - + pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) - actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable; const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); @@ -24951,7 +25188,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins .auto => {}, .@"extern", .@"packed" => { // For an extern or packed union, just coerce the pointer. 
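 // (Every field of an extern or packed union lives at byte offset zero, so no offset adjustment is needed.)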
- const parent_ptr_val = try zcu.getCoerced(field_ptr_val, actual_parent_ptr_ty); + const parent_ptr_val = try pt.getCoerced(field_ptr_val, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); }, }, @@ -24980,7 +25217,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins if (field.index != field_index) { return sema.fail(block, inst_src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ - field_name.fmt(ip), field_index, field.index, parent_ty.fmt(zcu), + field_name.fmt(ip), field_index, field.index, parent_ty.fmt(pt), }); } break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src); @@ -25001,8 +25238,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins } fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte_subtract: u64, new_ty: Type) !Value { - const zcu = sema.mod; - if (byte_subtract == 0) return zcu.getCoerced(ptr_val, new_ty); + const pt = sema.pt; + const zcu = pt.zcu; + if (byte_subtract == 0) return pt.getCoerced(ptr_val, new_ty); var ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) { .undef => return sema.failWithUseOfUndef(block, src), .ptr => |ptr| ptr, @@ -25018,7 +25256,7 @@ fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte } ptr.byte_offset -= byte_subtract; ptr.ty = new_ty.toIntern(); - return Value.fromInterned(try zcu.intern(.{ .ptr = ptr })); + return Value.fromInterned(try pt.intern(.{ .ptr = ptr })); } fn zirMinMax( @@ -25072,7 +25310,8 @@ fn analyzeMinMax( ) CompileError!Air.Inst.Ref { assert(operands.len == operand_srcs.len); assert(operands.len > 0); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (operands.len == 1) return operands[0]; @@ -25115,15 +25354,15 @@ fn analyzeMinMax( break :refine_bounds; } const scalar_bounds: ?[2]Value = bounds: { - if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(mod); - var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(mod, 0), mod) orelse break :bounds null; + if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(pt); + var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(pt, 0), pt) orelse break :bounds null; const len = try sema.usizeCast(block, src, ty.vectorLen(mod)); for (1..len) |i| { - const elem = try uncoerced_val.elemValue(mod, i); - const elem_bounds = try elem.intValueBounds(mod) orelse break :bounds null; + const elem = try uncoerced_val.elemValue(pt, i); + const elem_bounds = try elem.intValueBounds(pt) orelse break :bounds null; cur_bounds = .{ - Value.numberMin(elem_bounds[0], cur_bounds[0], mod), - Value.numberMax(elem_bounds[1], cur_bounds[1], mod), + Value.numberMin(elem_bounds[0], cur_bounds[0], pt), + Value.numberMax(elem_bounds[1], cur_bounds[1], pt), }; } break :bounds cur_bounds; @@ -25134,8 +25373,8 @@ fn analyzeMinMax( cur_max_scalar = bounds[1]; bounds_status = .defined; } else { - cur_min_scalar = opFunc(cur_min_scalar, bounds[0], mod); - cur_max_scalar = opFunc(cur_max_scalar, bounds[1], mod); + cur_min_scalar = opFunc(cur_min_scalar, bounds[0], pt); + cur_max_scalar = opFunc(cur_max_scalar, bounds[1], pt); } } }, @@ -25153,18 +25392,18 @@ fn analyzeMinMax( const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above const vec_len = simd_op.len orelse { - const result_val = opFunc(cur_val, operand_val, 
mod); + const result_val = opFunc(cur_val, operand_val, pt); cur_minmax = Air.internedToRef(result_val.toIntern()); continue; }; const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const lhs_elem_val = try cur_val.elemValue(mod, i); - const rhs_elem_val = try operand_val.elemValue(mod, i); - const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, mod); - elem.* = (try mod.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern(); + const lhs_elem_val = try cur_val.elemValue(pt, i); + const rhs_elem_val = try operand_val.elemValue(pt, i); + const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, pt); + elem.* = (try pt.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern(); } - cur_minmax = Air.internedToRef((try mod.intern(.{ .aggregate = .{ + cur_minmax = Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = simd_op.result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); @@ -25191,8 +25430,8 @@ fn analyzeMinMax( assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg - const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar); - const refined_ty = if (orig_ty.isVector(mod)) try mod.vectorType(.{ + const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar); + const refined_ty = if (orig_ty.isVector(mod)) try pt.vectorType(.{ .len = orig_ty.vectorLen(mod), .child = refined_scalar_ty.toIntern(), }) else refined_scalar_ty; @@ -25226,8 +25465,8 @@ fn analyzeMinMax( runtime_known.unset(0); // don't look at this operand in the loop below const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod); if (scalar_ty.isInt(mod)) { - cur_min_scalar = try scalar_ty.minInt(mod, scalar_ty); - cur_max_scalar = try scalar_ty.maxInt(mod, scalar_ty); + cur_min_scalar = try scalar_ty.minInt(pt, scalar_ty); + cur_max_scalar = try scalar_ty.maxInt(pt, scalar_ty); bounds_status = .defined; } else { bounds_status = .non_integral; @@ -25242,7 +25481,7 @@ fn analyzeMinMax( const rhs_src = operand_srcs[idx]; const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); if (known_undef) { - cur_minmax = try mod.undefRef(simd_op.result_ty); + cur_minmax = try pt.undefRef(simd_op.result_ty); } else { cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); } @@ -25254,15 +25493,15 @@ fn analyzeMinMax( bounds_status = .non_integral; break :refine_bounds; } - const scalar_min = try scalar_ty.minInt(mod, scalar_ty); - const scalar_max = try scalar_ty.maxInt(mod, scalar_ty); + const scalar_min = try scalar_ty.minInt(pt, scalar_ty); + const scalar_max = try scalar_ty.maxInt(pt, scalar_ty); if (bounds_status == .unknown) { cur_min_scalar = scalar_min; cur_max_scalar = scalar_max; bounds_status = .defined; } else { - cur_min_scalar = opFunc(cur_min_scalar, scalar_min, mod); - cur_max_scalar = opFunc(cur_max_scalar, scalar_max, mod); + cur_min_scalar = opFunc(cur_min_scalar, scalar_min, pt); + cur_max_scalar = opFunc(cur_max_scalar, scalar_max, pt); } }, .non_integral => {}, @@ -25276,8 +25515,8 @@ fn analyzeMinMax( return cur_minmax.?; } assert(bounds_status == .defined); // there were integral runtime operands - const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar); - const refined_ty = if (unrefined_ty.isVector(mod)) try mod.vectorType(.{ + const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar); + const refined_ty = if (unrefined_ty.isVector(mod)) try pt.vectorType(.{ .len = 
unrefined_ty.vectorLen(mod), .child = refined_scalar_ty.toIntern(), }) else refined_scalar_ty; @@ -25291,15 +25530,16 @@ fn analyzeMinMax( } fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); const info = ptr_ty.ptrInfo(mod); if (info.flags.size == .One) { // Already an array pointer. return ptr; } - const new_ty = try mod.ptrTypeSema(.{ - .child = (try mod.arrayType(.{ + const new_ty = try pt.ptrTypeSema(.{ + .child = (try pt.arrayType(.{ .len = len, .sentinel = info.sentinel, .child = info.child, @@ -25331,8 +25571,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const src_ty = sema.typeOf(src_ptr); const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); - const target = sema.mod.getTarget(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; + const target = mod.getTarget(); if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); @@ -25343,10 +25584,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const msg = try sema.errMsg(src, "unknown @memcpy length", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{ - dest_ty.fmt(sema.mod), + dest_ty.fmt(pt), }); try sema.errNote(src_src, msg, "source type '{}' provides no length", .{ - src_ty.fmt(sema.mod), + src_ty.fmt(pt), }); break :msg msg; }; @@ -25365,10 +25606,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const msg = try sema.errMsg(src, "non-matching @memcpy lengths", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "length {} here", .{ - dest_len_val.fmtValue(sema.mod, sema), + dest_len_val.fmtValue(pt, sema), }); try sema.errNote(src_src, msg, "length {} here", .{ - src_len_val.fmtValue(sema.mod, sema), + src_len_val.fmtValue(pt, sema), }); break :msg msg; }; @@ -25397,10 +25638,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { - const elem_index = try mod.intRef(Type.usize, i); + const elem_index = try pt.intRef(Type.usize, i); const dest_elem_ptr = try sema.elemPtrOneLayerOnly( block, src, @@ -25456,7 +25697,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = try val.toUnsignedIntSema(mod); + const len = try val.toUnsignedIntSema(pt); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
return; @@ -25503,7 +25744,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(dest_manyptr_ty_key.flags.size == .One); dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); dest_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); + break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; const new_src_ptr_ty = sema.typeOf(new_src_ptr); @@ -25514,7 +25755,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(src_manyptr_ty_key.flags.size == .One); src_manyptr_ty_key.child = src_elem_ty.toIntern(); src_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); + break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; // ok1: dest >= src + len @@ -25537,7 +25778,8 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; @@ -25569,7 +25811,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const msg = try sema.errMsg(src, "unknown @memset length", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{ - dest_ptr_ty.fmt(mod), + dest_ptr_ty.fmt(pt), }); break :msg msg; }); @@ -25581,7 +25823,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
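// --- illustrative sketch, not part of the patch ---
// User-level view of the comptime path these zirMemset hunks rewrite: when the
// destination pointer and the element are both comptime-known, Sema folds
// @memset into a single repeated-element aggregate value (the `repeated_elem`
// storage built below) instead of storing element by element. A minimal check
// of that behavior:
test "comptime @memset folds to a repeated element" {
    comptime {
        var buf: [4]u8 = undefined;
        @memset(&buf, 0xAA);
        for (buf) |b| {
            if (b != 0xAA) unreachable;
        }
    }
}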
@@ -25590,22 +25832,22 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void if (!sema.isComptimeMutablePtr(ptr_val)) break :rs dest_src; const elem_val = try sema.resolveValue(elem) orelse break :rs value_src; - const array_ty = try mod.arrayType(.{ + const array_ty = try pt.arrayType(.{ .child = dest_elem_ty.toIntern(), .len = len_u64, }); - const array_val = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + const array_val = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .repeated_elem = elem_val.toIntern() }, - } }))); + } })); const array_ptr_ty = ty: { var info = dest_ptr_ty.ptrInfo(mod); info.flags.size = .One; info.child = array_ty.toIntern(); - break :ty try mod.ptrType(info); + break :ty try pt.ptrType(info); }; const raw_ptr_val = if (dest_ptr_ty.isSlice(mod)) ptr_val.slicePtr(mod) else ptr_val; - const array_ptr_val = try mod.getCoerced(raw_ptr_val, array_ptr_ty); + const array_ptr_val = try pt.getCoerced(raw_ptr_val, array_ptr_ty); return sema.storePtrVal(block, src, array_ptr_val, array_val, array_ty); }; @@ -25658,7 +25900,8 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src = block.src(.{ .node_offset_var_decl_ty = 0 }); const init_src = block.src(.{ .node_offset_var_decl_init = 0 }); @@ -25705,7 +25948,7 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - return Air.internedToRef((try mod.intern(.{ .variable = .{ + return Air.internedToRef((try pt.intern(.{ .variable = .{ .ty = var_ty.toIntern(), .init = init_val, .decl = sema.owner_decl_index, @@ -25721,7 +25964,8 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); const target = mod.getTarget(); @@ -25761,7 +26005,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(pt)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { @@ -25781,7 +26025,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => break :blk null, else => |e| return e, }; - const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(pt)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; @@ -25857,7 +26101,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; - const cc_ty = try mod.getBuiltinType("CallingConvention"); + const cc_ty = try 
pt.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{ .needed_comptime_reason = "calling convention must be comptime-known", }); @@ -25986,7 +26230,8 @@ fn zirCDefine( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src = block.builtinCallArgSrc(extra.node, 0); const val_src = block.builtinCallArgSrc(extra.node, 1); @@ -26014,7 +26259,7 @@ fn zirWasmMemorySize( const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const index_src = block.builtinCallArgSrc(extra.node, 0); const builtin_src = block.nodeOffset(extra.node); - const target = sema.mod.getTarget(); + const target = sema.pt.zcu.getTarget(); if (!target.isWasm()) { return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } @@ -26041,7 +26286,7 @@ fn zirWasmMemoryGrow( const builtin_src = block.nodeOffset(extra.node); const index_src = block.builtinCallArgSrc(extra.node, 0); const delta_src = block.builtinCallArgSrc(extra.node, 1); - const target = sema.mod.getTarget(); + const target = sema.pt.zcu.getTarget(); if (!target.isWasm()) { return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } @@ -26067,10 +26312,11 @@ fn resolvePrefetchOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; - const options_ty = try mod.getBuiltinType("PrefetchOptions"); + const options_ty = try pt.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26094,7 +26340,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(try locality_val.toUnsignedIntSema(mod)), + .locality = @intCast(try locality_val.toUnsignedIntSema(pt)), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -26138,11 +26384,12 @@ fn resolveExternOptions( linkage: std.builtin.GlobalLinkage = .strong, is_thread_local: bool = false, } { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); - const extern_options_ty = try mod.getBuiltinType("ExternOptions"); + const extern_options_ty = try pt.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26203,7 +26450,8 @@ fn zirBuiltinExtern( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src = block.builtinCallArgSrc(extra.node, 0); @@ -26215,7 +26463,7 @@ fn zirBuiltinExtern( } if (!try 
sema.validateExternType(ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); + const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, ty, .other); break :msg msg; @@ -26226,7 +26474,7 @@ fn zirBuiltinExtern( const options = try sema.resolveExternOptions(block, options_src, extra.rhs); if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) { - ty = try mod.optionalType(ty.toIntern()); + ty = try pt.optionalType(ty.toIntern()); } const ptr_info = ty.ptrInfo(mod); @@ -26237,13 +26485,13 @@ fn zirBuiltinExtern( new_decl_index, Value.fromInterned( if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) - try ip.getExternFunc(sema.gpa, .{ + try ip.getExternFunc(sema.gpa, pt.tid, .{ .ty = ptr_info.child, .decl = new_decl_index, .lib_name = options.library_name, }) else - try mod.intern(.{ .variable = .{ + try pt.intern(.{ .variable = .{ .ty = ptr_info.child, .init = .none, .decl = new_decl_index, @@ -26259,9 +26507,9 @@ fn zirBuiltinExtern( new_decl.owns_tv = true; // Note that this will queue the anon decl for codegen, so that the backend can // correctly handle the extern, including duplicate detection. - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); - return Air.internedToRef((try mod.getCoerced(Value.fromInterned((try mod.intern(.{ .ptr = .{ + return Air.internedToRef((try pt.getCoerced(Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = switch (ip.indexToKey(ty.toIntern())) { .ptr_type => ty.toIntern(), .opt_type => |child_type| child_type, @@ -26269,7 +26517,7 @@ fn zirBuiltinExtern( }, .base_addr = .{ .decl = new_decl_index }, .byte_offset = 0, - } }))), ty)).toIntern()); + } })), ty)).toIntern()); } fn zirWorkItem( @@ -26281,7 +26529,7 @@ fn zirWorkItem( const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const dimension_src = block.builtinCallArgSrc(extra.node, 0); const builtin_src = block.nodeOffset(extra.node); - const target = sema.mod.getTarget(); + const target = sema.pt.zcu.getTarget(); switch (target.cpu.arch) { // TODO: Allow for other GPU targets. 
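// --- illustrative sketch, not part of the patch ---
// What the weak-linkage branch in zirBuiltinExtern above means for user code:
// a weak extern may be absent at link time, so its type must admit null;
// passing an optional pointer type up front keeps the optional-wrapping above
// a no-op. "app_hook" is a hypothetical symbol name.
const maybe_hook = @extern(?*const fn () callconv(.C) void, .{
    .name = "app_hook", // hypothetical symbol
    .linkage = .weak,
});

fn callHookIfPresent() void {
    // The optional forces a null check before the call.
    if (maybe_hook) |hook| hook();
}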
@@ -26344,11 +26592,12 @@ fn validateVarType( var_ty: Type, is_extern: bool, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (is_extern) { if (!try sema.validateExternType(var_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "extern variable cannot have type '{}'", .{var_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, src, var_ty, .other); break :msg msg; @@ -26361,7 +26610,7 @@ fn validateVarType( block, src, "non-extern variable with opaque type '{}'", - .{var_ty.fmt(mod)}, + .{var_ty.fmt(pt)}, ); } } @@ -26369,7 +26618,7 @@ fn validateVarType( if (!try sema.typeRequiresComptime(var_ty)) return; const msg = msg: { - const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)}); + const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, src, var_ty); @@ -26393,7 +26642,7 @@ fn explainWhyTypeIsComptime( var type_set = TypeSet{}; defer type_set.deinit(sema.gpa); - try ty.resolveFully(sema.mod); + try ty.resolveFully(sema.pt); return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set); } @@ -26404,7 +26653,8 @@ fn explainWhyTypeIsComptimeInner( ty: Type, type_set: *TypeSet, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Bool, @@ -26418,9 +26668,7 @@ fn explainWhyTypeIsComptimeInner( => return, .Fn => { - try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{ - ty.fmt(sema.mod), - }); + try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{ty.fmt(pt)}); }, .Type => { @@ -26436,7 +26684,7 @@ fn explainWhyTypeIsComptimeInner( => return, .Opaque => { - try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(sema.mod)}); + try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(pt)}); }, .Array, .Vector => { @@ -26453,7 +26701,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (Type.fromInterned(fn_info.return_type).comptimeOnly(mod)) { + if (Type.fromInterned(fn_info.return_type).comptimeOnly(pt)) { try sema.errNote(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -26526,7 +26774,8 @@ fn validateExternType( ty: Type, position: ExternPosition, ) !bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, @@ -26557,7 +26806,7 @@ fn validateExternType( }, .Fn => { if (position != .other) return false; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { @@ -26571,7 +26820,7 @@ fn validateExternType( .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { - const bit_size = try ty.bitSizeAdvanced(mod, .sema); + const bit_size = try ty.bitSizeAdvanced(pt, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, @@ -26595,7 +26844,8 @@ fn explainWhyTypeIsNotExtern( ty: Type, position: ExternPosition, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, @@ -26622,7 +26872,7 @@ fn explainWhyTypeIsNotExtern( if (!ty.isConstPtr(mod) and pointee_ty.zigTypeTag(mod) == .Fn) { try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{}); } else if (try sema.typeRequiresComptime(ty)) { - try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)}); + try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(pt)}); try sema.explainWhyTypeIsComptime(msg, src_loc, ty); } try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other); @@ -26650,7 +26900,7 @@ fn explainWhyTypeIsNotExtern( }, .Enum => { const tag_ty = ty.intTagType(mod); - try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); + try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(pt)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, .Struct => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}), @@ -26671,7 +26921,8 @@ fn explainWhyTypeIsNotExtern( /// Returns true if `ty` is allowed in packed types. /// Does not require `ty` to be resolved in any way, but may resolve whether it is comptime-only. fn validatePackedType(sema: *Sema, ty: Type) !bool { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { .Type, .ComptimeFloat, @@ -26710,7 +26961,8 @@ fn explainWhyTypeIsNotPacked( src_loc: LazySrcLoc, ty: Type, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Void, .Bool, @@ -26750,10 +27002,11 @@ fn explainWhyTypeIsNotPacked( } fn prepareSimplePanic(sema: *Sema) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (mod.panic_func_index == .none) { - const decl_index = (try mod.getBuiltinDecl("panic")); + const decl_index = (try pt.getBuiltinDecl("panic")); // decl_index may be an alias; we must find the decl that actually // owns the function. 
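// --- illustrative sketch, not part of the patch ---
// `getBuiltinDecl("panic")` resolved just above is the panic handler cached in
// `mod.panic_func_index`; a program overrides it by declaring `pub fn panic`
// in its root source file. A minimal handler, assuming the root-override
// signature of this era and no use of the message or trace:
const std = @import("std");

pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    _ = msg;
    _ = error_return_trace;
    _ = ret_addr;
    @trap();
}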
try sema.ensureDeclAnalyzed(decl_index); @@ -26766,17 +27019,17 @@ fn prepareSimplePanic(sema: *Sema) !void { } if (mod.null_stack_trace == .none) { - const stack_trace_ty = try mod.getBuiltinType("StackTrace"); - try stack_trace_ty.resolveFields(mod); + const stack_trace_ty = try pt.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(pt); const target = mod.getTarget(); - const ptr_stack_trace_ty = try mod.ptrTypeSema(.{ + const ptr_stack_trace_ty = try pt.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), }, }); - const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); - mod.null_stack_trace = try mod.intern(.{ .opt = .{ + const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); + mod.null_stack_trace = try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), .val = .none, } }); @@ -26787,13 +27040,14 @@ fn prepareSimplePanic(sema: *Sema) !void { /// instructions. This function ensures the panic function will be available to /// be called during that time. fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternPool.DeclIndex { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; try sema.prepareSimplePanic(); - const panic_messages_ty = try mod.getBuiltinType("panic_messages"); + const panic_messages_ty = try pt.getBuiltinType("panic_messages"); const msg_decl_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, @@ -26892,7 +27146,8 @@ fn addSafetyCheckExtra( } fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (!mod.backendSupportsFeature(.panic_fn)) { _ = try block.addNoOp(.trap); @@ -26905,8 +27160,8 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst. 
const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); const null_stack_trace = Air.internedToRef(mod.null_stack_trace); - const opt_usize_ty = try mod.optionalType(.usize_type); - const null_ret_addr = Air.internedToRef((try mod.intern(.{ .opt = .{ + const opt_usize_ty = try pt.optionalType(.usize_type); + const null_ret_addr = Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = opt_usize_ty.toIntern(), .val = .none, } }))); @@ -26921,9 +27176,10 @@ fn panicUnwrapError( unwrap_err_tag: Air.Inst.Tag, is_non_err_tag: Air.Inst.Tag, ) !void { + const pt = sema.pt; assert(!parent_block.is_comptime); const ok = try parent_block.addUnOp(is_non_err_tag, operand); - if (!sema.mod.comp.formatted_panics) { + if (!pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .unwrap_error); } const gpa = sema.gpa; @@ -26942,10 +27198,10 @@ fn panicUnwrapError( defer fail_block.instructions.deinit(gpa); { - if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) { + if (!pt.zcu.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.mod.getBuiltin("panicUnwrapError"); + const panic_fn = try sema.pt.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; @@ -26965,7 +27221,7 @@ fn panicIndexOutOfBounds( ) !void { assert(!parent_block.is_comptime); const ok = try parent_block.addBinOp(cmp_op, index, len); - if (!sema.mod.comp.formatted_panics) { + if (!sema.pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .index_out_of_bounds); } try sema.safetyCheckFormatted(parent_block, src, ok, "panicOutOfBounds", &.{ index, len }); @@ -26980,7 +27236,7 @@ fn panicInactiveUnionField( ) !void { assert(!parent_block.is_comptime); const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag); - if (!sema.mod.comp.formatted_panics) { + if (!sema.pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .inactive_union_field); } try sema.safetyCheckFormatted(parent_block, src, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag }); @@ -26996,7 +27252,8 @@ fn panicSentinelMismatch( sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern()); @@ -27004,7 +27261,7 @@ fn panicSentinelMismatch( const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { - const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod); + const elem_ptr_ty = try ptr_ty.elemPtrType(null, pt); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; @@ -27022,13 +27279,13 @@ fn panicSentinelMismatch( } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { - const panic_fn = try mod.getBuiltin("checkNonScalarSentinel"); + const panic_fn = try pt.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; }; - if 
(!sema.mod.comp.formatted_panics) { + if (!pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .sentinel_mismatch); } try sema.safetyCheckFormatted(parent_block, src, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel }); @@ -27042,7 +27299,9 @@ fn safetyCheckFormatted( func: []const u8, args: []const Air.Inst.Ref, ) CompileError!void { - assert(sema.mod.comp.formatted_panics); + const pt = sema.pt; + const zcu = pt.zcu; + assert(zcu.comp.formatted_panics); const gpa = sema.gpa; var fail_block: Block = .{ @@ -27058,10 +27317,10 @@ fn safetyCheckFormatted( defer fail_block.instructions.deinit(gpa); - if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) { + if (!zcu.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.mod.getBuiltin(func); + const panic_fn = try pt.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27102,7 +27361,8 @@ fn fieldVal( // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -27120,10 +27380,10 @@ fn fieldVal( switch (inner_ty.zigTypeTag(mod)) { .Array => { if (field_name.eqlSlice("len", ip)) { - return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); + return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try mod.ptrTypeSema(.{ + const result_ty = try pt.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27143,7 +27403,7 @@ fn fieldVal( block, field_name_src, "no member named '{}' in '{}'", - .{ field_name.fmt(ip), object_ty.fmt(mod) }, + .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, @@ -27167,7 +27427,7 @@ fn fieldVal( block, field_name_src, "no member named '{}' in '{}'", - .{ field_name.fmt(ip), object_ty.fmt(mod) }, + .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } } @@ -27194,7 +27454,7 @@ fn fieldVal( .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, field_name) != null) break :blk; return sema.fail(block, src, "no error named '{}' in '{}'", .{ - field_name.fmt(ip), child_type.fmt(mod), + field_name.fmt(ip), child_type.fmt(pt), }); }, .inferred_error_set_type => { @@ -27210,8 +27470,8 @@ fn fieldVal( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetType(field_name); - return Air.internedToRef((try mod.intern(.{ .err = .{ + try pt.singleErrorSetType(field_name); + return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, } }))); @@ -27220,11 +27480,11 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try child_type.resolveFields(mod); + try child_type.resolveFields(pt); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = 
@intCast(field_index_usize); - return Air.internedToRef((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern()); + return Air.internedToRef((try pt.enumValueFieldIndex(enum_ty, field_index)).toIntern()); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); @@ -27236,7 +27496,7 @@ fn fieldVal( const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index: u32 = @intCast(field_index_usize); - const enum_val = try mod.enumValueFieldIndex(child_type, field_index); + const enum_val = try pt.enumValueFieldIndex(child_type, field_index); return Air.internedToRef(enum_val.toIntern()); }, .Struct, .Opaque => { @@ -27247,7 +27507,7 @@ fn fieldVal( }, else => { const msg = msg: { - const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(mod)}); + const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{}); @@ -27288,13 +27548,14 @@ fn fieldPtr( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.childType(mod), - else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}), + else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(pt)}), }; // Zig allows dereferencing a single pointer during field lookup. 
Note that @@ -27310,11 +27571,11 @@ fn fieldPtr( switch (inner_ty.zigTypeTag(mod)) { .Array => { if (field_name.eqlSlice("len", ip)) { - const int_val = try mod.intValue(Type.usize, inner_ty.arrayLen(mod)); + const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(mod)); return anonDeclRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const new_ptr_ty = try mod.ptrTypeSema(.{ + const new_ptr_ty = try pt.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27329,7 +27590,7 @@ fn fieldPtr( .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); - const result_ty = try mod.ptrTypeSema(.{ + const result_ty = try pt.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), .sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27348,7 +27609,7 @@ fn fieldPtr( block, field_name_src, "no member named '{}' in '{}'", - .{ field_name.fmt(ip), object_ty.fmt(mod) }, + .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, @@ -27363,7 +27624,7 @@ fn fieldPtr( if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try mod.ptrTypeSema(.{ + const result_ty = try pt.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27373,7 +27634,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27381,7 +27642,7 @@ fn fieldPtr( try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { - const result_ty = try mod.ptrTypeSema(.{ + const result_ty = try pt.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27391,7 +27652,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_len_index, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27403,7 +27664,7 @@ fn fieldPtr( block, field_name_src, "no member named '{}' in '{}'", - .{ field_name.fmt(ip), object_ty.fmt(mod) }, + .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, @@ -27433,7 +27694,7 @@ fn fieldPtr( break :blk; } return sema.fail(block, src, "no error named '{}' in '{}'", .{ - field_name.fmt(ip), child_type.fmt(mod), + field_name.fmt(ip), child_type.fmt(pt), }); }, .inferred_error_set_type => { @@ -27449,8 +27710,8 @@ fn fieldPtr( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetType(field_name); - return anonDeclRef(sema, try mod.intern(.{ .err = .{ + try pt.singleErrorSetType(field_name); + return anonDeclRef(sema, try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, } })); @@ -27459,11 +27720,11 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try child_type.resolveFields(mod); + try child_type.resolveFields(pt); if (child_type.unionTagType(mod)) 
|enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); - const idx_val = try mod.enumValueFieldIndex(enum_ty, field_index_u32); + const idx_val = try pt.enumValueFieldIndex(enum_ty, field_index_u32); return anonDeclRef(sema, idx_val.toIntern()); } } @@ -27477,7 +27738,7 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32: u32 = @intCast(field_index); - const idx_val = try mod.enumValueFieldIndex(child_type, field_index_u32); + const idx_val = try pt.enumValueFieldIndex(child_type, field_index_u32); return anonDeclRef(sema, idx_val.toIntern()); }, .Struct, .Opaque => { @@ -27486,7 +27747,7 @@ fn fieldPtr( } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, - else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}), + else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(pt)}), } }, .Struct => { @@ -27533,14 +27794,15 @@ fn fieldCallBind( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) raw_ptr_ty.childType(mod) else - return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)}); + return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(pt)}); // Optionally dereference a second pointer to get the concrete type. 
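// --- illustrative sketch, not part of the patch ---
// What fieldCallBind implements, seen from the user's side: `c.bump()` looks
// for a field named `bump` first, and otherwise binds the namespace function,
// passing `&c` as the first argument when the parameter type matches.
const Counter = struct {
    n: u32 = 0,

    pub fn bump(self: *Counter) void {
        self.n += 1;
    }
};

test "method call binds the receiver" {
    var c: Counter = .{};
    c.bump(); // sugar for Counter.bump(&c)
    if (c.n != 1) return error.Mismatch;
}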
const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; @@ -27554,7 +27816,7 @@ fn fieldCallBind( find_field: { switch (concrete_ty.zigTypeTag(mod)) { .Struct => { - try concrete_ty.resolveFields(mod); + try concrete_ty.resolveFields(pt); if (mod.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; @@ -27563,7 +27825,7 @@ fn fieldCallBind( return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr); } else if (concrete_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { - return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) }; + return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(mod)) }; } if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= concrete_ty.structFieldCount(mod)) break :find_field; @@ -27580,7 +27842,7 @@ fn fieldCallBind( } }, .Union => { - try concrete_ty.resolveFields(mod); + try concrete_ty.resolveFields(pt); const union_obj = mod.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); @@ -27661,7 +27923,7 @@ fn fieldCallBind( const msg = msg: { const msg = try sema.errMsg(src, "no field or member function named '{}' in '{}'", .{ field_name.fmt(ip), - concrete_ty.fmt(mod), + concrete_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); @@ -27689,8 +27951,9 @@ fn finishFieldCallBind( field_index: u32, object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { - const mod = sema.mod; - const ptr_field_ty = try mod.ptrTypeSema(.{ + const pt = sema.pt; + const mod = pt.zcu; + const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), @@ -27701,14 +27964,14 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { - try container_ty.resolveStructFieldInits(mod); - const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?; + try container_ty.resolveStructFieldInits(pt); + const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?; return .{ .direct = Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const ptr_val = try struct_ptr_val.ptrField(field_index, mod); + const ptr_val = try struct_ptr_val.ptrField(field_index, pt); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -27725,7 +27988,8 @@ fn namespaceLookup( opt_namespace: InternPool.OptionalNamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?InternPool.DeclIndex { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, opt_namespace, decl_name, true)) |decl_index| { const decl = mod.declPtr(decl_index); @@ -27780,16 +28044,17 @@ fn structFieldPtr( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try struct_ty.resolveFields(mod); - try 
struct_ty.resolveLayout(mod); + try struct_ty.resolveFields(pt); + try struct_ty.resolveLayout(pt); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { - const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod)); + const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); @@ -27817,14 +28082,15 @@ fn structFieldPtrByIndex( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, mod); + const val = try struct_ptr_val.ptrField(field_index, pt); return Air.internedToRef(val.toIntern()); } @@ -27848,7 +28114,7 @@ fn structFieldPtrByIndex( try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child)); if (struct_type.layout == .@"packed") { - switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, mod)) { + switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) { .bit_ptr => |packed_offset| { ptr_ty_data.flags.alignment = parent_align; ptr_ty_data.packed_offset = packed_offset; @@ -27861,14 +28127,14 @@ fn structFieldPtrByIndex( // For extern structs, field alignment might be bigger than type's // natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the // second field is aligned as u32. - const field_offset = struct_ty.structFieldOffset(field_index, mod); + const field_offset = struct_ty.structFieldOffset(field_index, pt); ptr_ty_data.flags.alignment = if (parent_align == .none) .none else @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. 
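// --- illustrative sketch, not part of the patch ---
// The extern-struct case handled above, stated concretely: `y` below has a
// natural alignment of 2 on common targets, but its C-layout offset means any
// pointer to it inside an aligned parent is 4-aligned -- which is what the
// @min(parent_align, @ctz(field_offset)) computation (in log2 encoding)
// derives.
comptime {
    const E = extern struct { x: u32, y: u16 };
    if (@offsetOf(E, "y") != 4) unreachable;
}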
- const field_align = try mod.structFieldAlignmentAdvanced( + const field_align = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, @@ -27880,11 +28146,11 @@ fn structFieldPtrByIndex( field_align.min(parent_align); } - const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data); + const ptr_field_ty = try pt.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { - try struct_ty.resolveStructFieldInits(mod); - const val = try mod.intern(.{ .ptr = .{ + try struct_ty.resolveStructFieldInits(pt); + const val = try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, .byte_offset = 0, @@ -27905,11 +28171,12 @@ fn structFieldVal( field_name_src: LazySrcLoc, struct_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try struct_ty.resolveFields(mod); + try struct_ty.resolveFields(pt); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -27920,7 +28187,7 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } @@ -27929,15 +28196,15 @@ fn structFieldVal( return Air.internedToRef(field_val.toIntern()); if (try sema.resolveValue(struct_byval)) |struct_val| { - if (struct_val.isUndef(mod)) return mod.undefRef(field_ty); + if (struct_val.isUndef(mod)) return pt.undefRef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } - return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern()); + return Air.internedToRef((try struct_val.fieldValue(pt, field_index)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); - try field_ty.resolveLayout(mod); + try field_ty.resolveLayout(pt); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { @@ -27961,9 +28228,10 @@ fn tupleFieldVal( field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (field_name.eqlSlice("len", &mod.intern_pool)) { - return mod.intRef(Type.usize, tuple_ty.structFieldCount(mod)); + return pt.intRef(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); @@ -27977,18 +28245,18 @@ fn tupleFieldIndex( field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { - const mod = sema.mod; - const ip = &mod.intern_pool; + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; assert(!field_name.eqlSlice("len", ip)); if (field_name.toUnsigned(ip)) |field_index| { - if (field_index < tuple_ty.structFieldCount(mod)) return field_index; + if (field_index < tuple_ty.structFieldCount(pt.zcu)) return field_index; return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{ - field_name.fmt(ip), tuple_ty.fmt(mod), + field_name.fmt(ip), tuple_ty.fmt(pt), 
}); } return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{ - field_name.fmt(ip), tuple_ty.fmt(mod), + field_name.fmt(ip), tuple_ty.fmt(pt), }); } @@ -28000,12 +28268,13 @@ fn tupleFieldValByIndex( field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try tuple_ty.resolveStructFieldInits(mod); - if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + try tuple_ty.resolveStructFieldInits(pt); + if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } @@ -28014,9 +28283,9 @@ fn tupleFieldValByIndex( return Air.internedToRef(opv.toIntern()); } return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) { - .undef => mod.undefRef(field_ty), + .undef => pt.undefRef(field_ty), .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) { - .bytes => |bytes| try mod.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)), + .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)), .elems => |elems| Value.fromInterned(elems[field_index]), .repeated_elem => |elem| Value.fromInterned(elem), }.toIntern()), @@ -28025,7 +28294,7 @@ fn tupleFieldValByIndex( } try sema.requireRuntimeBlock(block, src, null); - try field_ty.resolveLayout(mod); + try field_ty.resolveLayout(pt); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } @@ -28039,18 +28308,19 @@ fn unionFieldPtr( union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; assert(union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); - try union_ty.resolveFields(mod); + try union_ty.resolveFields(pt); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const ptr_field_ty = try mod.ptrTypeSema(.{ + const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, @@ -28061,7 +28331,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); - const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); + const field_align = try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28087,9 +28357,9 @@ fn unionFieldPtr( switch (union_obj.getLayout(ip)) { .auto => if (initializing) { // Store to the union to initialize the tag. 
- const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const new_union_val = try mod.unionValue(union_ty, field_tag, try mod.undefValue(payload_ty)); + const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty)); try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty); } else { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse @@ -28098,7 +28368,7 @@ fn unionFieldPtr( return sema.failWithUseOfUndef(block, src); } const un = ip.indexToKey(union_val.toIntern()).un; - const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { @@ -28117,7 +28387,7 @@ fn unionFieldPtr( }, .@"packed", .@"extern" => {}, } - const field_ptr_val = try union_ptr_val.ptrField(field_index, mod); + const field_ptr_val = try union_ptr_val.ptrField(field_index, pt); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28125,7 +28395,7 @@ fn unionFieldPtr( if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1) { - const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); // TODO would it be better if get_union_tag supported pointers to unions? const union_val = try block.addTyOp(.load, union_ty, union_ptr); @@ -28148,21 +28418,22 @@ fn unionFieldVal( field_name_src: LazySrcLoc, union_ty: Type, ) CompileError!Air.Inst.Ref { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); - try union_ty.resolveFields(zcu); + try union_ty.resolveFields(pt); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?); if (try sema.resolveValue(union_byval)) |union_val| { - if (union_val.isUndef(zcu)) return zcu.undefRef(field_ty); + if (union_val.isUndef(zcu)) return pt.undefRef(field_ty); const un = ip.indexToKey(union_val.toIntern()).un; - const field_tag = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.getLayout(ip)) { .auto => { @@ -28191,7 +28462,7 @@ fn unionFieldVal( .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. 
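// --- illustrative sketch, not part of the patch ---
// The tag checks in the hunks above, from user code: reading an inactive
// field of an auto-layout tagged union is a compile error at comptime and a
// safety panic at runtime; packed and extern unions skip the check and
// reinterpret the bytes instead.
const Number = union(enum) {
    int: i64,
    float: f64,
};

test "inactive union field access is checked" {
    var v: Number = .{ .int = 42 };
    v = .{ .float = 1.5 };
    // Reading `v.int` here would trip the "access of inactive union field"
    // safety check.
    if (v.float != 1.5) return error.Mismatch;
}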
return Air.internedToRef(un.val); - } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| { + } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(pt, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } @@ -28201,7 +28472,7 @@ fn unionFieldVal( if (union_obj.getLayout(ip) == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1) { - const wanted_tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval); try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag); @@ -28210,7 +28481,7 @@ fn unionFieldVal( _ = try block.addNoOp(.unreach); return .unreachable_value; } - try field_ty.resolveLayout(zcu); + try field_ty.resolveLayout(pt); return block.addStructFieldVal(union_byval, field_index, field_ty); } @@ -28224,13 +28495,14 @@ fn elemPtr( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.childType(mod), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}), }; try checkIndexable(sema, block, src, indexable_ty); @@ -28241,7 +28513,7 @@ fn elemPtr( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -28267,7 +28539,8 @@ fn elemPtrOneLayerOnly( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; try checkIndexable(sema, block, src, indexable_ty); @@ -28279,11 +28552,11 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); - const elem_ptr = try ptr_val.ptrElem(index, mod); + const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); + const elem_ptr = try ptr_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr.toIntern()); }; - const result_ty = try indexable_ty.elemPtrType(null, mod); + const result_ty = try indexable_ty.elemPtrType(null, pt); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28297,7 +28570,7 @@ fn elemPtrOneLayerOnly( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple 
field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -28319,7 +28592,8 @@ fn elemVal( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; try checkIndexable(sema, block, src, indexable_ty); @@ -28337,14 +28611,14 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); const elem_ty = indexable_ty.elemType2(mod); - const many_ptr_ty = try mod.manyConstPtrType(elem_ty); - const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); - const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); - const elem_ptr_val = try many_ptr_val.ptrElem(index, mod); + const many_ptr_ty = try pt.manyConstPtrType(elem_ty); + const many_ptr_val = try pt.getCoerced(indexable_val, many_ptr_ty); + const elem_ptr_ty = try pt.singleConstPtrType(elem_ty); + const elem_ptr_val = try many_ptr_val.ptrElem(index, pt); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { - return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern()); } break :rs indexable_src; }; @@ -28358,7 +28632,7 @@ fn elemVal( if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; - const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod)); + const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(pt)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } @@ -28376,7 +28650,7 @@ fn elemVal( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -28391,13 +28665,12 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { - const mod = sema.mod; if (try sema.typeRequiresComptime(elem_ty)) { const msg = msg: { const msg = try sema.errMsg( elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(mod)}, + .{parent_ty.fmt(sema.pt)}, ); errdefer msg.destroy(sema.gpa); @@ -28418,10 +28691,11 @@ fn tupleFieldPtr( field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); - try tuple_ty.resolveFields(mod); + try tuple_ty.resolveFields(pt); const field_count = 
tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28435,7 +28709,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index, mod); - const ptr_field_ty = try mod.ptrTypeSema(.{ + const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), @@ -28445,10 +28719,10 @@ fn tupleFieldPtr( }); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try tuple_ty.resolveStructFieldInits(mod); + try tuple_ty.resolveStructFieldInits(pt); - if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { - return Air.internedToRef((try mod.intern(.{ .ptr = .{ + if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_val| { + return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = default_val.toIntern() }, .byte_offset = 0, @@ -28456,7 +28730,7 @@ fn tupleFieldPtr( } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { - const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod); + const field_ptr_val = try tuple_ptr_val.ptrField(field_index, pt); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28476,9 +28750,10 @@ fn tupleField( field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const tuple_ty = sema.typeOf(tuple); - try tuple_ty.resolveFields(mod); + try tuple_ty.resolveFields(pt); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28494,20 +28769,20 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try tuple_ty.resolveStructFieldInits(mod); - if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + try tuple_ty.resolveStructFieldInits(pt); + if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } if (try sema.resolveValue(tuple)) |tuple_val| { - if (tuple_val.isUndef(mod)) return mod.undefRef(field_ty); - return Air.internedToRef((try tuple_val.fieldValue(mod, field_index)).toIntern()); + if (tuple_val.isUndef(mod)) return pt.undefRef(field_ty); + return Air.internedToRef((try tuple_val.fieldValue(pt, field_index)).toIntern()); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); - try field_ty.resolveLayout(mod); + try field_ty.resolveLayout(pt); return block.addStructFieldVal(tuple, field_index, field_ty); } @@ -28521,7 +28796,8 @@ fn elemValArray( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const array_ty = sema.typeOf(array); const array_sent = array_ty.sentinel(mod); const array_len = array_ty.arrayLen(mod); @@ -28537,7 +28813,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); @@ -28550,11 +28826,11 @@ fn elemValArray( } if (maybe_undef_array_val) |array_val| { if (array_val.isUndef(mod)) { - return mod.undefRef(elem_ty); + return pt.undefRef(elem_ty); } if 
(maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); - const elem_val = try array_val.elemValue(mod, index); + const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); + const elem_val = try array_val.elemValue(pt, index); return Air.internedToRef(elem_val.toIntern()); } } @@ -28565,7 +28841,7 @@ fn elemValArray( if (oob_safety and block.wantSafety()) { // Runtime check is only needed if unable to comptime check if (maybe_index_val == null) { - const len_inst = try mod.intRef(Type.usize, array_len); + const len_inst = try pt.intRef(Type.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } @@ -28589,7 +28865,8 @@ fn elemPtrArray( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const array_ptr_ty = sema.typeOf(array_ptr); const array_ty = array_ptr_ty.childType(mod); const array_sent = array_ty.sentinel(mod) != null; @@ -28603,7 +28880,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -28611,14 +28888,14 @@ fn elemPtrArray( break :o index; } else null; - const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod); + const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { - return mod.undefRef(elem_ptr_ty); + return pt.undefRef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.ptrElem(index, mod); + const elem_ptr = try array_ptr_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr.toIntern()); } } @@ -28632,7 +28909,7 @@ fn elemPtrArray( // Runtime check is only needed if unable to comptime check. 
if (oob_safety and block.wantSafety() and offset == null) { - const len_inst = try mod.intRef(Type.usize, array_len); + const len_inst = try pt.intRef(Type.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } @@ -28650,7 +28927,8 @@ fn elemValSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel(mod) != null; const elem_ty = slice_ty.elemType2(mod); @@ -28663,19 +28941,19 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = try slice_val.sliceLen(mod); + const slice_len = try slice_val.sliceLen(pt); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_ty = try slice_ty.elemPtrType(index, mod); - const elem_ptr_val = try slice_val.ptrElem(index, mod); + const elem_ptr_ty = try slice_ty.elemPtrType(index, pt); + const elem_ptr_val = try slice_val.ptrElem(index, pt); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } @@ -28688,7 +28966,7 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try mod.intRef(Type.usize, try slice_val.sliceLen(mod)) + try pt.intRef(Type.usize, try slice_val.sliceLen(pt)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28707,24 +28985,25 @@ fn elemPtrSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel(mod) != null; const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt)); break :o index; } else null; - const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod); + const elem_ptr_ty = try slice_ty.elemPtrType(offset, pt); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { - return mod.undefRef(elem_ptr_ty); + return pt.undefRef(elem_ptr_ty); } - const slice_len = try slice_val.sliceLen(mod); + const slice_len = try slice_val.sliceLen(pt); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -28734,7 +29013,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.ptrElem(index, mod); + const elem_ptr_val = try slice_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr_val.toIntern()); } } @@ -28747,7 +29026,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) - break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); + break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28810,11 +29089,12 @@ fn coerceExtra( opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { if (dest_ty.isGenericPoison()) return inst; - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const dest_ty_src = inst_src; // TODO better source location - try dest_ty.resolveFields(zcu); + try dest_ty.resolveFields(pt); const inst_ty = sema.typeOf(inst); - try inst_ty.resolveFields(zcu); + try inst_ty.resolveFields(pt); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) @@ -28838,12 +29118,12 @@ fn coerceExtra( if (maybe_inst_val) |val| { // undefined sets the optional bit also to undefined. 
if (val.toIntern() == .undef) { - return zcu.undefRef(dest_ty); + return pt.undefRef(dest_ty); } // null to ?T if (val.toIntern() == .null_value) { - return Air.internedToRef((try zcu.intern(.{ .opt = .{ + return Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = dest_ty.toIntern(), .val = .none, } }))); @@ -29018,7 +29298,7 @@ fn coerceExtra( switch (dest_info.flags.size) { // coercion to C pointer .C => switch (inst_ty.zigTypeTag(zcu)) { - .Null => return Air.internedToRef(try zcu.intern(.{ .ptr = .{ + .Null => return Air.internedToRef(try pt.intern(.{ .ptr = .{ .ty = dest_ty.toIntern(), .base_addr = .int, .byte_offset = 0, @@ -29063,7 +29343,7 @@ fn coerceExtra( if (inst_info.flags.size == .Slice) { assert(dest_info.sentinel == .none); if (inst_info.sentinel == .none or - inst_info.sentinel != (try zcu.intValue(Type.fromInterned(inst_info.child), 0)).toIntern()) + inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern()) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -29112,7 +29392,7 @@ fn coerceExtra( block, inst_src, "array literal requires address-of operator (&) to coerce to slice type '{}'", - .{dest_ty.fmt(zcu)}, + .{dest_ty.fmt(pt)}, ); } @@ -29123,10 +29403,10 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. if (inst_child_ty.structFieldCount(zcu) == 0) { - const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema); - return Air.internedToRef(try zcu.intern(.{ .slice = .{ + const align_val = try dest_ty.ptrAlignmentAdvanced(pt, .sema); + return Air.internedToRef(try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), - .ptr = try zcu.intern(.{ .ptr = .{ + .ptr = try pt.intern(.{ .ptr = .{ .ty = dest_ty.slicePtrFieldType(zcu).toIntern(), .base_addr = .int, .byte_offset = align_val.toByteUnits().?, @@ -29138,7 +29418,7 @@ fn coerceExtra( // pointer to tuple to slice if (!dest_info.flags.is_const) { const err_msg = err_msg: { - const err_msg = try sema.errMsg(inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(zcu)}); + const err_msg = try sema.errMsg(inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(pt)}); errdefer err_msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); break :err_msg err_msg; @@ -29194,12 +29474,12 @@ fn coerceExtra( // comptime-known integer to other number if (!(try sema.intFitsInType(val, dest_ty, null))) { if (!opts.report_err) return error.NotCoercible; - return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) }); + return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(pt), val.fmtValue(pt, sema) }); } return switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .undef => try zcu.undefRef(dest_ty), + .undef => try pt.undefRef(dest_ty), .int => |int| Air.internedToRef( - try zcu.intern_pool.getCoercedInts(zcu.gpa, int, dest_ty.toIntern()), + try zcu.intern_pool.getCoercedInts(zcu.gpa, pt.tid, int, dest_ty.toIntern()), ), else => unreachable, }; @@ -29228,18 +29508,18 @@ fn coerceExtra( .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(zcu)) { .ComptimeFloat => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); - const result_val = try val.floatCast(dest_ty, zcu); + const result_val = try val.floatCast(dest_ty, pt); return 
Air.internedToRef(result_val.toIntern()); }, .Float => { if (maybe_inst_val) |val| { - const result_val = try val.floatCast(dest_ty, zcu); - if (!val.eql(try result_val.floatCast(inst_ty, zcu), inst_ty, zcu)) { + const result_val = try val.floatCast(dest_ty, pt); + if (!val.eql(try result_val.floatCast(inst_ty, pt), inst_ty, zcu)) { return sema.fail( block, inst_src, "type '{}' cannot represent float value '{}'", - .{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) }, + .{ dest_ty.fmt(pt), val.fmtValue(pt, sema) }, ); } return Air.internedToRef(result_val.toIntern()); @@ -29268,7 +29548,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema); + const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, pt, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { @@ -29276,7 +29556,7 @@ fn coerceExtra( // block, // inst_src, // "type '{}' cannot represent integer value '{}'", - // .{ dest_ty.fmt(zcu), val }, + // .{ dest_ty.fmt(pt), val }, // ); //} return Air.internedToRef(result_val.toIntern()); @@ -29290,10 +29570,10 @@ fn coerceExtra( const string = zcu.intern_pool.indexToKey(val.toIntern()).enum_literal; const field_index = dest_ty.enumFieldIndex(string, zcu) orelse { return sema.fail(block, inst_src, "no field named '{}' in enum '{}'", .{ - string.fmt(&zcu.intern_pool), dest_ty.fmt(zcu), + string.fmt(&zcu.intern_pool), dest_ty.fmt(pt), }); }; - return Air.internedToRef((try zcu.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern()); + return Air.internedToRef((try pt.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern()); }, .Union => blk: { // union to its own tag type @@ -29308,12 +29588,12 @@ fn coerceExtra( .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { switch (inst_val.toIntern()) { - .undef => return zcu.undefRef(dest_ty), + .undef => return pt.undefRef(dest_ty), else => switch (zcu.intern_pool.indexToKey(inst_val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| { const error_set_ty = inst_ty.errorUnionSet(zcu); - const error_set_val = Air.internedToRef((try zcu.intern(.{ .err = .{ + const error_set_val = Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_ty.toIntern(), .name = err_name, } }))); @@ -29370,7 +29650,7 @@ fn coerceExtra( if (dest_ty.sentinel(zcu)) |dest_sent| { const src_sent = inst_ty.sentinel(zcu) orelse break :array_to_array; - if (dest_sent.toIntern() != (try zcu.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) { + if (dest_sent.toIntern() != (try pt.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) { break :array_to_array; } } @@ -29414,7 +29694,7 @@ fn coerceExtra( // undefined to anything. We do this after the big switch above so that // special logic has a chance to run first, such as `*[N]T` to `[]T` which // should initialize the length field of the slice. 
- if (maybe_inst_val) |val| if (val.toIntern() == .undef) return zcu.undefRef(dest_ty); + if (maybe_inst_val) |val| if (val.toIntern() == .undef) return pt.undefRef(dest_ty); if (!opts.report_err) return error.NotCoercible; @@ -29434,7 +29714,7 @@ fn coerceExtra( } const msg = msg: { - const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(zcu), inst_ty.fmt(zcu) }); + const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), inst_ty.fmt(pt) }); errdefer msg.destroy(sema.gpa); // E!T to T @@ -29486,7 +29766,7 @@ fn coerceInMemory( val: Value, dst_ty: Type, ) CompileError!Air.Inst.Ref { - return Air.internedToRef((try sema.mod.getCoerced(val, dst_ty)).toIntern()); + return Air.internedToRef((try sema.pt.getCoerced(val, dst_ty)).toIntern()); } const InMemoryCoercionResult = union(enum) { @@ -29607,7 +29887,7 @@ const InMemoryCoercionResult = union(enum) { } fn report(res: *const InMemoryCoercionResult, sema: *Sema, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { - const mod = sema.mod; + const pt = sema.pt; var cur = res; while (true) switch (cur.*) { .ok => unreachable, @@ -29624,7 +29904,7 @@ const InMemoryCoercionResult = union(enum) { }, .error_union_payload => |pair| { try sema.errNote(src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, @@ -29637,18 +29917,18 @@ const InMemoryCoercionResult = union(enum) { .array_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ - sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema), + sentinel.actual.fmtValue(pt, sema), sentinel.wanted.fmtValue(pt, sema), }); } else { try sema.errNote(src, msg, "destination array requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(mod, sema), + sentinel.wanted.fmtValue(pt, sema), }); } break; }, .array_elem => |pair| { try sema.errNote(src, msg, "array element type '{}' cannot cast into array element type '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, @@ -29660,19 +29940,19 @@ const InMemoryCoercionResult = union(enum) { }, .vector_elem => |pair| { try sema.errNote(src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .optional_shape => |pair| { try sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod), + pair.actual.optionalChild(pt.zcu).fmt(pt), pair.wanted.optionalChild(pt.zcu).fmt(pt), }); break; }, .optional_child => |pair| { try sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, @@ -29682,7 +29962,7 @@ const InMemoryCoercionResult = union(enum) { }, .missing_error => |missing_errors| { for (missing_errors) |err| { - try sema.errNote(src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)}); + try sema.errNote(src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&pt.zcu.intern_pool)}); } break; }, @@ -29736,7 
+30016,7 @@ const InMemoryCoercionResult = union(enum) { }, .fn_param => |param| { try sema.errNote(src, msg, "parameter {d} '{}' cannot cast into '{}'", .{ - param.index, param.actual.fmt(mod), param.wanted.fmt(mod), + param.index, param.actual.fmt(pt), param.wanted.fmt(pt), }); cur = param.child; }, @@ -29746,13 +30026,13 @@ const InMemoryCoercionResult = union(enum) { }, .fn_return_type => |pair| { try sema.errNote(src, msg, "return type '{}' cannot cast into return type '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .ptr_child => |pair| { try sema.errNote(src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, @@ -29763,11 +30043,11 @@ const InMemoryCoercionResult = union(enum) { .ptr_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ - sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema), + sentinel.actual.fmtValue(pt, sema), sentinel.wanted.fmtValue(pt, sema), }); } else { try sema.errNote(src, msg, "destination pointer requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(mod, sema), + sentinel.wanted.fmtValue(pt, sema), }); } break; @@ -29787,15 +30067,15 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_allowzero => |pair| { - const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod); - const actual_allow_zero = pair.actual.ptrAllowsZero(mod); + const wanted_allow_zero = pair.wanted.ptrAllowsZero(pt.zcu); + const actual_allow_zero = pair.actual.ptrAllowsZero(pt.zcu); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(src, msg, "'{}' could have null values which are illegal in type '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); } else { try sema.errNote(src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); } break; @@ -29821,13 +30101,13 @@ const InMemoryCoercionResult = union(enum) { }, .double_ptr_to_anyopaque => |pair| { try sema.errNote(src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); break; }, .slice_to_anyopaque => |pair| { try sema.errNote(src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(mod), pair.wanted.fmt(mod), + pair.actual.fmt(pt), pair.wanted.fmt(pt), }); try sema.errNote(src, msg, "consider using '.ptr'", .{}); break; @@ -29864,7 +30144,8 @@ pub fn coerceInMemoryAllowed( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) CompileError!InMemoryCoercionResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (dest_ty.eql(src_ty, mod)) return .ok; @@ -29968,7 +30249,7 @@ pub fn coerceInMemoryAllowed( (src_info.sentinel != null and dest_info.sentinel != null and dest_info.sentinel.?.eql( - try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type), + try pt.getCoerced(src_info.sentinel.?, dest_info.elem_type), dest_info.elem_type, mod, )); @@ -30045,8 +30326,8 @@ pub fn coerceInMemoryAllowed( // The memory layout of @Vector(N, iM) is the same as the integer type i(N*M), // that is to say, the padding bits are not in the same place 
as the array [N]iM. // If there's no padding, the bitcast is possible. - const elem_bit_size = dest_elem_ty.bitSize(mod); - const elem_abi_byte_size = dest_elem_ty.abiSize(mod); + const elem_bit_size = dest_elem_ty.bitSize(pt); + const elem_abi_byte_size = dest_elem_ty.abiSize(pt); if (elem_abi_byte_size * 8 == elem_bit_size) return .ok; } @@ -30081,7 +30362,7 @@ pub fn coerceInMemoryAllowed( const field_count = dest_ty.structFieldCount(mod); for (0..field_count) |field_idx| { if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple; - if (dest_ty.structFieldAlign(field_idx, mod) != src_ty.structFieldAlign(field_idx, mod)) break :tuple; + if (dest_ty.structFieldAlign(field_idx, pt) != src_ty.structFieldAlign(field_idx, pt)) break :tuple; const dest_field_ty = dest_ty.structFieldType(field_idx, mod); const src_field_ty = src_ty.structFieldType(field_idx, mod); const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src); @@ -30104,7 +30385,8 @@ fn coerceInMemoryAllowedErrorSets( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -30202,7 +30484,8 @@ fn coerceInMemoryAllowedFns( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const dest_info = mod.typeToFunc(dest_ty).?; @@ -30303,7 +30586,8 @@ fn coerceInMemoryAllowedPtrs( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const dest_info = dest_ptr_ty.ptrInfo(zcu); const src_info = src_ptr_ty.ptrInfo(zcu); @@ -30381,7 +30665,7 @@ fn coerceInMemoryAllowedPtrs( const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or (src_info.sentinel != .none and - dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child)); + dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child)); if (!ok_sent) { return InMemoryCoercionResult{ .ptr_sentinel = .{ .actual = switch (src_info.sentinel) { @@ -30432,7 +30716,8 @@ fn coerceVarArgParam( ) !Air.Inst.Ref { if (block.is_typeof) return inst; - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const uncasted_ty = sema.typeOf(inst); const coerced = switch (uncasted_ty.zigTypeTag(mod)) { // TODO consider casting to c_int/f64 if they fit @@ -30449,9 +30734,9 @@ fn coerceVarArgParam( }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Float => float: { - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const double_bits = target.c_type_bit_size(.double); - const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget()); + const inst_bits = uncasted_ty.floatBits(target); if (inst_bits >= double_bits) break :float inst; switch (double_bits) { 32 => break :float try sema.coerce(block, Type.f32, inst, inst_src), @@ -30461,7 +30746,7 @@ fn coerceVarArgParam( }, else => if (uncasted_ty.isAbiInt(mod)) int: { if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const uncasted_info = uncasted_ty.intInfo(mod); if (uncasted_info.bits <= target.c_type_bit_size(switch 
(uncasted_info.signedness) { .signed => .int, @@ -30491,7 +30776,7 @@ fn coerceVarArgParam( const coerced_ty = sema.typeOf(coerced); if (!try sema.validateExternType(coerced_ty, .param_ty)) { const msg = msg: { - const msg = try sema.errMsg(inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, inst_src, coerced_ty, .param_ty); @@ -30526,7 +30811,8 @@ fn storePtr2( operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); @@ -30548,7 +30834,7 @@ fn storePtr2( while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i); - const elem_index = try mod.intRef(Type.usize, i); + const elem_index = try pt.intRef(Type.usize, i); const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true); try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store); } @@ -30620,7 +30906,7 @@ fn storePtr2( return; } return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{ - ptr_ty.fmt(sema.mod), + ptr_ty.fmt(pt), }); } @@ -30734,7 +31020,8 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins /// pointer. Only if the final element type matches the vector element type, and the /// lengths match. fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_ref = ptr; @@ -30751,7 +31038,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. 
- if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and + if (array_ty.childType(mod).eql(vector_ty.childType(mod), mod) and array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { return ptr_ref; @@ -30770,17 +31057,18 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; // TODO: audit use sites to eliminate this coercion - const coerced_operand_val = try zcu.getCoerced(operand_val, operand_ty); + const coerced_operand_val = try pt.getCoerced(operand_val, operand_ty); // TODO: audit use sites to eliminate this coercion - const ptr_ty = try zcu.ptrType(info: { + const ptr_ty = try pt.ptrType(info: { var info = ptr_val.typeOf(zcu).ptrInfo(zcu); info.child = operand_ty.toIntern(); break :info info; }); - const coerced_ptr_val = try zcu.getCoerced(ptr_val, ptr_ty); + const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty); switch (try sema.storeComptimePtr(block, src, coerced_ptr_val, coerced_operand_val)) { .success => {}, @@ -30800,13 +31088,13 @@ fn storePtrVal( block, src, "comptime dereference requires '{}' to have a well-defined layout", - .{ty.fmt(zcu)}, + .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, src, "dereference of '{}' exceeds bounds of containing decl of type '{}'", - .{ ptr_ty.fmt(zcu), ty.fmt(zcu) }, + .{ ptr_ty.fmt(pt), ty.fmt(pt) }, ), .exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}), } @@ -30820,31 +31108,32 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { - const zcu = sema.mod; - try dest_ty.resolveLayout(zcu); + const pt = sema.pt; + const zcu = pt.zcu; + try dest_ty.resolveLayout(pt); const old_ty = sema.typeOf(inst); - try old_ty.resolveLayout(zcu); + try old_ty.resolveLayout(pt); - const dest_bits = dest_ty.bitSize(zcu); - const old_bits = old_ty.bitSize(zcu); + const dest_bits = dest_ty.bitSize(pt); + const old_bits = old_ty.bitSize(pt); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ - dest_ty.fmt(zcu), + dest_ty.fmt(pt), dest_bits, - old_ty.fmt(zcu), + old_ty.fmt(pt), old_bits, }); } if (try sema.resolveValue(inst)) |val| { if (val.isUndef(zcu)) - return zcu.undefRef(dest_ty); + return pt.undefRef(dest_ty); if (old_ty.zigTypeTag(zcu) == .ErrorSet and dest_ty.zigTypeTag(zcu) == .ErrorSet) { // Special case: we sometimes call `bitCast` on error set values, but they // don't have a well-defined layout, so we can't use `bitCastVal` on them. 
- return Air.internedToRef((try zcu.getCoerced(val, dest_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(val, dest_ty)).toIntern()); } if (try sema.bitCastVal(val, dest_ty, 0, 0, 0)) |result_val| { return Air.internedToRef(result_val.toIntern()); @@ -30862,16 +31151,17 @@ fn coerceArrayPtrToSlice( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (try sema.resolveValue(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(mod); const slice_ptr_ty = dest_ty.slicePtrFieldType(mod); - const slice_ptr = try mod.getCoerced(val, slice_ptr_ty); - const slice_val = try mod.intern(.{ .slice = .{ + const slice_ptr = try pt.getCoerced(val, slice_ptr_ty); + const slice_val = try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = slice_ptr.toIntern(), - .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), + .len = (try pt.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), } }); return Air.internedToRef(slice_val); } @@ -30880,7 +31170,8 @@ fn coerceArrayPtrToSlice( } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const dest_info = dest_ty.ptrInfo(mod); const inst_info = inst_ty.ptrInfo(mod); const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(mod) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(mod) == 0 or @@ -30913,12 +31204,12 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul const inst_align = if (inst_info.flags.alignment != .none) inst_info.flags.alignment else - Type.fromInterned(inst_info.child).abiAlignment(mod); + Type.fromInterned(inst_info.child).abiAlignment(pt); const dest_align = if (dest_info.flags.alignment != .none) dest_info.flags.alignment else - Type.fromInterned(dest_info.child).abiAlignment(mod); + Type.fromInterned(dest_info.child).abiAlignment(pt); if (dest_align.compare(.gt, inst_align)) { in_memory_result.* = .{ .ptr_alignment = .{ @@ -30937,15 +31228,16 @@ fn coerceCompatiblePtrs( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_ty = sema.typeOf(inst); if (try sema.resolveValue(inst)) |val| { if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { - return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)}); } // The comptime Value representation is compatible with both types. 
return Air.internedToRef( - (try mod.getCoerced(val, dest_ty)).toIntern(), + (try pt.getCoerced(val, dest_ty)).toIntern(), ); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30979,14 +31271,15 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ - union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + union_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(union_ty_src, msg, "cannot coerce enum to untagged union", .{}); @@ -30998,15 +31291,15 @@ fn coerceEnumToUnion( const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src); if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| { - const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse { + const field_index = union_ty.unionTagFieldIndex(val, pt.zcu) orelse { return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{ - union_ty.fmt(sema.mod), val.fmtValue(sema.mod, sema), + union_ty.fmt(pt), val.fmtValue(pt, sema), }); }; const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try field_ty.resolveFields(mod); + try field_ty.resolveFields(pt); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); @@ -31025,8 +31318,8 @@ fn coerceEnumToUnion( const msg = msg: { const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; const msg = try sema.errMsg(inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{ - inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), - field_ty.fmt(sema.mod), field_name.fmt(ip), + inst_ty.fmt(pt), union_ty.fmt(pt), + field_ty.fmt(pt), field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); @@ -31039,7 +31332,7 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(block, msg); }; - return Air.internedToRef((try mod.unionValue(union_ty, val, opv)).toIntern()); + return Air.internedToRef((try pt.unionValue(union_ty, val, opv)).toIntern()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -31047,7 +31340,7 @@ fn coerceEnumToUnion( if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ - union_ty.fmt(sema.mod), + union_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, tag_ty); @@ -31066,7 +31359,7 @@ fn coerceEnumToUnion( const err_msg = msg orelse try sema.errMsg( inst_src, "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field", - .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) }, + .{ tag_ty.fmt(pt), union_ty.fmt(pt) }, ); msg = err_msg; @@ -31081,7 +31374,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. 
- if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { + if (union_ty.unionHasAllZeroBitFieldTypes(pt)) { return block.addBitCast(union_ty, enum_tag); } @@ -31089,7 +31382,7 @@ fn coerceEnumToUnion( const msg = try sema.errMsg( inst_src, "runtime coercion from enum '{}' to union '{}' which has non-void fields", - .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) }, + .{ tag_ty.fmt(pt), union_ty.fmt(pt) }, ); errdefer msg.destroy(sema.gpa); @@ -31099,7 +31392,7 @@ fn coerceEnumToUnion( if (!(try sema.typeHasRuntimeBits(field_ty))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ field_name.fmt(ip), - field_ty.fmt(sema.mod), + field_ty.fmt(pt), }); } try sema.addDeclaredHereNote(msg, union_ty); @@ -31116,7 +31409,8 @@ fn coerceAnonStructToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const field_info: union(enum) { @@ -31174,7 +31468,8 @@ fn coerceAnonStructToUnionPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); @@ -31189,7 +31484,8 @@ fn coerceAnonStructToStructPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); @@ -31205,7 +31501,8 @@ fn coerceArrayLike( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_ty = sema.typeOf(inst); const target = mod.getTarget(); @@ -31226,7 +31523,7 @@ fn coerceArrayLike( if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(mod), inst_ty.fmt(mod), + dest_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -31270,7 +31567,7 @@ fn coerceArrayLike( var runtime_src: ?LazySrcLoc = null; for (element_vals, element_refs, 0..) 
|*val, *ref, i| { - const index_ref = Air.internedToRef((try mod.intValue(Type.usize, i)).toIntern()); + const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern()); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); @@ -31290,7 +31587,7 @@ fn coerceArrayLike( return block.addAggregateInit(dest_ty, element_refs); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = element_vals }, } }))); @@ -31305,7 +31602,8 @@ fn coerceTupleToArray( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const inst_ty = sema.typeOf(inst); const inst_len = inst_ty.arrayLen(mod); const dest_len = dest_ty.arrayLen(mod); @@ -31313,7 +31611,7 @@ fn coerceTupleToArray( if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + dest_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -31355,7 +31653,7 @@ fn coerceTupleToArray( return block.addAggregateInit(dest_ty, element_refs); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = element_vals }, } }))); @@ -31370,11 +31668,12 @@ fn coerceTupleToSlicePtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const slice_info = slice_ty.ptrInfo(mod); - const array_ty = try mod.arrayType(.{ + const array_ty = try pt.arrayType(.{ .len = tuple_ty.structFieldCount(mod), .sentinel = slice_info.sentinel, .child = slice_info.child, @@ -31396,7 +31695,8 @@ fn coerceTupleToArrayPtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = Type.fromInterned(ptr_info.child); @@ -31417,10 +31717,11 @@ fn coerceTupleToStruct( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - try struct_ty.resolveFields(mod); - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveFields(pt); + try struct_ty.resolveStructFieldInits(pt); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); @@ -31461,7 +31762,7 @@ fn coerceTupleToStruct( }; const field_init = Value.fromInterned(struct_type.field_inits.get(ip)[struct_field_index]); - if (!init_val.eql(field_init, struct_field_ty, sema.mod)) { + if (!init_val.eql(field_init, struct_field_ty, pt.zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, tuple_field_index); } } @@ -31512,7 +31813,7 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - const struct_val = try mod.intern(.{ .aggregate = .{ + const 
struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); @@ -31529,7 +31830,8 @@ fn coerceTupleToTuple( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -31594,7 +31896,7 @@ fn coerceTupleToTuple( }); }; - if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), sema.mod)) { + if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -31659,7 +31961,7 @@ fn coerceTupleToTuple( return block.addAggregateInit(tuple_ty, field_refs); } - return Air.internedToRef((try mod.intern(.{ .aggregate = .{ + return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = tuple_ty.toIntern(), .storage = .{ .elems = field_vals }, } }))); @@ -31689,17 +31991,19 @@ fn addReferenceEntry( src: LazySrcLoc, referenced_unit: AnalUnit, ) !void { - if (sema.mod.comp.reference_trace == 0) return; + const zcu = sema.pt.zcu; + if (zcu.comp.reference_trace == 0) return; const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); if (gop.found_existing) return; // TODO: we need to figure out how to model inline calls here. // They aren't references in the analysis sense, but ought to show up in the reference trace! // Would representing inline calls in the reference table cause excessive memory usage? - try sema.mod.addUnitReference(sema.ownerUnit(), referenced_unit, src); + try zcu.addUnitReference(sema.ownerUnit(), referenced_unit, src); } pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const decl = mod.declPtr(decl_index); if (decl.analysis == .in_progress) { @@ -31710,7 +32014,7 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile return sema.failWithOwnedErrorMsg(null, msg); } - mod.ensureDeclAnalyzed(decl_index) catch |err| { + pt.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func_index != .none) { ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure; } else { @@ -31721,9 +32025,10 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile } fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - mod.ensureFuncBodyAnalyzed(func) catch |err| { + pt.ensureFuncBodyAnalyzed(func) catch |err| { if (sema.owner_func_index != .none) { ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure; } else { @@ -31734,15 +32039,15 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void } fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { - const mod = sema.mod; - const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque); - return Value.fromInterned((try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), - .val = if (opt_val) |val| (try mod.getCoerced( + const pt = sema.pt; + const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque); + return Value.fromInterned(try pt.intern(.{ .opt = .{ + .ty = (try 
pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), + .val = if (opt_val) |val| (try pt.getCoerced( Value.fromInterned(try sema.refValue(val.toIntern())), ptr_anyopaque_ty, )).toIntern() else .none, - } }))); + } })); } fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { @@ -31754,7 +32059,8 @@ fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); try sema.ensureDeclAnalyzed(decl_index); @@ -31767,7 +32073,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl }); // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); - const ptr_ty = try mod.ptrTypeSema(.{ + const ptr_ty = try pt.ptrTypeSema(.{ .child = decl_val.typeOf(mod).toIntern(), .flags = .{ .alignment = owner_decl.alignment, @@ -31778,7 +32084,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl if (analyze_fn_body) { try sema.maybeQueueFuncBodyAnalysis(src, decl_index); } - return Air.internedToRef((try mod.intern(.{ .ptr = .{ + return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), .base_addr = .{ .decl = decl_index }, .byte_offset = 0, @@ -31786,7 +32092,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl } fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void { - const mod = sema.mod; + const mod = sema.pt.zcu; const decl = mod.declPtr(decl_index); const decl_val = try decl.valueOrFail(); if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return; @@ -31801,7 +32107,8 @@ fn analyzeRef( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const operand_ty = sema.typeOf(operand); if (try sema.resolveValue(operand)) |val| { @@ -31814,14 +32121,14 @@ fn analyzeRef( try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); - const ptr_type = try mod.ptrTypeSema(.{ + const ptr_type = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); - const mut_ptr_type = try mod.ptrTypeSema(.{ + const mut_ptr_type = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); @@ -31839,14 +32146,15 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ty.childType(mod), - else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}), }; if (elem_ty.zigTypeTag(mod) == .Opaque) { - return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(mod)}); + return sema.fail(block, 
ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(pt)}); } if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| { @@ -31868,7 +32176,7 @@ fn analyzeLoad( return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs); } return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{ - ptr_ty.fmt(sema.mod), + ptr_ty.fmt(pt), }); } @@ -31882,10 +32190,11 @@ fn analyzeSlicePtr( slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveValue(slice)) |val| { - if (val.isUndef(mod)) return mod.undefRef(result_ty); + if (val.isUndef(mod)) return pt.undefRef(result_ty); return Air.internedToRef(val.slicePtr(mod).toIntern()); } try sema.requireRuntimeBlock(block, slice_src, null); @@ -31899,11 +32208,12 @@ fn analyzeOptionalSlicePtr( opt_slice: Air.Inst.Ref, opt_slice_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const result_ty = opt_slice_ty.optionalChild(mod).slicePtrFieldType(mod); if (try sema.resolveValue(opt_slice)) |opt_val| { - if (opt_val.isUndef(mod)) return mod.undefRef(result_ty); + if (opt_val.isUndef(mod)) return pt.undefRef(result_ty); const slice_ptr: InternPool.Index = if (opt_val.optionalValue(mod)) |val| val.slicePtr(mod).toIntern() else @@ -31924,12 +32234,13 @@ fn analyzeSliceLen( src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (try sema.resolveValue(slice_inst)) |slice_val| { if (slice_val.isUndef(mod)) { - return mod.undefRef(Type.usize); + return pt.undefRef(Type.usize); } - return mod.intRef(Type.usize, try slice_val.sliceLen(mod)); + return pt.intRef(Type.usize, try slice_val.sliceLen(pt)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); @@ -31942,11 +32253,12 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const result_ty = Type.bool; if (try sema.resolveValue(operand)) |opt_val| { if (opt_val.isUndef(mod)) { - return mod.undefRef(result_ty); + return pt.undefRef(result_ty); } const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; @@ -31972,7 +32284,8 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ptr_ty = sema.typeOf(operand); assert(ptr_ty.zigTypeTag(mod) == .Pointer); const child_ty = ptr_ty.childType(mod); @@ -31994,7 +32307,8 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const operand_ty = sema.typeOf(operand); const ot = operand_ty.zigTypeTag(mod); @@ -32014,7 +32328,7 @@ fn analyzeIsNonErrComptimeOnly( else => {}, } } else if (operand == .undef) { - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); } else if (@intFromEnum(operand) < InternPool.static_len) { // None of the ref tags can be errors. 
return .bool_true; @@ -32098,7 +32412,7 @@ fn analyzeIsNonErrComptimeOnly( if (maybe_operand_val) |err_union| { if (err_union.isUndef(mod)) { - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); } if (err_union.getErrorName(mod) == .none) { return .bool_true; @@ -32153,13 +32467,14 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.childType(mod), - else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}), + else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(pt)}), }; var array_ty = ptr_ptr_child_ty; @@ -32210,8 +32525,8 @@ fn analyzeSlice( msg, "expected '{}', found '{}'", .{ - Value.zero_comptime_int.fmtValue(mod, sema), - start_value.fmtValue(mod, sema), + Value.zero_comptime_int.fmtValue(pt, sema), + start_value.fmtValue(pt, sema), }, ); break :msg msg; @@ -32226,8 +32541,8 @@ fn analyzeSlice( msg, "expected '{}', found '{}'", .{ - Value.one_comptime_int.fmtValue(mod, sema), - end_value.fmtValue(mod, sema), + Value.one_comptime_int.fmtValue(pt, sema), + end_value.fmtValue(pt, sema), }, ); break :msg msg; @@ -32240,17 +32555,17 @@ fn analyzeSlice( block, end_src, "end index {} out of bounds for slice of single-item pointer", - .{end_value.fmtValue(mod, sema)}, + .{end_value.fmtValue(pt, sema)}, ); } } - array_ty = try mod.arrayType(.{ + array_ty = try pt.arrayType(.{ .len = 1, .child = double_child_ty.toIntern(), }); const ptr_info = ptr_ptr_child_ty.ptrInfo(mod); - slice_ty = try mod.ptrType(.{ + slice_ty = try pt.ptrType(.{ .child = array_ty.toIntern(), .flags = .{ .alignment = ptr_info.flags.alignment, @@ -32286,7 +32601,7 @@ fn analyzeSlice( elem_ty = ptr_ptr_child_ty.childType(mod); }, }, - else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), + else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(pt)}), } const ptr = if (slice_ty.isSlice(mod)) @@ -32297,7 +32612,7 @@ fn analyzeSlice( assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); + break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); @@ -32311,7 +32626,7 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { - const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); + const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -32320,7 +32635,7 @@ fn analyzeSlice( break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { - const len_s_val = try mod.intValue( + const len_s_val = try pt.intValue( Type.usize, 
array_ty.arrayLenIncludingSentinel(mod), ); @@ -32335,8 +32650,8 @@ fn analyzeSlice( end_src, "end index {} out of bounds for array of length {}{s}", .{ - end_val.fmtValue(mod, sema), - len_val.fmtValue(mod, sema), + end_val.fmtValue(pt, sema), + len_val.fmtValue(pt, sema), sentinel_label, }, ); @@ -32366,9 +32681,9 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - const slice_len = try slice_val.sliceLen(mod); + const slice_len = try slice_val.sliceLen(pt); const len_plus_sent = slice_len + @intFromBool(has_sentinel); - const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); + const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" @@ -32380,8 +32695,8 @@ fn analyzeSlice( end_src, "end index {} out of bounds for slice of length {d}{s}", .{ - end_val.fmtValue(mod, sema), - try slice_val.sliceLen(mod), + end_val.fmtValue(pt, sema), + try slice_val.sliceLen(pt), sentinel_label, }, ); @@ -32390,7 +32705,7 @@ fn analyzeSlice( // If the slice has a sentinel, we consider end_is_len // is only true if it equals the length WITHOUT the // sentinel, so we don't add a sentinel type. - const slice_len_val = try mod.intValue(Type.usize, slice_len); + const slice_len_val = try pt.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } @@ -32440,21 +32755,21 @@ fn analyzeSlice( start_src, "start index {} is larger than end index {}", .{ - start_val.fmtValue(mod, sema), - end_val.fmtValue(mod, sema), + start_val.fmtValue(pt, sema), + end_val.fmtValue(pt, sema), }, ); } checked_start_lte_end = true; if (try sema.resolveValue(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(mod).?; - const end_int = end_val.getUnsignedInt(mod).?; + const start_int = start_val.getUnsignedInt(pt).?; + const end_int = end_val.getUnsignedInt(pt).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const many_ptr_ty = try mod.manyConstPtrType(elem_ty); - const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); - const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod); + const many_ptr_ty = try pt.manyConstPtrType(elem_ty); + const many_ptr_val = try pt.getCoerced(ptr_val, many_ptr_ty); + const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, pt); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -32463,13 +32778,13 @@ fn analyzeSlice( block, src, "comptime dereference requires '{}' to have a well-defined layout", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, end_src, "slice end index {d} exceeds bounds of containing decl of type '{}'", - .{ end_int, ty.fmt(mod) }, + .{ end_int, ty.fmt(pt) }, ), }; @@ -32478,8 +32793,8 @@ fn analyzeSlice( const msg = try sema.errMsg(src, "value in memory does not match slice sentinel", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "expected '{}', found '{}'", .{ - expected_sentinel.fmtValue(mod, sema), - actual_sentinel.fmtValue(mod, sema), + expected_sentinel.fmtValue(pt, sema), + actual_sentinel.fmtValue(pt, sema), }); break :msg msg; @@ -32501,7 +32816,7 @@ fn 
analyzeSlice( assert(!block.is_comptime); try sema.requireRuntimeBlock(block, src, runtime_src.?); const ok = try block.addBinOp(.cmp_lte, start, end); - if (!sema.mod.comp.formatted_panics) { + if (!pt.zcu.comp.formatted_panics) { try sema.addSafetyCheck(block, src, ok, .start_index_greater_than_end); } else { try sema.safetyCheckFormatted(block, src, ok, "panicStartGreaterThanEnd", &.{ start, end }); @@ -32517,10 +32832,10 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = try new_len_val.toUnsignedIntSema(mod); + const new_len_int = try new_len_val.toUnsignedIntSema(pt); - const return_ty = try mod.ptrTypeSema(.{ - .child = (try mod.arrayType(.{ + const return_ty = try pt.ptrTypeSema(.{ + .child = (try pt.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, .child = elem_ty.toIntern(), @@ -32546,7 +32861,7 @@ fn analyzeSlice( bounds_check: { const actual_len = if (array_ty.zigTypeTag(mod) == .Array) - try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) + try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) l: { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); break :l if (slice_ty.sentinel(mod) == null) @@ -32570,18 +32885,18 @@ fn analyzeSlice( }; if (!new_ptr_val.isUndef(mod)) { - return Air.internedToRef((try mod.getCoerced(new_ptr_val, return_ty)).toIntern()); + return Air.internedToRef((try pt.getCoerced(new_ptr_val, return_ty)).toIntern()); } // Special case: @as([]i32, undefined)[x..x] if (new_len_int == 0) { - return mod.undefRef(return_ty); + return pt.undefRef(return_ty); } return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try mod.ptrTypeSema(.{ + const return_ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ @@ -32604,12 +32919,12 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) - try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) + try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel - break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); + break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); @@ -32657,7 +32972,8 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); @@ -32696,12 +33012,12 @@ fn cmpNumeric( } if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); } if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } - return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema)) + return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, pt, .sema)) .bool_true else .bool_false; @@ -32770,11 +33086,11 @@ fn cmpNumeric( // a signed integer with mantissa bits 
+ 1, and if there was any non-integral part of the float, // add/subtract 1. const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| - !(try lhs_val.compareAllWithZeroSema(.gte, mod)) + !(try lhs_val.compareAllWithZeroSema(.gte, pt)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| - !(try rhs_val.compareAllWithZeroSema(.gte, mod)) + !(try rhs_val.compareAllWithZeroSema(.gte, pt)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; @@ -32784,7 +33100,7 @@ fn cmpNumeric( var lhs_bits: usize = undefined; if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| { if (lhs_val.isUndef(mod)) - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); if (lhs_val.isNan(mod)) switch (op) { .neq => return .bool_true, else => return .bool_false, @@ -32796,7 +33112,7 @@ fn cmpNumeric( .lt, .lte => return if (lhs_val.isNegativeInf(mod)) .bool_true else .bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero(mod)) { + switch (lhs_val.orderAgainstZero(pt)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return .bool_true, @@ -32818,7 +33134,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); + var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, pt)); defer bigint.deinit(); if (lhs_val.floatHasFraction(mod)) { if (lhs_is_signed) { @@ -32829,7 +33145,7 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits = lhs_val.intBitCountTwosComp(mod); + lhs_bits = lhs_val.intBitCountTwosComp(pt); } lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { @@ -32842,7 +33158,7 @@ fn cmpNumeric( var rhs_bits: usize = undefined; if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) - return mod.undefRef(Type.bool); + return pt.undefRef(Type.bool); if (rhs_val.isNan(mod)) switch (op) { .neq => return .bool_true, else => return .bool_false, @@ -32854,7 +33170,7 @@ fn cmpNumeric( .lt, .lte => return if (rhs_val.isNegativeInf(mod)) .bool_false else .bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero(mod)) { + switch (rhs_val.orderAgainstZero(pt)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return .bool_true, @@ -32876,7 +33192,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); + var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, pt)); defer bigint.deinit(); if (rhs_val.floatHasFraction(mod)) { if (rhs_is_signed) { @@ -32887,7 +33203,7 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(mod); + rhs_bits = rhs_val.intBitCountTwosComp(pt); } rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { @@ -32901,7 +33217,7 @@ fn cmpNumeric( const max_bits = @max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try mod.intType(signedness, casted_bits); + break :blk try pt.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try 
sema.coerce(block, dest_ty, rhs, rhs_src); @@ -32920,9 +33236,10 @@ fn compareIntsOnlyPossibleResult( op: std.math.CompareOperator, rhs_ty: Type, ) Allocator.Error!?bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const rhs_info = rhs_ty.intInfo(mod); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable; + const vs_zero = lhs_val.orderAgainstZeroAdvanced(pt, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -32954,7 +33271,7 @@ fn compareIntsOnlyPossibleResult( }; const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(pt) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -32981,11 +33298,11 @@ fn compareIntsOnlyPossibleResult( if (req_bits != rhs_info.bits) break :edge .{ false, false }; - const ty = try mod.intType( + const ty = try pt.intType( if (is_negative) .signed else .unsigned, @intCast(req_bits), ); - const pop_count = lhs_val.popCount(ty, mod); + const pop_count = lhs_val.popCount(ty, pt); if (is_negative) { break :edge .{ pop_count == 1, false }; @@ -33015,7 +33332,8 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); assert(lhs_ty.zigTypeTag(mod) == .Vector); @@ -33026,7 +33344,7 @@ fn cmpVector( const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try mod.vectorType(.{ + const result_ty = try pt.vectorType(.{ .len = lhs_ty.vectorLen(mod), .child = .bool_type, }); @@ -33035,7 +33353,7 @@ fn cmpVector( if (try sema.resolveValue(casted_lhs)) |lhs_val| { if (try sema.resolveValue(casted_rhs)) |rhs_val| { if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { - return mod.undefRef(result_ty); + return pt.undefRef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); return Air.internedToRef(cmp_val.toIntern()); @@ -33059,7 +33377,7 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveValue(inst)) |val| { - return Air.internedToRef((try sema.mod.intern(.{ .opt = .{ + return Air.internedToRef((try sema.pt.intern(.{ .opt = .{ .ty = dest_ty.toIntern(), .val = val.toIntern(), } }))); @@ -33076,11 +33394,12 @@ fn wrapErrorUnionPayload( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveValue(coerced)) |val| { - return Air.internedToRef((try mod.intern(.{ .error_union = .{ + return Air.internedToRef((try pt.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), .val = .{ .payload = val.toIntern() }, } }))); @@ -33096,7 +33415,8 @@ fn wrapErrorUnionSet( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); @@ -33140,7 +33460,7 @@ fn wrapErrorUnionSet( else 
=> unreachable, }, } - return Air.internedToRef((try mod.intern(.{ .error_union = .{ + return Air.internedToRef((try pt.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), .val = .{ .err_name = expected_name }, } }))); @@ -33158,14 +33478,15 @@ fn unionToTag( un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } if (try sema.resolveValue(un)) |un_val| { const tag_val = un_val.unionTag(mod).?; if (tag_val.isUndef(mod)) - return try mod.undefRef(enum_ty); + return try pt.undefRef(enum_ty); return Air.internedToRef(tag_val.toIntern()); } try sema.requireRuntimeBlock(block, un_src, null); @@ -33399,7 +33720,7 @@ const PeerResolveResult = union(enum) { instructions: []const Air.Inst.Ref, candidate_srcs: PeerTypeCandidateSrc, ) !*Module.ErrorMsg { - const mod = sema.mod; + const pt = sema.pt; var opt_msg: ?*Module.ErrorMsg = null; errdefer if (opt_msg) |msg| msg.destroy(sema.gpa); @@ -33425,7 +33746,7 @@ const PeerResolveResult = union(enum) { }, .field_error => |field_error| { const fmt = "struct field '{}' has conflicting types"; - const args = .{field_error.field_name.fmt(&mod.intern_pool)}; + const args = .{field_error.field_name.fmt(&pt.zcu.intern_pool)}; if (opt_msg) |msg| { try sema.errNote(src, msg, fmt, args); } else { @@ -33457,8 +33778,8 @@ const PeerResolveResult = union(enum) { const fmt = "incompatible types: '{}' and '{}'"; const args = .{ - conflict_tys[0].fmt(mod), - conflict_tys[1].fmt(mod), + conflict_tys[0].fmt(pt), + conflict_tys[1].fmt(pt), }; const msg = if (opt_msg) |msg| msg: { try sema.errNote(src, msg, fmt, args); @@ -33469,8 +33790,8 @@ const PeerResolveResult = union(enum) { break :msg msg; }; - if (conflict_srcs[0]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(mod)}); - if (conflict_srcs[1]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(mod)}); + if (conflict_srcs[0]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(pt)}); + if (conflict_srcs[1]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(pt)}); // No child error break; @@ -33517,7 +33838,8 @@ fn resolvePeerTypesInner( peer_tys: []?Type, peer_vals: []?Value, ) !PeerResolveResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; var strat_reason: usize = 0; @@ -33581,7 +33903,7 @@ fn resolvePeerTypesInner( .payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip), .err_name => val_ptr.* = null, }, - .undef => val_ptr.* = Value.fromInterned((try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() }))), + .undef => val_ptr.* = Value.fromInterned(try pt.intern(.{ .undef = ty_ptr.*.?.toIntern() })), else => unreachable, }; break :blk set_ty; @@ -33604,7 +33926,7 @@ fn resolvePeerTypesInner( .success => |ty| ty, else => |result| return result, }; - return .{ .success = try mod.errorUnionType(final_set.?, final_payload) }; + return .{ .success = try pt.errorUnionType(final_set.?, final_payload) }; }, .nullable => { @@ -33642,7 +33964,7 @@ fn resolvePeerTypesInner( .success => |ty| ty, else => |result| return result, }; - return .{ .success = try mod.optionalType(child_ty.toIntern()) }; + return .{ .success = try pt.optionalType(child_ty.toIntern()) }; }, .array => { @@ -33730,7 +34052,7 @@ fn resolvePeerTypesInner( // There should always be at least one array 
or vector peer assert(opt_first_arr_idx != null); - return .{ .success = try mod.arrayType(.{ + return .{ .success = try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern(), .sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none, @@ -33792,7 +34114,7 @@ fn resolvePeerTypesInner( else => |result| return result, }; - return .{ .success = try mod.vectorType(.{ + return .{ .success = try pt.vectorType(.{ .len = @intCast(len.?), .child = child_ty.toIntern(), }) }; @@ -33844,8 +34166,8 @@ fn resolvePeerTypesInner( }).toIntern(); if (ptr_info.sentinel != .none and peer_info.sentinel != .none) { - const peer_sent = try ip.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child); - const ptr_sent = try ip.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child); + const peer_sent = try ip.getCoerced(sema.gpa, pt.tid, ptr_info.sentinel, ptr_info.child); + const ptr_sent = try ip.getCoerced(sema.gpa, pt.tid, peer_info.sentinel, ptr_info.child); if (ptr_sent == peer_sent) { ptr_info.sentinel = ptr_sent; } else { @@ -33860,12 +34182,12 @@ fn resolvePeerTypesInner( if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else - Type.fromInterned(ptr_info.child).abiAlignment(mod), + Type.fromInterned(ptr_info.child).abiAlignment(pt), if (peer_info.flags.alignment != .none) peer_info.flags.alignment else - Type.fromInterned(peer_info.child).abiAlignment(mod), + Type.fromInterned(peer_info.child).abiAlignment(pt), ); if (ptr_info.flags.address_space != peer_info.flags.address_space) { return .{ .conflict = .{ @@ -33888,7 +34210,7 @@ fn resolvePeerTypesInner( opt_ptr_info = ptr_info; } - return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; + return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { @@ -34004,7 +34326,7 @@ fn resolvePeerTypesInner( if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| { // *[n:x]T + *[n:y]T = *[n]T if (cur_arr.len == peer_arr.len) { - ptr_info.child = (try mod.arrayType(.{ + ptr_info.child = (try pt.arrayType(.{ .len = cur_arr.len, .child = elem_ty.toIntern(), })).toIntern(); @@ -34148,12 +34470,12 @@ fn resolvePeerTypesInner( no_sentinel: { if (peer_sentinel == .none) break :no_sentinel; if (cur_sentinel == .none) break :no_sentinel; - const peer_sent_coerced = try ip.getCoerced(sema.gpa, peer_sentinel, sentinel_ty); - const cur_sent_coerced = try ip.getCoerced(sema.gpa, cur_sentinel, sentinel_ty); + const peer_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, peer_sentinel, sentinel_ty); + const cur_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, cur_sentinel, sentinel_ty); if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel; // Sentinels match if (ptr_info.flags.size == .One) switch (ip.indexToKey(ptr_info.child)) { - .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{ + .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{ .len = array_type.len, .child = array_type.child, .sentinel = cur_sent_coerced, @@ -34167,7 +34489,7 @@ fn resolvePeerTypesInner( // Clear existing sentinel ptr_info.sentinel = .none; switch (ip.indexToKey(ptr_info.child)) { - .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{ + .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{ .len = array_type.len, .child = array_type.child, .sentinel = .none, @@ -34198,7 +34520,7 @@ fn resolvePeerTypesInner( }, } - return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; + return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) 
}; }, .func => { @@ -34517,7 +34839,7 @@ fn resolvePeerTypesInner( continue; }; peer_field_ty.* = ty.structFieldType(field_index, mod); - peer_field_val.* = if (opt_val) |val| try val.fieldValue(mod, field_index) else null; + peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null; } // Resolve field type recursively @@ -34555,9 +34877,9 @@ fn resolvePeerTypesInner( var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; - try struct_ty.resolveStructFieldInits(mod); + try struct_ty.resolveStructFieldInits(pt); - const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse { + const uncoerced_field_val = try struct_ty.structFieldValueComptime(pt, field_index) orelse { comptime_val = null; break; }; @@ -34584,7 +34906,7 @@ fn resolvePeerTypesInner( field_val.* = if (comptime_val) |v| v.toIntern() else .none; } - const final_ty = try ip.getAnonStructType(mod.gpa, .{ + const final_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{ .types = field_types, .names = if (is_tuple) &.{} else field_names, .values = field_vals, @@ -34628,13 +34950,15 @@ fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1 } fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type { + const target = sema.pt.zcu.getTarget(); + // ty_b -> ty_a - if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, sema.mod.getTarget(), src, src)) { + if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, target, src, src)) { return ty_a; } // ty_a -> ty_b - if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, sema.mod.getTarget(), src, src)) { + if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, target, src, src)) { return ty_b; } @@ -34647,7 +34971,8 @@ const ArrayLike = struct { elem_ty: Type, }; fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; return switch (ty.zigTypeTag(mod)) { .Array => .{ .len = ty.arrayLen(mod), @@ -34676,7 +35001,8 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { } pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; if (sema.fn_ret_ty_ies) |ies| { @@ -34687,26 +35013,27 @@ pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void } pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; - try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod); + try Type.fromInterned(fn_ty_info.return_type).resolveFully(pt); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. - _ = try mod.getBuiltinType("StackTrace"); + _ = try pt.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { - try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod); + try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(pt); } } fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { - return val.resolveLazy(sema.arena, sema.mod); + return val.resolveLazy(sema.arena, sema.pt); } /// Resolve a struct's alignment only without triggering resolution of its layout. 
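Every hunk in this file applies one mechanical migration: helpers that construct types and values (`intValue`, `intern`, `ptrTypeSema`, `arrayType`, `undefRef`, and friends) move off the `Zcu` reached through `sema.mod` and onto the per-thread handle `sema.pt: Zcu.PerThread`, which bundles the `Zcu` pointer with a thread id that `InternPool` getters now take explicitly (see `ip.getCoerced(gpa, pt.tid, ...)` above). The standalone Zig sketch below models that shape with toy types; `Zcu`, `PerThread`, `InternPool`, and `intValue` here are simplified stand-ins for illustration, not the compiler's real definitions.

const std = @import("std");

// Toy stand-in for the compiler's InternPool: getters take an explicit
// thread id, mirroring calls like `ip.getCoerced(gpa, pt.tid, ...)`.
const InternPool = struct {
    fn intValue(ip: *InternPool, tid: u8, value: i64) u64 {
        _ = ip;
        _ = tid; // a real pool would use the tid to pick per-thread state
        return @intCast(value);
    }
};

// Toy stand-in for the compilation unit (`Zcu`).
const Zcu = struct {
    intern_pool: InternPool = .{},
};

// Toy stand-in for `Zcu.PerThread`: a `Zcu` pointer plus a thread id.
const PerThread = struct {
    zcu: *Zcu,
    tid: u8,

    // Construction helpers live on the per-thread handle rather than on
    // `Zcu`, so they can thread `tid` through to the intern pool.
    fn intValue(pt: PerThread, value: i64) u64 {
        return pt.zcu.intern_pool.intValue(pt.tid, value);
    }
};

pub fn main() void {
    var zcu: Zcu = .{};
    const pt: PerThread = .{ .zcu = &zcu, .tid = 0 };
    // Before the migration such a call would have gone through `zcu`
    // directly; afterwards it goes through `pt`.
    std.debug.print("{d}\n", .{pt.intValue(42)});
}

Routing construction through a per-thread handle like this lets each worker build interned values against per-thread state rather than one global pool; the hunks themselves only show the mechanical API change, so that motivation is inferred, not stated.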
@@ -34716,7 +35043,8 @@ pub fn resolveStructAlignment( ty: InternPool.Index, struct_type: InternPool.LoadedStructType, ) SemaError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -34754,7 +35082,7 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; - const field_align = try mod.structFieldAlignmentAdvanced( + const field_align = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, @@ -34767,7 +35095,8 @@ pub fn resolveStructAlignment( } pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; @@ -34776,10 +35105,10 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { if (struct_type.haveLayout(ip)) return; - try ty.resolveFields(zcu); + try ty.resolveFields(pt); if (struct_type.layout == .@"packed") { - semaBackingIntType(zcu, struct_type) catch |err| switch (err) { + semaBackingIntType(pt, struct_type) catch |err| switch (err) { error.OutOfMemory, error.AnalysisFail => |e| return e, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; @@ -34790,7 +35119,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct '{}' depends on itself", - .{ty.fmt(zcu)}, + .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); } @@ -34818,7 +35147,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { }, else => return err, }; - field_align.* = try zcu.structFieldAlignmentAdvanced( + field_align.* = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, @@ -34911,7 +35240,8 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { _ = try sema.typeRequiresComptime(ty); } -fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) CompileError!void { +fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructType) CompileError!void { + const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -34927,7 +35257,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, @@ -34958,7 +35288,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(zcu, .sema); + accumulator += try field_ty.bitSizeAdvanced(pt, .sema); } break :blk accumulator; }; @@ -35004,7 +35334,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi if (fields_bit_sum > std.math.maxInt(u16)) { return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - const backing_int_ty = try zcu.intType(.unsigned, @intCast(fields_bit_sum)); + const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } @@ -35012,26 
+35342,27 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (!backing_int_ty.isInt(mod)) { - return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); + return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(pt)}); } - if (backing_int_ty.bitSize(mod) != fields_bit_sum) { + if (backing_int_ty.bitSize(pt) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, + .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(pt), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - const mod = sema.mod; - if (!ty.isIndexable(mod)) { + const pt = sema.pt; + if (!ty.isIndexable(pt.zcu)) { const msg = msg: { - const msg = try sema.errMsg(src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "type '{}' does not support indexing", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "operand must be an array, slice, tuple, or vector", .{}); break :msg msg; @@ -35041,7 +35372,8 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Pointer) { switch (ty.ptrSize(mod)) { .Slice, .Many, .C => return, @@ -35054,7 +35386,7 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void } } const msg = msg: { - const msg = try sema.errMsg(src, "type '{}' is not an indexable pointer", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(src, "type '{}' is not an indexable pointer", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "operand must be a slice, a many pointer or a pointer to an array", .{}); break :msg msg; @@ -35069,9 +35401,9 @@ pub fn resolveUnionAlignment( ty: Type, union_type: InternPool.LoadedUnionType, ) SemaError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - const target = mod.getTarget(); + const zcu = sema.pt.zcu; + const ip = &zcu.intern_pool; + const target = zcu.getTarget(); assert(sema.ownerUnit().unwrap().decl == union_type.decl); @@ -35108,8 +35440,8 @@ pub fn resolveUnionAlignment( /// This logic must be kept in sync with `Module.getUnionLayout`. 
pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { - const zcu = sema.mod; - const ip = &zcu.intern_pool; + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index)); @@ -35122,9 +35454,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try sema.errMsg( - ty.srcLoc(zcu), + ty.srcLoc(pt.zcu), "union '{}' depends on itself", - .{ty.fmt(zcu)}, + .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); }, @@ -35143,7 +35475,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { for (0..union_type.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); - if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(zcu) == .NoReturn) continue; // TODO: should this affect alignment? + if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment? max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) { error.AnalysisFail => { @@ -35185,7 +35517,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { } else { // {Payload, Tag} size += max_size; - size = switch (zcu.getTarget().ofmt) { + size = switch (pt.zcu.getTarget().ofmt) { .c => max_align, else => tag_align, }.forward(size); @@ -35205,7 +35537,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { const msg = try sema.errMsg( - ty.srcLoc(zcu), + ty.srcLoc(pt.zcu), "union layout depends on it having runtime bits", .{}, ); @@ -35213,10 +35545,10 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { } if (union_type.flagsPtr(ip).assumed_pointer_aligned and - alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8)))) + alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8)))) { const msg = try sema.errMsg( - ty.srcLoc(zcu), + ty.srcLoc(pt.zcu), "union layout depends on being pointer aligned", .{}, ); @@ -35229,7 +35561,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; @@ -35244,14 +35577,15 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try field_ty.resolveFully(mod); + try field_ty.resolveFully(pt); } } pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveUnionLayout(ty); - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; @@ -35272,7 +35606,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { union_obj.flagsPtr(ip).status = .fully_resolved_wip; for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try field_ty.resolveFully(mod); + try field_ty.resolveFully(pt); } union_obj.flagsPtr(ip).status = .fully_resolved; } @@ -35286,7 +35620,8 @@ pub fn resolveTypeFieldsStruct( 
ty: InternPool.Index, struct_type: InternPool.LoadedStructType, ) SemaError!void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; // If there is no owner decl it means the struct has no fields. const owner_decl = struct_type.decl.unwrap() orelse return; @@ -35310,13 +35645,13 @@ pub fn resolveTypeFieldsStruct( const msg = try sema.errMsg( Type.fromInterned(ty).srcLoc(zcu), "struct '{}' depends on itself", - .{Type.fromInterned(ty).fmt(zcu)}, + .{Type.fromInterned(ty).fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); } defer struct_type.clearTypesWip(ip); - semaStructFields(zcu, sema.arena, struct_type) catch |err| switch (err) { + semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) { error.AnalysisFail => { if (zcu.declPtr(owner_decl).analysis == .complete) { zcu.declPtr(owner_decl).analysis = .dependency_failure; @@ -35329,7 +35664,8 @@ pub fn resolveTypeFieldsStruct( } pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; const owner_decl = struct_type.decl.unwrap() orelse return; @@ -35345,13 +35681,13 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct '{}' depends on itself", - .{ty.fmt(zcu)}, + .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); } defer struct_type.clearInitsWip(ip); - semaStructFieldInits(zcu, sema.arena, struct_type) catch |err| switch (err) { + semaStructFieldInits(pt, sema.arena, struct_type) catch |err| switch (err) { error.AnalysisFail => { if (zcu.declPtr(owner_decl).analysis == .complete) { zcu.declPtr(owner_decl).analysis = .dependency_failure; @@ -35365,7 +35701,8 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { } pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const owner_decl = zcu.declPtr(union_type.decl); @@ -35387,7 +35724,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load const msg = try sema.errMsg( ty.srcLoc(zcu), "union '{}' depends on itself", - .{ty.fmt(zcu)}, + .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); }, @@ -35401,7 +35738,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load union_type.flagsPtr(ip).status = .field_types_wip; errdefer union_type.flagsPtr(ip).status = .none; - semaUnionFields(zcu, sema.arena, union_type) catch |err| switch (err) { + semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) { error.AnalysisFail => { if (owner_decl.analysis == .complete) { owner_decl.analysis = .dependency_failure; @@ -35422,7 +35759,8 @@ fn resolveInferredErrorSet( src: LazySrcLoc, ies_index: InternPool.Index, ) CompileError!InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const func_index = ip.iesFuncIndex(ies_index); const func = mod.funcInfo(func_index); @@ -35482,8 +35820,8 @@ pub fn resolveInferredErrorSetPtr( src: LazySrcLoc, ies: *InferredErrorSet, ) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; if (ies.resolved != .none) return; @@ -35505,7 +35843,7 @@ pub fn 
resolveInferredErrorSetPtr( } } - const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys()); + const resolved_error_set_ty = try pt.errorSetFromUnsortedNames(ies.errors.keys()); ies.resolved = resolved_error_set_ty.toIntern(); } @@ -35515,12 +35853,13 @@ fn resolveAdHocInferredErrorSet( src: LazySrcLoc, value: InternPool.Index, ) CompileError!InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value)); if (new_ty == .none) return value; - return ip.getCoerced(gpa, value, new_ty); + return ip.getCoerced(gpa, pt.tid, value, new_ty); } fn resolveAdHocInferredErrorSetTy( @@ -35530,8 +35869,8 @@ fn resolveAdHocInferredErrorSetTy( ty: InternPool.Index, ) CompileError!InternPool.Index { const ies = sema.fn_ret_ty_ies orelse return .none; - const mod = sema.mod; - const gpa = sema.gpa; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const error_union_info = switch (ip.indexToKey(ty)) { .error_union_type => |x| x, @@ -35541,7 +35880,7 @@ fn resolveAdHocInferredErrorSetTy( return .none; try sema.resolveInferredErrorSetPtr(block, src, ies); - const new_ty = try ip.get(gpa, .{ .error_union_type = .{ + const new_ty = try pt.intern(.{ .error_union_type = .{ .error_set_type = ies.resolved, .payload_type = error_union_info.payload_type, } }); @@ -35554,7 +35893,8 @@ fn resolveInferredErrorSetTy( src: LazySrcLoc, ty: InternPool.Index, ) CompileError!InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; if (ty == .anyerror_type) return ty; switch (ip.indexToKey(ty)) { @@ -35614,10 +35954,11 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct { } fn semaStructFields( - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, struct_type: InternPool.LoadedStructType, ) CompileError!void { + const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const decl_index = struct_type.decl.unwrap() orelse return; @@ -35630,7 +35971,7 @@ fn semaStructFields( if (fields_len == 0) switch (struct_type.layout) { .@"packed" => { - try semaBackingIntType(zcu, struct_type); + try semaBackingIntType(pt, struct_type); return; }, .auto, .@"extern" => { @@ -35644,7 +35985,7 @@ fn semaStructFields( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = arena, .code = zir, @@ -35789,7 +36130,7 @@ fn semaStructFields( switch (struct_type.layout) { .@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); + const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field); @@ -35801,7 +36142,7 @@ fn semaStructFields( }, .@"packed" => if (!try sema.validatePackedType(field_ty)) { const msg = msg: { - const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); + const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -35837,10 +36178,11 @@ fn semaStructFields( // This logic must be kept in sync with 
`semaStructFields` fn semaStructFieldInits( - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, struct_type: InternPool.LoadedStructType, ) CompileError!void { + const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -35857,7 +36199,7 @@ fn semaStructFieldInits( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = arena, .code = zir, @@ -35977,10 +36319,11 @@ fn semaStructFieldInits( try sema.flushExports(); } -fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { +fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { const tracy = trace(@src()); defer tracy.end(); + const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const decl_index = union_type.decl; @@ -36034,7 +36377,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = arena, .code = zir, @@ -36081,17 +36424,17 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; if (int_tag_ty.zigTypeTag(zcu) != .Int and int_tag_ty.zigTypeTag(zcu) != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(zcu)}); + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(pt)}); } if (fields_len > 0) { - const field_count_val = try zcu.intValue(Type.comptime_int, fields_len - 1); + const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1); if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(zcu), + int_tag_ty.fmt(pt), fields_len - 1, }); break :msg msg; @@ -36106,7 +36449,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni union_type.tagTypePtr(ip).* = provided_ty.toIntern(); const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) { .enum_type => ip.loadEnumType(provided_ty.toIntern()), - else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(zcu)}), + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}), }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. 
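The same migration shows up in error reporting: format strings that previously rendered a type with `ty.fmt(zcu)` or a value with `val.fmtValue(zcu, ...)` now pass `pt`, because whatever the formatter prints may need per-thread access to interned state. Below is a minimal sketch of that pattern using the standard Zig custom-format protocol of this era; `Ty`, `TyFormatter`, and `PerThread` are illustrative stand-ins, not the compiler's types.

const std = @import("std");

const PerThread = struct { tid: u8 };

const Ty = struct {
    name: []const u8,

    // Mirror of `Type.fmt(pt)`: bundle the value with the context it needs
    // so that a plain `{}` placeholder can render it later.
    fn fmt(ty: Ty, pt: PerThread) TyFormatter {
        return .{ .ty = ty, .pt = pt };
    }
};

const TyFormatter = struct {
    ty: Ty,
    pt: PerThread,

    pub fn format(
        self: TyFormatter,
        comptime fmt_str: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt_str;
        _ = options;
        // A real implementation would consult interned state through `pt`.
        try writer.print("{s}@tid{d}", .{ self.ty.name, self.pt.tid });
    }
};

pub fn main() void {
    const pt: PerThread = .{ .tid = 3 };
    const ty: Ty = .{ .name = "u32" };
    std.debug.print("expected integer tag type, found '{}'\n", .{ty.fmt(pt)});
}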
@@ -36202,7 +36545,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni const val = if (last_tag_val) |val| try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else - try zcu.intValue(int_tag_ty, 0); + try pt.intValue(int_tag_ty, 0); last_tag_val = val; break :blk val; @@ -36214,7 +36557,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni .offset = .{ .container_field_value = @intCast(gop.index) }, }; const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(zcu, &sema)}); + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(pt, &sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_value_src, msg, "other occurrence here", .{}); break :msg msg; @@ -36244,7 +36587,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*); const enum_index = tag_info.nameIndex(ip, field_name) orelse { return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{ - field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(zcu), + field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(pt), }); }; @@ -36286,7 +36629,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); + const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, type_src, field_ty, .union_field); @@ -36297,7 +36640,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni return sema.failWithOwnedErrorMsg(&block_scope, msg); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { const msg = msg: { - const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)}); + const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, type_src, field_ty); @@ -36366,7 +36709,8 @@ fn generateUnionTagTypeNumbered( enum_field_vals: []const InternPool.Index, union_owner_decl: *Module.Decl, ) !InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -36390,11 +36734,11 @@ fn generateUnionTagTypeNumbered( new_decl.owns_tv = true; new_decl.name_fully_qualified = true; - const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{ + const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .decl = new_decl_index, .owner_union_ty = union_owner_decl.val.toIntern(), .tag_ty = if (enum_field_vals.len == 0) - (try mod.intType(.unsigned, 0)).toIntern() + (try pt.intType(.unsigned, 0)).toIntern() else ip.typeOf(enum_field_vals[0]), .names = enum_field_names, @@ -36404,7 +36748,7 @@ fn generateUnionTagTypeNumbered( new_decl.val = Value.fromInterned(enum_ty); - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return enum_ty; } @@ -36414,7 +36758,8 @@ fn generateUnionTagTypeSimple( enum_field_names: []const InternPool.NullTerminatedString, union_owner_decl: *Module.Decl, 
) !InternPool.Index { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const gpa = sema.gpa; @@ -36438,13 +36783,13 @@ fn generateUnionTagTypeSimple( }; errdefer mod.abortAnonDecl(new_decl_index); - const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{ + const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .decl = new_decl_index, .owner_union_ty = union_owner_decl.val.toIntern(), .tag_ty = if (enum_field_names.len == 0) - (try mod.intType(.unsigned, 0)).toIntern() + (try pt.intType(.unsigned, 0)).toIntern() else - (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), + (try pt.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, .values = &.{}, .tag_mode = .auto, @@ -36454,7 +36799,7 @@ fn generateUnionTagTypeSimple( new_decl.owns_tv = true; new_decl.val = Value.fromInterned(enum_ty); - try mod.finalizeAnonDecl(new_decl_index); + try pt.finalizeAnonDecl(new_decl_index); return enum_ty; } @@ -36464,12 +36809,13 @@ fn generateUnionTagTypeSimple( /// that the types are already resolved. /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; return switch (ty.toIntern()) { .u0_type, .i0_type, - => try zcu.intValue(ty, 0), + => try pt.intValue(ty, 0), .u1_type, .u8_type, .i8_type, @@ -36532,7 +36878,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .anyframe_type => unreachable, .null_type => Value.null, .undefined_type => Value.undef, - .optional_noreturn_type => try zcu.nullValue(ty), + .optional_noreturn_type => try pt.nullValue(ty), .generic_poison_type => error.GenericPoison, .empty_struct_type => Value.empty_struct, // values, not types @@ -36646,16 +36992,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { => switch (ip.indexToKey(ty.toIntern())) { inline .array_type, .vector_type => |seq_type, seq_tag| { const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; - if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, - } }))); + } })); if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| { - return Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = opv.toIntern() }, - } }))); + } })); } return null; }, @@ -36663,17 +37009,17 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .struct_type => { // Resolving the layout first helps to avoid loops. // If the type has a coherent layout, we can recurse through fields safely. - try ty.resolveLayout(zcu); + try ty.resolveLayout(pt); const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and // therefore has one possible value. 
- return Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, - } }))); + } })); } const field_vals = try sema.arena.alloc( @@ -36682,7 +37028,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { - try ty.resolveStructFieldInits(zcu); + try ty.resolveStructFieldInits(pt); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } @@ -36694,10 +37040,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // In this case the struct has no runtime-known fields and // therefore has one possible value. - return Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = field_vals }, - } }))); + } })); }, .anon_struct_type => |tuple| { @@ -36707,28 +37053,28 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // In this case the struct has all comptime-known fields and // therefore has one possible value. // TODO: write something like getCoercedInts to avoid needing to dupe - return Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) }, - } }))); + } })); }, .union_type => { // Resolving the layout first helps to avoid loops. // If the type has a coherent layout, we can recurse through fields safely. - try ty.resolveLayout(zcu); + try ty.resolveLayout(pt); const union_obj = ip.loadUnionType(ty.toIntern()); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse return null; if (union_obj.field_types.len == 0) { - const only = try zcu.intern(.{ .empty_enum_value = ty.toIntern() }); + const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; - const only = try zcu.intern(.{ .un = .{ + const only = try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = tag_val.toIntern(), .val = val_val.toIntern(), @@ -36743,7 +37089,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (enum_type.tag_ty == .comptime_int_type) return null; if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| { - const only = try zcu.intern(.{ .enum_tag = .{ + const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = int_opv.toIntern(), } }); @@ -36753,18 +37099,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, .auto, .explicit => { - if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(zcu)) return null; + if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null; return Value.fromInterned(switch (enum_type.names.len) { - 0 => try zcu.intern(.{ .empty_enum_value = ty.toIntern() }), - 1 => try zcu.intern(.{ .enum_tag = .{ + 0 => try pt.intern(.{ .empty_enum_value = ty.toIntern() }), + 1 => try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = if (enum_type.values.len == 0) - (try zcu.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern() + (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 
0)).toIntern() else - try zcu.intern_pool.getCoercedInts( + try ip.getCoercedInts( zcu.gpa, - zcu.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int, + pt.tid, + ip.indexToKey(enum_type.values.get(ip)[0]).int, enum_type.tag_ty, ), } }), @@ -36782,7 +37129,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); + return sema.getTmpAir().typeOf(inst, &sema.pt.zcu.intern_pool); } pub fn getTmpAir(sema: Sema) Air { @@ -36838,12 +37185,13 @@ fn analyzeComptimeAlloc( var_type: Type, alignment: Alignment, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); - const ptr_type = try mod.ptrTypeSema(.{ + const ptr_type = try pt.ptrTypeSema(.{ .child = var_type.toIntern(), .flags = .{ .alignment = alignment, @@ -36853,7 +37201,7 @@ fn analyzeComptimeAlloc( const alloc = try sema.newComptimeAlloc(block, var_type, alignment); - return Air.internedToRef((try mod.intern(.{ .ptr = .{ + return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_type.toIntern(), .base_addr = .{ .comptime_alloc = alloc }, .byte_offset = 0, @@ -36896,13 +37244,14 @@ pub fn analyzeAsAddressSpace( air_ref: Air.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const coerced = try sema.coerce(block, Type.fromInterned(.address_space_type), air_ref, src); const addrspace_val = try sema.resolveConstDefinedValue(block, src, coerced, .{ .needed_comptime_reason = "address space must be comptime-known", }); const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_val); - const target = sema.mod.getTarget(); + const target = pt.zcu.getTarget(); const arch = target.cpu.arch; const is_nv = arch == .nvptx or arch == .nvptx64; @@ -36946,7 +37295,8 @@ pub fn analyzeAsAddressSpace( /// Returns `null` if the pointer contents cannot be loaded at comptime. 
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { // TODO: audit use sites to eliminate this coercion - const coerced_ptr_val = try sema.mod.getCoerced(ptr_val, ptr_ty); + const pt = sema.pt; + const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty); switch (try sema.pointerDerefExtra(block, src, coerced_ptr_val)) { .runtime_load => return null, .val => |v| return v, @@ -36954,13 +37304,13 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr block, src, "comptime dereference requires '{}' to have a well-defined layout", - .{ty.fmt(sema.mod)}, + .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, src, "dereference of '{}' exceeds bounds of containing decl of type '{}'", - .{ ptr_ty.fmt(sema.mod), ty.fmt(sema.mod) }, + .{ ptr_ty.fmt(pt), ty.fmt(pt) }, ), } } @@ -36973,10 +37323,10 @@ const DerefResult = union(enum) { }; fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value) CompileError!DerefResult { - const zcu = sema.mod; - const ip = &zcu.intern_pool; + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; switch (try sema.loadComptimePtr(block, src, ptr_val)) { - .success => |mv| return .{ .val = try mv.intern(zcu, sema.arena) }, + .success => |mv| return .{ .val = try mv.intern(pt, sema.arena) }, .runtime_load => return .runtime_load, .undef => return sema.failWithUseOfUndef(block, src), .err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}), @@ -37001,7 +37351,8 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// a type has zero bits, which can cause a "foo depends on itself" compile error. /// This logic must be kept in sync with `Type.isPtrLikeOptional`. fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => ty, @@ -37031,27 +37382,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { - return ty.comptimeOnlyAdvanced(sema.mod, .sema); + return ty.comptimeOnlyAdvanced(sema.pt, .sema); } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { - return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) { + return ty.hasRuntimeBitsAdvanced(sema.pt, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { - try ty.resolveLayout(sema.mod); - return ty.abiSize(sema.mod); + const pt = sema.pt; + try ty.resolveLayout(pt); + return ty.abiSize(pt); } pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { - return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar; + return (try ty.abiAlignmentAdvanced(sema.pt, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema); + return ty.fnHasRuntimeBitsAdvanced(sema.pt, .sema); } fn unionFieldIndex( @@ -37061,9 +37413,10 @@ fn unionFieldIndex( field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - try union_ty.resolveFields(mod); + try union_ty.resolveFields(pt); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); @@ -37077,9 +37430,10 @@ fn structFieldIndex( field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - try struct_ty.resolveFields(mod); + try struct_ty.resolveFields(pt); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { @@ -37096,7 +37450,8 @@ fn anonStructFieldIndex( field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| { @@ -37106,20 +37461,21 @@ fn anonStructFieldIndex( else => unreachable, } return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ - field_name.fmt(ip), struct_ty.fmt(sema.mod), + field_name.fmt(ip), struct_ty.fmt(pt), }); } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + const pt = sema.pt; var overflow: usize = undefined; return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { error.Overflow => { - const is_vec = ty.isVector(sema.mod); + const is_vec = ty.isVector(pt.zcu); overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try sema.mod.vectorType(.{ - .len = ty.vectorLen(sema.mod), + const safe_ty = if (is_vec) try pt.vectorType(.{ + .len = ty.vectorLen(pt.zcu), .child = .comptime_int_type, }) else Type.comptime_int; return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { @@ -37132,13 +37488,14 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) } fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; @@ -37148,34 +37505,34 @@ fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi }; scalar.* = val.toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } return sema.intAddScalar(lhs, rhs, ty); } fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { - const mod = sema.mod; + const pt = sema.pt; if (scalar_ty.toIntern() != .comptime_int_type) { const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty); - if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow; return res.wrapped_result; } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); - return mod.intValue_big(scalar_ty, result_bigint.toConst()); + return pt.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. 
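For context, the overflow fallback above re-runs the addition at comptime_int width so the caller can report the true out-of-range result in the diagnostic. A minimal user-level sketch of the behavior this enables (error text approximate):

    comptime {
        const a: u8 = 200;
        const b: u8 = 100;
        _ = a + b; // error: overflow of integer type 'u8' with value '300'
    }
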
@@ -37185,15 +37542,16 @@ fn numberAddWrapScalar( rhs: Value, ty: Type, ) !Value { - const mod = sema.mod; - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty); + const pt = sema.pt; + const mod = pt.zcu; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty); if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { - return Value.floatAdd(lhs, rhs, ty, sema.arena, mod); + return Value.floatAdd(lhs, rhs, ty, sema.arena, pt); } const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty); @@ -37203,13 +37561,14 @@ fn numberAddWrapScalar( /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + const pt = sema.pt; var overflow: usize = undefined; return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { error.Overflow => { - const is_vec = ty.isVector(sema.mod); + const is_vec = ty.isVector(pt.zcu); overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try sema.mod.vectorType(.{ - .len = ty.vectorLen(sema.mod), + const safe_ty = if (is_vec) try pt.vectorType(.{ + .len = ty.vectorLen(pt.zcu), .child = .comptime_int_type, }) else Type.comptime_int; return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { @@ -37222,13 +37581,13 @@ fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) } fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { - const mod = sema.mod; - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + const pt = sema.pt; + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; @@ -37238,34 +37597,34 @@ fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi }; scalar.* = val.toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } return sema.intSubScalar(lhs, rhs, ty); } fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { - const mod = sema.mod; + const pt = sema.pt; if (scalar_ty.toIntern() != .comptime_int_type) { const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty); - if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow; return res.wrapped_result; } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); - return mod.intValue_big(scalar_ty, result_bigint.toConst()); + return pt.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -37275,15 +37634,16 @@ fn numberSubWrapScalar( rhs: Value, ty: Type, ) !Value { - const mod = sema.mod; - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty); + const pt = sema.pt; + const mod = pt.zcu; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty); if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { - return Value.floatSub(lhs, rhs, ty, sema.arena, mod); + return Value.floatSub(lhs, rhs, ty, sema.arena, pt); } const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty); @@ -37296,28 +37656,29 @@ fn intSubWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); const result_data = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return Value.OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + } })), + .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))), + } })), }; } return sema.intSubWithOverflowScalar(lhs, rhs, ty); @@ -37329,29 +37690,30 @@ fn intSubWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const info = ty.intInfo(mod); if (lhs.isUndef(mod) or rhs.isUndef(mod)) { return .{ - .overflow_bit = try mod.undefValue(Type.u1), - .wrapped_result = try mod.undefValue(ty), + .overflow_bit = try pt.undefValue(Type.u1), + .wrapped_result = try pt.undefValue(ty), }; } var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()); + const wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), + .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), .wrapped_result = wrapped_result, }; } @@ -37367,17 +37729,18 @@ fn intFromFloat( int_ty: Type, mode: IntFromFloatMode, ) CompileError!Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (float_ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(sema.mod, i); + const elem_val = try val.elemValue(pt, i); scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(mod), mode)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = int_ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } return sema.intFromFloatScalar(block, src, val, int_ty, mode); } @@ -37415,7 +37778,8 @@ fn intFromFloatScalar( int_ty: Type, mode: IntFromFloatMode, ) CompileError!Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (val.isUndef(mod)) return sema.failWithUseOfUndef(block, src); @@ -37423,32 +37787,32 @@ fn intFromFloatScalar( block, src, "fractional component prevents float value '{}' from coercion to type '{}'", - .{ val.fmtValue(mod, sema), int_ty.fmt(mod) }, + .{ val.fmtValue(pt, sema), int_ty.fmt(pt) }, ); - const float = val.toFloat(f128, mod); + const float = val.toFloat(f128, pt); if (std.math.isNan(float)) { return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{ - int_ty.fmt(sema.mod), + int_ty.fmt(pt), }); } if (std.math.isInf(float)) { return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{ - int_ty.fmt(sema.mod), + int_ty.fmt(pt), }); } var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst()); + const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst()); if (!(try sema.intFitsInType(cti_result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ - val.fmtValue(sema.mod, sema), int_ty.fmt(sema.mod), + val.fmtValue(pt, sema), int_ty.fmt(pt), }); } - return mod.getCoerced(cti_result, int_ty); + return pt.getCoerced(cti_result, int_ty); } /// Asserts the value is an integer, and the destination type is ComptimeInt or Int. @@ -37461,7 +37825,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.toIntern() == .comptime_int_type) return true; const info = ty.intInfo(mod); switch (val.toIntern()) { @@ -37528,22 +37893,23 @@ fn intFitsInType( } fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { - const mod = sema.mod; - if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false; - const end_val = try mod.intValue(tag_ty, end); + const pt = sema.pt; + if (!(try int_val.compareAllWithZeroSema(.gte, pt))) return false; + const end_val = try pt.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; } /// Asserts the type is an enum. fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const enum_type = mod.intern_pool.loadEnumType(ty.toIntern()); assert(enum_type.tag_mode != .nonexhaustive); // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. 
if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false; - const int_coerced = try mod.getCoerced(int, Type.fromInterned(enum_type.tag_ty)); + const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty)); return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null; } @@ -37554,28 +37920,29 @@ fn intAddWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); const result_data = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); for (overflowed_data, result_data, 0..) |*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return Value.OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + } })), + .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))), + } })), }; } return sema.intAddWithOverflowScalar(lhs, rhs, ty); @@ -37587,29 +37954,30 @@ fn intAddWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; const info = ty.intInfo(mod); if (lhs.isUndef(mod) or rhs.isUndef(mod)) { return .{ - .overflow_bit = try mod.undefValue(Type.u1), - .wrapped_result = try mod.undefValue(ty), + .overflow_bit = try pt.undefValue(Type.u1), + .wrapped_result = try pt.undefValue(ty), }; } var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const result = try mod.intValue_big(ty, result_bigint.toConst()); + const result = try pt.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), + .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), .wrapped_result = result, }; } @@ -37625,12 +37993,13 @@ fn compareAll( rhs: Value, ty: Type, ) CompileError!bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == 
.Vector) { var i: usize = 0; while (i < ty.vectorLen(mod)) : (i += 1) { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } @@ -37648,13 +38017,13 @@ fn compareScalar( rhs: Value, ty: Type, ) CompileError!bool { - const mod = sema.mod; - const coerced_lhs = try mod.getCoerced(lhs, ty); - const coerced_rhs = try mod.getCoerced(rhs, ty); + const pt = sema.pt; + const coerced_lhs = try pt.getCoerced(lhs, ty); + const coerced_rhs = try pt.getCoerced(rhs, ty); switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), - else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, pt, .sema), } } @@ -37664,7 +38033,7 @@ fn valuesEqual( rhs: Value, ty: Type, ) CompileError!bool { - return lhs.eql(rhs, ty, sema.mod); + return lhs.eql(rhs, ty, sema.pt.zcu); } /// Asserts the values are comparable vectors of type `ty`. @@ -37675,29 +38044,30 @@ fn compareVector( rhs: Value, ty: Type, ) !Value { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(), + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { - const mod = sema.mod; - const ip = &mod.intern_pool; + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; const arena = sema.arena; - const lhs_names = lhs.errorSetNames(mod); - const rhs_names = rhs.errorSetNames(mod); + const lhs_names = lhs.errorSetNames(pt.zcu); + const rhs_names = rhs.errorSetNames(pt.zcu); var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(arena, lhs_names.len); @@ -37708,7 +38078,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { try names.put(arena, rhs_names.get(ip)[rhs_index], {}); } - return mod.errorSetFromUnsortedNames(names.keys()); + return pt.errorSetFromUnsortedNames(names.keys()); } /// Avoids crashing the compiler when asking if inferred allocations are noreturn. @@ -37718,7 +38088,7 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, }; - return sema.typeOf(ref).isNoReturn(sema.mod); + return sema.typeOf(ref).isNoReturn(sema.pt.zcu); } /// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type. 
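The name union performed by `errorSetMerge` above is what the language-level `||` operator on error sets observes; an illustrative sketch:

    const A = error{ NotFound, Busy };
    const B = error{ Busy, Timeout };
    const C = A || B; // error{ NotFound, Busy, Timeout }; duplicates collapse
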
@@ -37727,11 +38097,12 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, }; - return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; + return sema.typeOf(ref).zigTypeTag(sema.pt.zcu) == tag; } pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { - if (!sema.mod.comp.debug_incremental) return; + const zcu = sema.pt.zcu; + if (!zcu.comp.debug_incremental) return; // Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields // of a type and they use `@This()`. This dependency would be unnecessary, and in fact would @@ -37747,11 +38118,11 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { else .{ .decl = sema.owner_decl_index }, ); - try sema.mod.intern_pool.addDependency(sema.gpa, depender, dependee); + try zcu.intern_pool.addDependency(sema.gpa, depender, dependee); } fn isComptimeMutablePtr(sema: *Sema, val: Value) bool { - return switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + return switch (sema.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .slice => |slice| sema.isComptimeMutablePtr(Value.fromInterned(slice.ptr)), .ptr => |ptr| switch (ptr.base_addr) { .anon_decl, .decl, .int => false, @@ -37766,7 +38137,7 @@ fn isComptimeMutablePtr(sema: *Sema, val: Value) bool { fn checkRuntimeValue(sema: *Sema, ptr: Air.Inst.Ref) bool { const val = ptr.toInterned() orelse return true; - return !Value.fromInterned(val).canMutateComptimeVarState(sema.mod); + return !Value.fromInterned(val).canMutateComptimeVarState(sema.pt.zcu); } fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Air.Inst.Ref) CompileError!void { @@ -37781,7 +38152,8 @@ fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Ai /// Returns true if any value contained in `val` is undefined. fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool { - const mod = sema.mod; + const pt = sema.pt; + const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, @@ -37807,13 +38179,14 @@ fn sliceToIpString( slice_val: Value, reason: NeededComptimeReason, ) CompileError!InternPool.NullTerminatedString { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const slice_ty = slice_val.typeOf(zcu); assert(slice_ty.isSlice(zcu)); assert(slice_ty.childType(zcu).toIntern() == .u8_type); const array_val = try sema.derefSliceAsArray(block, src, slice_val, reason); const array_ty = array_val.typeOf(zcu); - return array_val.toIpString(array_ty, zcu); + return array_val.toIpString(array_ty, pt); } /// Given a slice value, attempts to dereference it into a comptime-known array. 
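The helper below reshapes the slice pointer into a single-item pointer to an array of the comptime-known length and loads through that; a rough user-level analogue of the same reinterpretation:

    const s: []const u8 = "abc";
    const p: *const [3]u8 = s[0..3]; // slice with comptime-known bounds becomes an array pointer
    const arr: [3]u8 = p.*;
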
@@ -37840,7 +38213,8 @@ fn maybeDerefSliceAsArray( src: LazySrcLoc, slice_val: Value, ) CompileError!?Value { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; assert(slice_val.typeOf(zcu).isSlice(zcu)); const slice = switch (ip.indexToKey(slice_val.toIntern())) { @@ -37849,19 +38223,19 @@ fn maybeDerefSliceAsArray( else => unreachable, }; const elem_ty = Type.fromInterned(slice.ty).childType(zcu); - const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu); - const array_ty = try zcu.arrayType(.{ + const len = try Value.fromInterned(slice.len).toUnsignedIntSema(pt); + const array_ty = try pt.arrayType(.{ .child = elem_ty.toIntern(), .len = len, }); - const ptr_ty = try zcu.ptrTypeSema(p: { + const ptr_ty = try pt.ptrTypeSema(p: { var p = Type.fromInterned(slice.ty).ptrInfo(zcu); p.flags.size = .One; p.child = array_ty.toIntern(); p.sentinel = .none; break :p p; }); - const casted_ptr = try zcu.getCoerced(Value.fromInterned(slice.ptr), ptr_ty); + const casted_ptr = try pt.getCoerced(Value.fromInterned(slice.ptr), ptr_ty); return sema.pointerDeref(block, src, casted_ptr, ptr_ty); } @@ -37879,7 +38253,7 @@ fn analyzeUnreachable(sema: *Sema, block: *Block, src: LazySrcLoc, safety_check: pub fn flushExports(sema: *Sema) !void { if (sema.exports.items.len == 0) return; - const zcu = sema.mod; + const zcu = sema.pt.zcu; const gpa = zcu.gpa; const unit = sema.ownerUnit(); diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 3c3ccdbfaaa4..c5155dec6377 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -69,7 +69,8 @@ fn bitCastInner( host_bits: u64, bit_offset: u64, ) BitCastError!Value { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const endian = zcu.getTarget().cpu.arch.endian(); if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) { @@ -78,29 +79,29 @@ fn bitCastInner( const val_ty = val.typeOf(zcu); - try val_ty.resolveLayout(zcu); - try dest_ty.resolveLayout(zcu); + try val_ty.resolveLayout(pt); + try dest_ty.resolveLayout(pt); assert(val_ty.hasWellDefinedLayout(zcu)); const abi_pad_bits, const host_pad_bits = if (host_bits > 0) - .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) } + .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) } else - .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 }; + .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 }; const skip_bits = switch (endian) { .little => bit_offset + byte_offset * 8, .big => if (host_bits > 0) - val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset + val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset else - val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu), + val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt), }; var unpack: UnpackValueBits = .{ - .zcu = zcu, + .pt = sema.pt, .arena = sema.arena, .skip_bits = skip_bits, - .remaining_bits = dest_ty.bitSize(zcu), + .remaining_bits = dest_ty.bitSize(pt), .unpacked = std.ArrayList(InternPool.Index).init(sema.arena), }; switch (endian) { @@ -116,7 +117,7 @@ fn bitCastInner( try unpack.padding(host_pad_bits); var pack: PackValueBits = .{ - .zcu = zcu, + .pt = sema.pt, .arena = sema.arena, .unpacked = unpack.unpacked.items, }; @@ -131,33 +132,34 @@ fn bitCastSpliceInner( host_bits: u64, bit_offset: u64, ) BitCastError!Value { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const endian = zcu.getTarget().cpu.arch.endian(); const val_ty = val.typeOf(zcu); 
const splice_val_ty = splice_val.typeOf(zcu); - try val_ty.resolveLayout(zcu); - try splice_val_ty.resolveLayout(zcu); + try val_ty.resolveLayout(pt); + try splice_val_ty.resolveLayout(pt); - const splice_bits = splice_val_ty.bitSize(zcu); + const splice_bits = splice_val_ty.bitSize(pt); const splice_offset = switch (endian) { .little => bit_offset + byte_offset * 8, .big => if (host_bits > 0) - val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset + val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset else - val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits, + val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits, }; - assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8); + assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8); const abi_pad_bits, const host_pad_bits = if (host_bits > 0) - .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) } + .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) } else - .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 }; + .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 }; var unpack: UnpackValueBits = .{ - .zcu = zcu, + .pt = pt, .arena = sema.arena, .skip_bits = 0, .remaining_bits = splice_offset, @@ -179,7 +181,7 @@ fn bitCastSpliceInner( try unpack.add(splice_val); unpack.skip_bits = splice_offset + splice_bits; - unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits; + unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits; switch (endian) { .little => { try unpack.add(val); @@ -193,7 +195,7 @@ fn bitCastSpliceInner( try unpack.padding(host_pad_bits); var pack: PackValueBits = .{ - .zcu = zcu, + .pt = pt, .arena = sema.arena, .unpacked = unpack.unpacked.items, }; @@ -209,7 +211,7 @@ fn bitCastSpliceInner( /// of values in *packed* memory - therefore, on big-endian targets, the first element of this /// list contains bits from the *final* byte of the value. const UnpackValueBits = struct { - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, skip_bits: u64, remaining_bits: u64, @@ -217,7 +219,8 @@ const UnpackValueBits = struct { unpacked: std.ArrayList(InternPool.Index), fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void { - const zcu = unpack.zcu; + const pt = unpack.pt; + const zcu = pt.zcu; const endian = zcu.getTarget().cpu.arch.endian(); const ip = &zcu.intern_pool; @@ -226,7 +229,7 @@ const UnpackValueBits = struct { } const ty = val.typeOf(zcu); - const bit_size = ty.bitSize(zcu); + const bit_size = ty.bitSize(pt); if (unpack.skip_bits >= bit_size) { unpack.skip_bits -= bit_size; @@ -279,7 +282,7 @@ const UnpackValueBits = struct { .little => i, .big => len - i - 1, }; - const elem_val = try val.elemValue(zcu, real_idx); + const elem_val = try val.elemValue(pt, real_idx); try unpack.add(elem_val); } }, @@ -288,7 +291,7 @@ const UnpackValueBits = struct { // The final element does not have trailing padding. // Elements are reversed in packed memory on BE targets. 
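// e.g. (illustrative): for [2]u16 on a big-endian target, element 1's bits are
// unpacked before element 0's, matching their order in packed memory.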
const elem_ty = ty.childType(zcu); - const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu); + const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt); const len = ty.arrayLen(zcu); const maybe_sent = ty.sentinel(zcu); @@ -303,7 +306,7 @@ const UnpackValueBits = struct { .little => i, .big => len - i - 1, }; - const elem_val = try val.elemValue(zcu, @intCast(real_idx)); + const elem_val = try val.elemValue(pt, @intCast(real_idx)); try unpack.add(elem_val); if (i != len - 1) try unpack.padding(pad_bits); } @@ -320,12 +323,12 @@ const UnpackValueBits = struct { var cur_bit_off: u64 = 0; var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip); while (it.next()) |field_idx| { - const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8; + const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8; const pad_bits = want_bit_off - cur_bit_off; - const field_val = try val.fieldValue(zcu, field_idx); + const field_val = try val.fieldValue(pt, field_idx); try unpack.padding(pad_bits); try unpack.add(field_val); - cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu); + cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt); } // Add trailing padding bits. try unpack.padding(bit_size - cur_bit_off); @@ -334,13 +337,13 @@ const UnpackValueBits = struct { var cur_bit_off: u64 = bit_size; var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip); while (it.next()) |field_idx| { - const field_val = try val.fieldValue(zcu, field_idx); + const field_val = try val.fieldValue(pt, field_idx); const field_ty = field_val.typeOf(zcu); - const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu); + const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt); const pad_bits = cur_bit_off - want_bit_off; try unpack.padding(pad_bits); try unpack.add(field_val); - cur_bit_off = want_bit_off - field_ty.bitSize(zcu); + cur_bit_off = want_bit_off - field_ty.bitSize(pt); } assert(cur_bit_off == 0); }, @@ -349,7 +352,7 @@ const UnpackValueBits = struct { // Just add all fields in order. There are no padding bits. // This is identical between LE and BE targets. for (0..ty.structFieldCount(zcu)) |i| { - const field_val = try val.fieldValue(zcu, i); + const field_val = try val.fieldValue(pt, i); try unpack.add(field_val); } }, @@ -363,7 +366,7 @@ const UnpackValueBits = struct { // This correctly handles the case where `tag == .none`, since the payload is then // either an integer or a byte array, both of which we can unpack. const payload_val = Value.fromInterned(un.val); - const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu); + const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt); if (endian == .little or ty.containerLayout(zcu) == .@"packed") { try unpack.add(payload_val); try unpack.padding(pad_bits); @@ -377,31 +380,31 @@ const UnpackValueBits = struct { fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void { if (pad_bits == 0) return; - const zcu = unpack.zcu; + const pt = unpack.pt; // Figure out how many full bytes and leftover bits there are. const bytes = pad_bits / 8; const bits = pad_bits % 8; // Add undef u8 values for the bytes... - const undef_u8 = try zcu.undefValue(Type.u8); + const undef_u8 = try pt.undefValue(Type.u8); for (0..@intCast(bytes)) |_| { try unpack.primitive(undef_u8); } // ...and an undef int for the leftover bits. 
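// e.g. (illustrative): pad_bits = 13 becomes one undef u8 followed by an undef u5.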
if (bits == 0) return; - const bits_ty = try zcu.intType(.unsigned, @intCast(bits)); - const bits_val = try zcu.undefValue(bits_ty); + const bits_ty = try pt.intType(.unsigned, @intCast(bits)); + const bits_val = try pt.undefValue(bits_ty); try unpack.primitive(bits_val); } fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void { - const zcu = unpack.zcu; + const pt = unpack.pt; if (unpack.remaining_bits == 0) { return; } - const ty = val.typeOf(zcu); - const bit_size = ty.bitSize(zcu); + const ty = val.typeOf(pt.zcu); + const bit_size = ty.bitSize(pt); // Note that this skips all zero-bit types. if (unpack.skip_bits >= bit_size) { @@ -425,21 +428,21 @@ const UnpackValueBits = struct { } fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void { - const zcu = unpack.zcu; - const ty = val.typeOf(zcu); + const pt = unpack.pt; + const ty = val.typeOf(pt.zcu); - const val_bits = ty.bitSize(zcu); + const val_bits = ty.bitSize(pt); assert(bit_offset + bit_count <= val_bits); - switch (zcu.intern_pool.indexToKey(val.toIntern())) { + switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) { // In the `ptr` case, this will return `error.ReinterpretDeclRef` // if we're trying to split a non-integer pointer value. .int, .float, .enum_tag, .ptr, .opt => { // This @intCast is okay because no primitive can exceed the size of a u16. - const int_ty = try zcu.intType(.unsigned, @intCast(bit_count)); + const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count)); const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8)); - try val.writeToPackedMemory(ty, zcu, buf, 0); - const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena); + try val.writeToPackedMemory(ty, unpack.pt, buf, 0); + const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena); try unpack.primitive(sub_val); }, .undef => try unpack.padding(bit_count), @@ -456,13 +459,14 @@ const UnpackValueBits = struct { /// reconstructs a value of an arbitrary type, with correct handling of `undefined` /// values and of pointers which align in virtual memory. const PackValueBits = struct { - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, bit_offset: u64 = 0, unpacked: []const InternPool.Index, fn get(pack: *PackValueBits, ty: Type) BitCastError!Value { - const zcu = pack.zcu; + const pt = pack.pt; + const zcu = pt.zcu; const endian = zcu.getTarget().cpu.arch.endian(); const ip = &zcu.intern_pool; const arena = pack.arena; @@ -485,7 +489,7 @@ const PackValueBits = struct { } }, } - return Value.fromInterned(try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, } })); @@ -495,12 +499,12 @@ const PackValueBits = struct { const len = ty.arrayLen(zcu); const elem_ty = ty.childType(zcu); const maybe_sent = ty.sentinel(zcu); - const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu); + const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt); const elems = try arena.alloc(InternPool.Index, @intCast(len)); if (endian == .big and maybe_sent != null) { // TODO: validate sentinel was preserved! - try pack.padding(elem_ty.bitSize(zcu)); + try pack.padding(elem_ty.bitSize(pt)); if (len != 0) try pack.padding(pad_bits); } @@ -516,10 +520,10 @@ const PackValueBits = struct { if (endian == .little and maybe_sent != null) { // TODO: validate sentinel was preserved! 
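// On little-endian targets the sentinel's bits trail the elements in packed
// order, so they are skipped (as padding) after the elements are read.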
if (len != 0) try pack.padding(pad_bits); - try pack.padding(elem_ty.bitSize(zcu)); + try pack.padding(elem_ty.bitSize(pt)); } - return Value.fromInterned(try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, } })); @@ -534,23 +538,23 @@ const PackValueBits = struct { var cur_bit_off: u64 = 0; var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip); while (it.next()) |field_idx| { - const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8; + const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8; try pack.padding(want_bit_off - cur_bit_off); const field_ty = ty.structFieldType(field_idx, zcu); elems[field_idx] = (try pack.get(field_ty)).toIntern(); - cur_bit_off = want_bit_off + field_ty.bitSize(zcu); + cur_bit_off = want_bit_off + field_ty.bitSize(pt); } - try pack.padding(ty.bitSize(zcu) - cur_bit_off); + try pack.padding(ty.bitSize(pt) - cur_bit_off); }, .big => { - var cur_bit_off: u64 = ty.bitSize(zcu); + var cur_bit_off: u64 = ty.bitSize(pt); var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip); while (it.next()) |field_idx| { const field_ty = ty.structFieldType(field_idx, zcu); - const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu); + const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt); try pack.padding(cur_bit_off - want_bit_off); elems[field_idx] = (try pack.get(field_ty)).toIntern(); - cur_bit_off = want_bit_off - field_ty.bitSize(zcu); + cur_bit_off = want_bit_off - field_ty.bitSize(pt); } assert(cur_bit_off == 0); }, @@ -559,10 +563,10 @@ const PackValueBits = struct { // Fill those values now. for (elems, 0..) |*elem, field_idx| { if (elem.* != .none) continue; - const val = (try ty.structFieldValueComptime(zcu, field_idx)).?; + const val = (try ty.structFieldValueComptime(pt, field_idx)).?; elem.* = val.toIntern(); } - return Value.fromInterned(try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, } })); @@ -575,7 +579,7 @@ const PackValueBits = struct { const field_ty = ty.structFieldType(i, zcu); elem.* = (try pack.get(field_ty)).toIntern(); } - return Value.fromInterned(try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, } })); @@ -591,7 +595,7 @@ const PackValueBits = struct { const prev_unpacked = pack.unpacked; const prev_bit_offset = pack.bit_offset; - const backing_ty = try ty.unionBackingType(zcu); + const backing_ty = try ty.unionBackingType(pt); backing: { const backing_val = pack.get(backing_ty) catch |err| switch (err) { @@ -607,7 +611,7 @@ const PackValueBits = struct { pack.bit_offset = prev_bit_offset; break :backing; } - return Value.fromInterned(try zcu.intern(.{ .un = .{ + return Value.fromInterned(try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = .none, .val = backing_val.toIntern(), @@ -618,16 +622,16 @@ const PackValueBits = struct { for (field_order, 0..) |*f, i| f.* = @intCast(i); // Sort `field_order` to put the fields with the largest bit sizes first. 
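// (Assumed rationale: trying wider fields first lets a successful
// reinterpretation account for as many of the unpacked bits as possible;
// narrower fields serve as fallbacks.)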
const SizeSortCtx = struct { - zcu: *Zcu, + pt: Zcu.PerThread, field_types: []const InternPool.Index, fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool { const a_ty = Type.fromInterned(ctx.field_types[a_idx]); const b_ty = Type.fromInterned(ctx.field_types[b_idx]); - return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu); + return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt); } }; std.mem.sortUnstable(u32, field_order, SizeSortCtx{ - .zcu = zcu, + .pt = pt, .field_types = zcu.typeToUnion(ty).?.field_types.get(ip), }, SizeSortCtx.lessThan); @@ -635,7 +639,7 @@ const PackValueBits = struct { for (field_order) |field_idx| { const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]); - const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu); + const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt); if (!padding_after) try pack.padding(pad_bits); const field_val = pack.get(field_ty) catch |err| switch (err) { error.ReinterpretDeclRef => { @@ -651,8 +655,8 @@ const PackValueBits = struct { pack.bit_offset = prev_bit_offset; continue; } - const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx); - return Value.fromInterned(try zcu.intern(.{ .un = .{ + const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx); + return Value.fromInterned(try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = tag_val.toIntern(), .val = field_val.toIntern(), @@ -662,7 +666,7 @@ const PackValueBits = struct { // No field could represent the value. Just do whatever happens when we try to read // the backing type - either `undefined` or `error.ReinterpretDeclRef`. const backing_val = try pack.get(backing_ty); - return Value.fromInterned(try zcu.intern(.{ .un = .{ + return Value.fromInterned(try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = .none, .val = backing_val.toIntern(), @@ -677,14 +681,14 @@ const PackValueBits = struct { } fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value { - const zcu = pack.zcu; - const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu)); + const pt = pack.pt; + const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt)); for (vals) |val| { - if (!Value.fromInterned(val).isUndef(zcu)) break; + if (!Value.fromInterned(val).isUndef(pt.zcu)) break; } else { // All bits of the value are `undefined`. - return zcu.undefValue(want_ty); + return pt.undefValue(want_ty); } // TODO: we need to decide how to handle partially-undef values here. @@ -702,9 +706,9 @@ const PackValueBits = struct { ptr_cast: { if (vals.len != 1) break :ptr_cast; const val = Value.fromInterned(vals[0]); - if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast; - if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast; - return zcu.getCoerced(val, want_ty); + if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast; + if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast; + return pt.getCoerced(val, want_ty); } // Reinterpret via an in-memory buffer. 
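// Steps below: size a byte buffer for all unpacked bits, write each non-undef
// value into it as packed memory at increasing bit offsets, then read a
// want_ty value back out at bit_offset.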
@@ -712,8 +716,8 @@ const PackValueBits = struct { var buf_bits: u64 = 0; for (vals) |ip_val| { const val = Value.fromInterned(ip_val); - const ty = val.typeOf(zcu); - buf_bits += ty.bitSize(zcu); + const ty = val.typeOf(pt.zcu); + buf_bits += ty.bitSize(pt); } const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8)); @@ -722,25 +726,25 @@ const PackValueBits = struct { var cur_bit_off: usize = 0; for (vals) |ip_val| { const val = Value.fromInterned(ip_val); - const ty = val.typeOf(zcu); - if (!val.isUndef(zcu)) { - try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off); + const ty = val.typeOf(pt.zcu); + if (!val.isUndef(pt.zcu)) { + try val.writeToPackedMemory(ty, pt, buf, cur_bit_off); } - cur_bit_off += @intCast(ty.bitSize(zcu)); + cur_bit_off += @intCast(ty.bitSize(pt)); } - return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena); + return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena); } fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } { if (need_bits == 0) return .{ &.{}, 0 }; - const zcu = pack.zcu; + const pt = pack.pt; var bits: u64 = 0; var len: usize = 0; while (bits < pack.bit_offset + need_bits) { - bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu); + bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt); len += 1; } @@ -753,7 +757,7 @@ const PackValueBits = struct { pack.bit_offset = 0; } else { pack.unpacked = pack.unpacked[len - 1 ..]; - pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits; + pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits; } return .{ result_vals, result_offset }; diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index d8e638ca2660..79e39cabfea8 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -12,19 +12,19 @@ pub const ComptimeLoadResult = union(enum) { }; pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult { - const zcu = sema.mod; - const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu); + const pt = sema.pt; + const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu); // TODO: host size for vectors is terrible const host_bits = switch (ptr_info.flags.vector_index) { .none => ptr_info.packed_offset.host_size * 8, - else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu), + else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt), }; const bit_offset = if (host_bits != 0) bit_offset: { - const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu); + const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt); const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) { .none => 0, .runtime => return .runtime_load, - else => |idx| switch (zcu.getTarget().cpu.arch.endian()) { + else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) { .little => child_bits * @intFromEnum(idx), .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian }, @@ -60,28 +60,29 @@ pub fn storeComptimePtr( ptr: Value, store_val: Value, ) !ComptimeStoreResult { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu); assert(store_val.typeOf(zcu).toIntern() == ptr_info.child); // TODO: host size for vectors is terrible const 
host_bits = switch (ptr_info.flags.vector_index) { .none => ptr_info.packed_offset.host_size * 8, - else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu), + else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt), }; const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) { .none => 0, .runtime => return .runtime_store, else => |idx| switch (zcu.getTarget().cpu.arch.endian()) { - .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx), - .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian + .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx), + .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian }, }; const pseudo_store_ty = if (host_bits > 0) t: { - const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu); + const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt); if (need_bits + bit_offset > host_bits) { return .exceeds_host_size; } - break :t try zcu.intType(.unsigned, @intCast(host_bits)); + break :t try sema.pt.intType(.unsigned, @intCast(host_bits)); } else Type.fromInterned(ptr_info.child); const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0); @@ -103,7 +104,7 @@ pub fn storeComptimePtr( .needed_well_defined => |ty| return .{ .needed_well_defined = ty }, .out_of_bounds => |ty| return .{ .out_of_bounds = ty }, }; - const expected = try expected_mv.intern(zcu, sema.arena); + const expected = try expected_mv.intern(pt, sema.arena); if (store_val.toIntern() != expected.toIntern()) { return .{ .comptime_field_mismatch = expected }; } @@ -126,14 +127,14 @@ pub fn storeComptimePtr( switch (strat) { .direct => |direct| { const want_ty = direct.val.typeOf(zcu); - const coerced_store_val = try zcu.getCoerced(store_val, want_ty); + const coerced_store_val = try pt.getCoerced(store_val, want_ty); direct.val.* = .{ .interned = coerced_store_val.toIntern() }; return .success; }, .index => |index| { const want_ty = index.val.typeOf(zcu).childType(zcu); - const coerced_store_val = try zcu.getCoerced(store_val, want_ty); - try index.val.setElem(zcu, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() }); + const coerced_store_val = try pt.getCoerced(store_val, want_ty); + try index.val.setElem(pt, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() }); return .success; }, .flat_index => |flat| { @@ -149,7 +150,7 @@ pub fn storeComptimePtr( // Better would be to gather all the store targets into an array. 
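// recursiveIndex (below) maps each flattened element index back to the
// mutable sub-array value and the element's position within it.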
var index: u64 = flat.flat_elem_index + idx; const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?; - try val_ptr.setElem(zcu, sema.arena, @intCast(final_idx), .{ .interned = elem }); + try val_ptr.setElem(pt, sema.arena, @intCast(final_idx), .{ .interned = elem }); } return .success; }, @@ -165,9 +166,9 @@ pub fn storeComptimePtr( .direct => |direct| .{ direct.val, 0 }, .index => |index| .{ index.val, - index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu), + index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt), }, - .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) }, + .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) }, .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset }, else => unreachable, }; @@ -181,7 +182,7 @@ pub fn storeComptimePtr( } const new_val = try sema.bitCastSpliceVal( - try val_ptr.intern(zcu, sema.arena), + try val_ptr.intern(pt, sema.arena), store_val, byte_offset, host_bits, @@ -205,7 +206,8 @@ fn loadComptimePtrInner( /// before `load_ty`. Otherwise, it is ignored and may be `undefined`. array_offset: u64, ) !ComptimeLoadResult { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ptr = switch (ip.indexToKey(ptr_val.toIntern())) { @@ -263,7 +265,7 @@ fn loadComptimePtrInner( const load_one_ty, const load_count = load_ty.arrayBase(zcu); const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1; - const want_ty = try zcu.arrayType(.{ + const want_ty = try sema.pt.arrayType(.{ .len = count, .child = base_ty.toIntern(), }); @@ -285,7 +287,7 @@ fn loadComptimePtrInner( const agg_ty = agg_val.typeOf(zcu); switch (agg_ty.zigTypeTag(zcu)) { - .Struct, .Pointer => break :val try agg_val.getElem(zcu, @intCast(base_index.index)), + .Struct, .Pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)), .Union => { const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) { .un => |un| .{ Value.fromInterned(un.tag), un.payload.* }, @@ -427,7 +429,7 @@ fn loadComptimePtrInner( const next_elem_off = elem_size * (elem_idx + 1); if (cur_offset + need_bytes <= next_elem_off) { // We can look at a single array element. - cur_val = try cur_val.getElem(zcu, @intCast(elem_idx)); + cur_val = try cur_val.getElem(sema.pt, @intCast(elem_idx)); cur_offset -= elem_idx * elem_size; } else { break; @@ -437,10 +439,10 @@ fn loadComptimePtrInner( .auto => unreachable, // ill-defined layout .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { - const start_off = cur_ty.structFieldOffset(field_idx, zcu); + const start_off = cur_ty.structFieldOffset(field_idx, pt); const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu)); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { - cur_val = try cur_val.getElem(zcu, field_idx); + cur_val = try cur_val.getElem(sema.pt, field_idx); cur_offset -= start_off; break; } @@ -482,7 +484,7 @@ fn loadComptimePtrInner( } const result_val = try sema.bitCastVal( - try cur_val.intern(zcu, sema.arena), + try cur_val.intern(sema.pt, sema.arena), load_ty, cur_offset, host_bits, @@ -564,7 +566,8 @@ fn prepareComptimePtrStore( /// before `store_ty`. Otherwise, it is ignored and may be `undefined`. 
array_offset: u64, ) !ComptimeStoreStrategy { - const zcu = sema.mod; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ptr = switch (ip.indexToKey(ptr_val.toIntern())) { @@ -587,14 +590,14 @@ fn prepareComptimePtrStore( const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) { .direct => |direct| .{ direct.val, direct.alloc }, .index => |index| .{ - try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)), + try index.val.elem(pt, sema.arena, @intCast(index.elem_index)), index.alloc, }, .flat_index => unreachable, // base_ty is not an array .reinterpret => unreachable, // base_ty has ill-defined layout else => |err| return err, }; - try eu_val_ptr.unintern(zcu, sema.arena, false, false); + try eu_val_ptr.unintern(pt, sema.arena, false, false); switch (eu_val_ptr.*) { .interned => |ip_index| switch (ip.indexToKey(ip_index)) { .undef => return .undef, @@ -614,14 +617,14 @@ fn prepareComptimePtrStore( const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) { .direct => |direct| .{ direct.val, direct.alloc }, .index => |index| .{ - try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)), + try index.val.elem(pt, sema.arena, @intCast(index.elem_index)), index.alloc, }, .flat_index => unreachable, // base_ty is not an array .reinterpret => unreachable, // base_ty has ill-defined layout else => |err| return err, }; - try opt_val_ptr.unintern(zcu, sema.arena, false, false); + try opt_val_ptr.unintern(pt, sema.arena, false, false); switch (opt_val_ptr.*) { .interned => |ip_index| switch (ip.indexToKey(ip_index)) { .undef => return .undef, @@ -648,7 +651,7 @@ fn prepareComptimePtrStore( const store_one_ty, const store_count = store_ty.arrayBase(zcu); const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1; - const want_ty = try zcu.arrayType(.{ + const want_ty = try pt.arrayType(.{ .len = count, .child = base_ty.toIntern(), }); @@ -668,7 +671,7 @@ fn prepareComptimePtrStore( const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) { .direct => |direct| .{ direct.val, direct.alloc }, .index => |index| .{ - try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)), + try index.val.elem(pt, sema.arena, @intCast(index.elem_index)), index.alloc, }, .flat_index => unreachable, // base_ty is not an array @@ -679,14 +682,14 @@ fn prepareComptimePtrStore( const agg_ty = agg_val.typeOf(zcu); switch (agg_ty.zigTypeTag(zcu)) { .Struct, .Pointer => break :strat .{ .direct = .{ - .val = try agg_val.elem(zcu, sema.arena, @intCast(base_index.index)), + .val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)), .alloc = alloc, } }, .Union => { if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) { return .undef; } - try agg_val.unintern(zcu, sema.arena, false, false); + try agg_val.unintern(pt, sema.arena, false, false); const un = agg_val.un; const tag_ty = agg_ty.unionTagTypeHypothetical(zcu); if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) { @@ -847,7 +850,7 @@ fn prepareComptimePtrStore( const next_elem_off = elem_size * (elem_idx + 1); if (cur_offset + need_bytes <= next_elem_off) { // We can look at a single array element. 
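                    // Rebasing `cur_offset` below makes it relative to the start of
                    // the chosen element.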
- cur_val = try cur_val.elem(zcu, sema.arena, @intCast(elem_idx)); + cur_val = try cur_val.elem(pt, sema.arena, @intCast(elem_idx)); cur_offset -= elem_idx * elem_size; } else { break; @@ -857,10 +860,10 @@ fn prepareComptimePtrStore( .auto => unreachable, // ill-defined layout .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { - const start_off = cur_ty.structFieldOffset(field_idx, zcu); + const start_off = cur_ty.structFieldOffset(field_idx, pt); const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu)); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { - cur_val = try cur_val.elem(zcu, sema.arena, field_idx); + cur_val = try cur_val.elem(pt, sema.arena, field_idx); cur_offset -= start_off; break; } @@ -874,7 +877,7 @@ fn prepareComptimePtrStore( // Otherwise, we might traverse into a union field which doesn't allow pointers. // Figure out a solution! if (true) break; - try cur_val.unintern(zcu, sema.arena, false, false); + try cur_val.unintern(pt, sema.arena, false, false); const payload = switch (cur_val.*) { .un => |un| un.payload, else => unreachable, @@ -918,7 +921,7 @@ fn flattenArray( ) Allocator.Error!void { if (next_idx.* == out.len) return; - const zcu = sema.mod; + const zcu = sema.pt.zcu; const ty = val.typeOf(zcu); const base_elem_count = ty.arrayBase(zcu)[1]; @@ -928,7 +931,7 @@ fn flattenArray( } if (ty.zigTypeTag(zcu) != .Array) { - out[@intCast(next_idx.*)] = (try val.intern(zcu, sema.arena)).toIntern(); + out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern(); next_idx.* += 1; return; } @@ -942,7 +945,7 @@ fn flattenArray( skip.* -= arr_base_elem_count; continue; } - try flattenArray(sema, try val.getElem(zcu, elem_idx), skip, next_idx, out); + try flattenArray(sema, try val.getElem(sema.pt, elem_idx), skip, next_idx, out); } if (ty.sentinel(zcu)) |s| { try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out); @@ -957,13 +960,13 @@ fn unflattenArray( elems: []const InternPool.Index, next_idx: *u64, ) Allocator.Error!Value { - const zcu = sema.mod; + const zcu = sema.pt.zcu; const arena = sema.arena; if (ty.zigTypeTag(zcu) != .Array) { const val = Value.fromInterned(elems[@intCast(next_idx.*)]); next_idx.* += 1; - return zcu.getCoerced(val, ty); + return sema.pt.getCoerced(val, ty); } const elem_ty = ty.childType(zcu); @@ -975,7 +978,7 @@ fn unflattenArray( // TODO: validate sentinel _ = try unflattenArray(sema, elem_ty, elems, next_idx); } - return Value.fromInterned(try zcu.intern(.{ .aggregate = .{ + return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = buf }, } })); @@ -990,25 +993,25 @@ fn recursiveIndex( mv: *MutableValue, index: *u64, ) !?struct { *MutableValue, u64 } { - const zcu = sema.mod; + const pt = sema.pt; - const ty = mv.typeOf(zcu); - assert(ty.zigTypeTag(zcu) == .Array); + const ty = mv.typeOf(pt.zcu); + assert(ty.zigTypeTag(pt.zcu) == .Array); - const ty_base_elems = ty.arrayBase(zcu)[1]; + const ty_base_elems = ty.arrayBase(pt.zcu)[1]; if (index.* >= ty_base_elems) { index.* -= ty_base_elems; return null; } - const elem_ty = ty.childType(zcu); - if (elem_ty.zigTypeTag(zcu) != .Array) { - assert(index.* < ty.arrayLenIncludingSentinel(zcu)); // should be handled by initial check + const elem_ty = ty.childType(pt.zcu); + if (elem_ty.zigTypeTag(pt.zcu) != .Array) { + assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be 
handled by initial check return .{ mv, index.* }; } - for (0..@intCast(ty.arrayLenIncludingSentinel(zcu))) |elem_index| { - if (try recursiveIndex(sema, try mv.elem(zcu, sema.arena, elem_index), index)) |result| { + for (0..@intCast(ty.arrayLenIncludingSentinel(pt.zcu))) |elem_index| { + if (try recursiveIndex(sema, try mv.elem(pt, sema.arena, elem_index), index)) |result| { return result; } } diff --git a/src/Type.zig b/src/Type.zig index 49f127607057..0bb8f3f1448f 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -136,16 +136,16 @@ pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt pub const Formatter = std.fmt.Formatter(format2); -pub fn fmt(ty: Type, module: *Module) Formatter { +pub fn fmt(ty: Type, pt: Zcu.PerThread) Formatter { return .{ .data = .{ .ty = ty, - .module = module, + .pt = pt, } }; } const FormatContext = struct { ty: Type, - module: *Module, + pt: Zcu.PerThread, }; fn format2( @@ -156,7 +156,7 @@ fn format2( ) !void { comptime assert(unused_format_string.len == 0); _ = options; - return print(ctx.ty, writer, ctx.module); + return print(ctx.ty, writer, ctx.pt); } pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { @@ -178,7 +178,8 @@ pub fn dump( /// Prints a name suitable for `@typeName`. /// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. -pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { +pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error!void { + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { @@ -193,8 +194,8 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void if (info.sentinel != .none) switch (info.flags.size) { .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}), + .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}), } else switch (info.flags.size) { .One => try writer.writeAll("*"), .Many => try writer.writeAll("[*]"), @@ -208,7 +209,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void const alignment = if (info.flags.alignment != .none) info.flags.alignment else - Type.fromInterned(info.child).abiAlignment(mod); + Type.fromInterned(info.child).abiAlignment(pt); try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { @@ -230,39 +231,39 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void if (info.flags.is_volatile) try writer.writeAll("volatile "); if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); - try print(Type.fromInterned(info.child), writer, mod); + try print(Type.fromInterned(info.child), writer, pt); return; }, .array_type => |array_type| { if (array_type.sentinel == .none) { try writer.print("[{d}]", .{array_type.len}); - try print(Type.fromInterned(array_type.child), writer, mod); + try print(Type.fromInterned(array_type.child), writer, pt); } else { try writer.print("[{d}:{}]", .{ array_type.len, - Value.fromInterned(array_type.sentinel).fmtValue(mod, null), + Value.fromInterned(array_type.sentinel).fmtValue(pt, null), }); - try 
print(Type.fromInterned(array_type.child), writer, mod); + try print(Type.fromInterned(array_type.child), writer, pt); } return; }, .vector_type => |vector_type| { try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(Type.fromInterned(vector_type.child), writer, mod); + try print(Type.fromInterned(vector_type.child), writer, pt); try writer.writeAll(")"); return; }, .opt_type => |child| { try writer.writeByte('?'); - return print(Type.fromInterned(child), writer, mod); + return print(Type.fromInterned(child), writer, pt); }, .error_union_type => |error_union_type| { - try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); + try print(Type.fromInterned(error_union_type.error_set_type), writer, pt); try writer.writeByte('!'); if (error_union_type.payload_type == .generic_poison_type) { try writer.writeAll("anytype"); } else { - try print(Type.fromInterned(error_union_type.payload_type), writer, mod); + try print(Type.fromInterned(error_union_type.payload_type), writer, pt); } return; }, @@ -355,10 +356,10 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); } - try print(Type.fromInterned(field_ty), writer, mod); + try print(Type.fromInterned(field_ty), writer, pt); if (val != .none) { - try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); + try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt, null)}); } } try writer.writeAll("}"); @@ -395,7 +396,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void if (param_ty == .generic_poison_type) { try writer.writeAll("anytype"); } else { - try print(Type.fromInterned(param_ty), writer, mod); + try print(Type.fromInterned(param_ty), writer, pt); } } if (fn_info.is_var_args) { @@ -413,13 +414,13 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void if (fn_info.return_type == .generic_poison_type) { try writer.writeAll("anytype"); } else { - try print(Type.fromInterned(fn_info.return_type), writer, mod); + try print(Type.fromInterned(fn_info.return_type), writer, pt); } }, .anyframe_type => |child| { if (child == .none) return writer.writeAll("anyframe"); try writer.writeAll("anyframe->"); - return print(Type.fromInterned(child), writer, mod); + return print(Type.fromInterned(child), writer, pt); }, // values, not types @@ -475,10 +476,11 @@ const RuntimeBitsError = SemaError || error{NeedLazy}; /// may return false positives. pub fn hasRuntimeBitsAdvanced( ty: Type, - mod: *Module, + pt: Zcu.PerThread, ignore_comptime_only: bool, strat: ResolveStratLazy, ) RuntimeBitsError!bool { + const mod = pt.zcu; const ip = &mod.intern_pool; return switch (ty.toIntern()) { // False because it is a comptime-only type. @@ -490,16 +492,16 @@ pub fn hasRuntimeBitsAdvanced( // to comptime-only types do not, with the exception of function pointers. 
if (ignore_comptime_only) return true; return switch (strat) { - .sema => !try ty.comptimeOnlyAdvanced(mod, .sema), - .eager => !ty.comptimeOnly(mod), + .sema => !try ty.comptimeOnlyAdvanced(pt, .sema), + .eager => !ty.comptimeOnly(pt), .lazy => error.NeedLazy, }; }, .anyframe_type => true, .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and - try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat), .vector_type => |vector_type| return vector_type.len > 0 and - try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat), .opt_type => |child| { const child_ty = Type.fromInterned(child); if (child_ty.isNoReturn(mod)) { @@ -508,8 +510,8 @@ pub fn hasRuntimeBitsAdvanced( } if (ignore_comptime_only) return true; return switch (strat) { - .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema), - .eager => !child_ty.comptimeOnly(mod), + .sema => !try child_ty.comptimeOnlyAdvanced(pt, .sema), + .eager => !child_ty.comptimeOnly(pt), .lazy => error.NeedLazy, }; }, @@ -580,14 +582,14 @@ pub fn hasRuntimeBitsAdvanced( return true; } switch (strat) { - .sema => try ty.resolveFields(mod), + .sema => try ty.resolveFields(pt), .eager => assert(struct_type.haveFieldTypes(ip)), .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, } for (0..struct_type.field_types.len) |i| { if (struct_type.comptime_bits.getBit(ip, i)) continue; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true; } else { return false; @@ -596,7 +598,7 @@ pub fn hasRuntimeBitsAdvanced( .anon_struct_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { if (val != .none) continue; // comptime field - if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true; } return false; }, @@ -617,21 +619,21 @@ pub fn hasRuntimeBitsAdvanced( // tag_ty will be `none` if this union's tag type is not resolved yet, // in which case we want control flow to continue down below. 
if (tag_ty != .none and - try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) { return true; } }, } switch (strat) { - .sema => try ty.resolveFields(mod), + .sema => try ty.resolveFields(pt), .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) return error.NeedLazy, } for (0..union_type.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true; } else { return false; @@ -639,7 +641,7 @@ pub fn hasRuntimeBitsAdvanced( }, .opaque_type => true, - .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat), // values, not types .undef, @@ -777,41 +779,41 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { }; } -pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; +pub fn hasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool { + return hasRuntimeBitsAdvanced(ty, pt, false, .eager) catch unreachable; } -pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; +pub fn hasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool { + return hasRuntimeBitsAdvanced(ty, pt, true, .eager) catch unreachable; } -pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable; +pub fn fnHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool { + return ty.fnHasRuntimeBitsAdvanced(pt, .normal) catch unreachable; } /// Determines whether a function type has runtime bits, i.e. whether a /// function with this type can exist at runtime. /// Asserts that `ty` is a function type. -pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { - const fn_info = mod.typeToFunc(ty).?; +pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool { + const fn_info = pt.zcu.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat); + return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(pt, strat); } -pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { - switch (ty.zigTypeTag(mod)) { - .Fn => return ty.fnHasRuntimeBits(mod), - else => return ty.hasRuntimeBits(mod), +pub fn isFnOrHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool { + switch (ty.zigTypeTag(pt.zcu)) { + .Fn => return ty.fnHasRuntimeBits(pt), + else => return ty.hasRuntimeBits(pt), } } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. 
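/// (The false positive case: `.Fn` is `true` unconditionally here, whereas
/// `isFnOrHasRuntimeBits` inspects the function's return type.)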
-pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { +pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool { + return switch (ty.zigTypeTag(pt.zcu)) { .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(mod), + else => return ty.hasRuntimeBitsIgnoreComptime(pt), }; } @@ -820,24 +822,24 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool { } /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. -pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { - return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable; +pub fn ptrAlignment(ty: Type, pt: Zcu.PerThread) Alignment { + return ptrAlignmentAdvanced(ty, pt, .normal) catch unreachable; } -pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { +pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment { + return switch (pt.zcu.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| { if (ptr_type.flags.alignment != .none) return ptr_type.flags.alignment; if (strat == .sema) { - const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema); + const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .sema); return res.scalar; } - return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar; }, - .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat), + .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(pt, strat), else => unreachable, }; } @@ -851,16 +853,16 @@ pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { } /// Never returns `none`. Asserts that all necessary type resolution is already done. -pub fn abiAlignment(ty: Type, mod: *Module) Alignment { - return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; +pub fn abiAlignment(ty: Type, pt: Zcu.PerThread) Alignment { + return (ty.abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar; } /// May capture a reference to `ty`. /// Returned value has type `comptime_int`. -pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { +pub fn lazyAbiAlignment(ty: Type, pt: Zcu.PerThread) !Value { + switch (try ty.abiAlignmentAdvanced(pt, .lazy)) { .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), + .scalar => |x| return pt.intValue(Type.comptime_int, x.toByteUnits() orelse 0), } } @@ -907,38 +909,39 @@ pub const ResolveStrat = enum { /// necessary, possibly returning a CompileError. 
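/// With the `.lazy` strategy, an unresolved alignment is instead returned as a
/// `lazy_align` value to be resolved later.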
pub fn abiAlignmentAdvanced( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStratLazy, ) SemaError!AbiAlignmentAdvanced { + const mod = pt.zcu; const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; switch (ty.toIntern()) { - .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, + .empty_struct_type => return .{ .scalar = .@"1" }, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { - if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + if (int_type.bits == 0) return .{ .scalar = .@"1" }; return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; }, .ptr_type, .anyframe_type => { return .{ .scalar = ptrAbiAlignment(target) }; }, .array_type => |array_type| { - return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); + return Type.fromInterned(array_type.child).abiAlignmentAdvanced(pt, strat); }, .vector_type => |vector_type| { if (vector_type.len == 0) return .{ .scalar = .@"1" }; switch (mod.comp.getZigBackend()) { else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema)); + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, .sema)); if (elem_bits == 0) return .{ .scalar = .@"1" }; const bytes = ((elem_bits * vector_type.len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); return .{ .scalar = Alignment.fromByteUnits(alignment) }; }, .stage2_c => { - return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); + return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(pt, strat); }, .stage2_x86_64 => { if (vector_type.child == .bool_type) { @@ -949,7 +952,7 @@ pub fn abiAlignmentAdvanced( const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); return .{ .scalar = Alignment.fromByteUnits(alignment) }; } - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar); if (elem_bytes == 0) return .{ .scalar = .@"1" }; const bytes = elem_bytes * vector_type.len; if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; @@ -959,12 +962,12 @@ pub fn abiAlignmentAdvanced( } }, - .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), + .opt_type => return ty.abiAlignmentAdvancedOptional(pt, strat), + .error_union_type => |info| return ty.abiAlignmentAdvancedErrorUnion(pt, strat, Type.fromInterned(info.payload_type)), .error_set_type, .inferred_error_set_type => { const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + if (bits == 0) return .{ .scalar = .@"1" }; return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; }, @@ -1012,10 +1015,7 @@ pub fn abiAlignmentAdvanced( }, .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return .{ .scalar = abiAlignment(u80_ty, mod) }; - }, + else => return .{ .scalar = Type.u80.abiAlignment(pt) }, }, .f128 => switch (target.c_type_bit_size(.longdouble)) { 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, @@ -1024,7 +1024,7 @@ pub 
fn abiAlignmentAdvanced( .anyerror, .adhoc_inferred_error_set => { const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + if (bits == 0) return .{ .scalar = .@"1" }; return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; }, @@ -1044,22 +1044,22 @@ pub fn abiAlignmentAdvanced( const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") { switch (strat) { - .sema => try ty.resolveLayout(mod), + .sema => try ty.resolveLayout(pt), .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))), + } })), }, .eager => {}, } - return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; + return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) }; } if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // struct alignment not resolved - .sema => try ty.resolveStructAlignment(mod), - .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{ + .sema => try ty.resolveStructAlignment(pt), + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, } })) }, @@ -1071,15 +1071,15 @@ pub fn abiAlignmentAdvanced( var big_align: Alignment = .@"1"; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { if (val != .none) continue; // comptime field - switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { + switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(pt, strat)) { .scalar => |field_align| big_align = big_align.max(field_align), .val => switch (strat) { .eager => unreachable, // field type alignment not resolved .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, }, } } @@ -1090,18 +1090,18 @@ pub fn abiAlignmentAdvanced( if (union_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // union layout not resolved - .sema => try ty.resolveUnionAlignment(mod), - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .sema => try ty.resolveUnionAlignment(pt), + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, }; return .{ .scalar = union_type.flagsPtr(ip).alignment }; }, .opaque_type => return .{ .scalar = .@"1" }, .enum_type => return .{ - .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), + .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(pt), }, // values, not types @@ -1131,91 +1131,92 @@ pub fn abiAlignmentAdvanced( fn abiAlignmentAdvancedErrorUnion( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStratLazy, payload_ty: Type, ) SemaError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. 
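    // An error union is an error code plus, when the payload has runtime bits,
    // the payload itself; its alignment is therefore the max of the two.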
- const code_align = abiAlignment(Type.anyerror, mod); + const code_align = Type.anyerror.abiAlignment(pt); switch (strat) { .eager, .sema => { - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, else => |e| return e, })) { return .{ .scalar = code_align }; } return .{ .scalar = code_align.max( - (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, + (try payload_ty.abiAlignmentAdvanced(pt, strat)).scalar, ) }; }, .lazy => { - switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { + switch (try payload_ty.abiAlignmentAdvanced(pt, strat)) { .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, .val => {}, } - return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }; + } })) }; }, } } fn abiAlignmentAdvancedOptional( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStratLazy, ) SemaError!AbiAlignmentAdvanced { + const mod = pt.zcu; const target = mod.getTarget(); const child_type = ty.optionalChild(mod); switch (child_type.zigTypeTag(mod)) { .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .ErrorSet => return Type.anyerror.abiAlignmentAdvanced(pt, strat), .NoReturn => return .{ .scalar = .@"1" }, else => {}, } switch (strat) { .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + if (!(child_type.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, else => |e| return e, })) { return .{ .scalar = .@"1" }; } - return child_type.abiAlignmentAdvanced(mod, strat); + return child_type.abiAlignmentAdvanced(pt, strat); }, - .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .lazy => switch (try child_type.abiAlignmentAdvanced(pt, strat)) { .scalar => |x| return .{ .scalar = x.max(.@"1") }, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, }, } } /// May capture a reference to `ty`. -pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { - switch (try ty.abiSizeAdvanced(mod, .lazy)) { +pub fn lazyAbiSize(ty: Type, pt: Zcu.PerThread) !Value { + switch (try ty.abiSizeAdvanced(pt, .lazy)) { .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x), + .scalar => |x| return pt.intValue(Type.comptime_int, x), } } /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. 
-pub fn abiSize(ty: Type, mod: *Module) u64 { - return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; +pub fn abiSize(ty: Type, pt: Zcu.PerThread) u64 { + return (abiSizeAdvanced(ty, pt, .eager) catch unreachable).scalar; } const AbiSizeAdvanced = union(enum) { @@ -1231,38 +1232,39 @@ const AbiSizeAdvanced = union(enum) { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStratLazy, ) SemaError!AbiSizeAdvanced { + const mod = pt.zcu; const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; switch (ty.toIntern()) { - .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, + .empty_struct_type => return .{ .scalar = 0 }, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { - if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; + if (int_type.bits == 0) return .{ .scalar = 0 }; + return .{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; }, .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, - .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .anyframe_type => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .array_type => |array_type| { const len = array_type.lenIncludingSentinel(); if (len == 0) return .{ .scalar = 0 }; - switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { + switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(pt, strat)) { .scalar => |elem_size| return .{ .scalar = len * elem_size }, .val => switch (strat) { .sema, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, }, } }, @@ -1270,71 +1272,71 @@ pub fn abiSizeAdvanced( const sub_strat: ResolveStrat = switch (strat) { .sema => .sema, .eager => .normal, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, }; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + const alignment = switch (try ty.abiAlignmentAdvanced(pt, strat)) { .scalar => |x| x, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, }; const total_bytes = switch (mod.comp.getZigBackend()) { else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat); + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, sub_strat); const total_bits = elem_bits * vector_type.len; break :total_bytes (total_bits + 7) / 8; }, .stage2_c => total_bytes: { - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar); break :total_bytes 
elem_bytes * vector_type.len; }, .stage2_x86_64 => total_bytes: { if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar); break :total_bytes elem_bytes * vector_type.len; }, }; - return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; + return .{ .scalar = alignment.forward(total_bytes) }; }, - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + .opt_type => return ty.abiSizeAdvancedOptional(pt, strat), .error_set_type, .inferred_error_set_type => { const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + if (bits == 0) return .{ .scalar = 0 }; + return .{ .scalar = intAbiSize(bits, target, use_llvm) }; }, .error_union_type => |error_union_type| { const payload_ty = Type.fromInterned(error_union_type.payload_type); // This code needs to be kept in sync with the equivalent switch prong // in abiAlignmentAdvanced. - const code_size = abiSize(Type.anyerror, mod); - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + const code_size = Type.anyerror.abiSize(pt); + if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, else => |e| return e, })) { // Same as anyerror. 
- return AbiSizeAdvanced{ .scalar = code_size }; + return .{ .scalar = code_size }; } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(payload_ty, mod); - const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + const code_align = Type.anyerror.abiAlignment(pt); + const payload_align = payload_ty.abiAlignment(pt); + const payload_size = switch (try payload_ty.abiSizeAdvanced(pt, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, }, }; @@ -1350,7 +1352,7 @@ pub fn abiSizeAdvanced( size += code_size; size = payload_align.forward(size); } - return AbiSizeAdvanced{ .scalar = size }; + return .{ .scalar = size }; }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { @@ -1362,34 +1364,31 @@ pub fn abiSizeAdvanced( .float_mode, .reduce_op, .call_modifier, - => return AbiSizeAdvanced{ .scalar = 1 }, + => return .{ .scalar = 1 }, - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f16 => return .{ .scalar = 2 }, + .f32 => return .{ .scalar = 4 }, + .f64 => return .{ .scalar = 8 }, + .f128 => return .{ .scalar = 16 }, .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, + 80 => return .{ .scalar = target.c_type_byte_size(.longdouble) }, + else => return .{ .scalar = Type.u80.abiSize(pt) }, }, .usize, .isize, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return .{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return .{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return .{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return .{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return .{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return .{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return .{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return .{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong 
=> return .{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return .{ .scalar = target.c_type_byte_size(.longdouble) }, .anyopaque, .void, @@ -1399,12 +1398,12 @@ pub fn abiSizeAdvanced( .null, .undefined, .enum_literal, - => return AbiSizeAdvanced{ .scalar = 0 }, + => return .{ .scalar = 0 }, .anyerror, .adhoc_inferred_error_set => { const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + if (bits == 0) return .{ .scalar = 0 }; + return .{ .scalar = intAbiSize(bits, target, use_llvm) }; }, .prefetch_options => unreachable, // missing call to resolveTypeFields @@ -1418,22 +1417,22 @@ pub fn abiSizeAdvanced( .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); switch (strat) { - .sema => try ty.resolveLayout(mod), + .sema => try ty.resolveLayout(pt), .lazy => switch (struct_type.layout) { .@"packed" => { if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))), + } })), }; }, .auto, .@"extern" => { if (!struct_type.haveLayout(ip)) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))), + } })), }; }, }, @@ -1441,7 +1440,7 @@ pub fn abiSizeAdvanced( } switch (struct_type.layout) { .@"packed" => return .{ - .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), + .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt), }, .auto, .@"extern" => { assert(struct_type.haveLayout(ip)); @@ -1451,25 +1450,25 @@ pub fn abiSizeAdvanced( }, .anon_struct_type => |tuple| { switch (strat) { - .sema => try ty.resolveLayout(mod), + .sema => try ty.resolveLayout(pt), .lazy, .eager => {}, } const field_count = tuple.types.len; if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; + return .{ .scalar = 0 }; } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + return .{ .scalar = ty.structFieldOffset(field_count, pt) }; }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); switch (strat) { - .sema => try ty.resolveLayout(mod), + .sema => try ty.resolveLayout(pt), .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))), + } })), }, .eager => {}, } @@ -1478,7 +1477,7 @@ pub fn abiSizeAdvanced( return .{ .scalar = union_type.size(ip).* }; }, .opaque_type => unreachable, // no size available - .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, + .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) }, // values, not types .undef, @@ -1507,36 +1506,37 @@ pub fn abiSizeAdvanced( fn abiSizeAdvancedOptional( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStratLazy, ) SemaError!AbiSizeAdvanced { + const mod = pt.zcu; const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn(mod)) { - return AbiSizeAdvanced{ .scalar = 0 }; + return .{ .scalar = 0 }; } - if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch 
(err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + if (!(child_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; + })) return .{ .scalar = 1 }; if (ty.optionalReprIsPayload(mod)) { - return abiSizeAdvanced(child_ty, mod, strat); + return child_ty.abiSizeAdvanced(pt, strat); } - const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + const payload_size = switch (try child_ty.abiSizeAdvanced(pt, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, + } })) }, }, }; @@ -1544,8 +1544,8 @@ fn abiSizeAdvancedOptional( // field and a boolean as the second. Since the child type's abi alignment is // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child type's ABI alignment. - return AbiSizeAdvanced{ - .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, + return .{ + .scalar = (child_ty.abiAlignment(pt).toByteUnits() orelse 0) + payload_size, }; } @@ -1675,15 +1675,16 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { }; } -pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, .normal) catch unreachable; +pub fn bitSize(ty: Type, pt: Zcu.PerThread) u64 { + return bitSizeAdvanced(ty, pt, .normal) catch unreachable; } pub fn bitSizeAdvanced( ty: Type, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStrat, ) SemaError!u64 { + const mod = pt.zcu; const target = mod.getTarget(); const ip = &mod.intern_pool; @@ -1702,22 +1703,22 @@ pub fn bitSizeAdvanced( if (len == 0) return 0; const elem_ty = Type.fromInterned(array_type.child); const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar, + (try elem_ty.abiAlignmentAdvanced(pt, strat_lazy)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(pt, strat_lazy)).scalar, ); if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat); + const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, strat); return (len - 1) * 8 * elem_size + elem_bit_size; }, .vector_type => |vector_type| { const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat); + const elem_bit_size = try child_ty.bitSizeAdvanced(pt, strat); return elem_bit_size * vector_type.len; }, .opt_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; + return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; }, .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), @@ -1725,7 +1726,7 @@ pub fn bitSizeAdvanced( .error_union_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. 
- return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; + return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { @@ -1783,42 +1784,42 @@ pub fn bitSizeAdvanced( const struct_type = ip.loadStructType(ty.toIntern()); const is_packed = struct_type.layout == .@"packed"; if (strat == .sema) { - try ty.resolveFields(mod); - if (is_packed) try ty.resolveLayout(mod); + try ty.resolveFields(pt); + if (is_packed) try ty.resolveLayout(pt); } if (is_packed) { - return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat); + return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat); } - return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; }, .anon_struct_type => { - if (strat == .sema) try ty.resolveFields(mod); - return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + if (strat == .sema) try ty.resolveFields(pt); + return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); const is_packed = ty.containerLayout(mod) == .@"packed"; if (strat == .sema) { - try ty.resolveFields(mod); - if (is_packed) try ty.resolveLayout(mod); + try ty.resolveFields(pt); + if (is_packed) try ty.resolveLayout(pt); } if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; } assert(union_type.flagsPtr(ip).status.haveFieldTypes()); var size: u64 = 0; for (0..union_type.field_types.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat)); + size = @max(size, try Type.fromInterned(field_ty).bitSizeAdvanced(pt, strat)); } return size; }, .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat), + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).bitSizeAdvanced(pt, strat), // values, not types .undef, @@ -1870,7 +1871,7 @@ pub fn isSinglePointer(ty: Type, mod: *const Module) bool { /// Asserts `ty` is a pointer. pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty, mod).?; + return ty.ptrSizeOrNull(mod).?; } /// Returns `null` if `ty` is not a pointer. @@ -2105,29 +2106,28 @@ pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { return mod.unionTagFieldIndex(union_obj, enum_tag); } -pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; +pub fn unionHasAllZeroBitFieldTypes(ty: Type, pt: Zcu.PerThread) bool { + const ip = &pt.zcu.intern_pool; + const union_obj = pt.zcu.typeToUnion(ty).?; for (union_obj.field_types.get(ip)) |field_ty| { - if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; + if (Type.fromInterned(field_ty).hasRuntimeBits(pt)) return false; } return true; } /// Returns the type used for backing storage of this union during comptime operations. /// Asserts the type is either an extern or packed union. 
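/// For `extern` unions this is `[abi_size]u8`; for `packed` unions it is an
/// unsigned integer of the union's bit width.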
-pub fn unionBackingType(ty: Type, mod: *Module) !Type { - return switch (ty.containerLayout(mod)) { - .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), - .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), +pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type { + return switch (ty.containerLayout(pt.zcu)) { + .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(pt), .child = .u8_type }), + .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(pt))), .auto => unreachable, }; } -pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { - const ip = &mod.intern_pool; - const union_obj = ip.loadUnionType(ty.toIntern()); - return mod.getUnionLayout(union_obj); +pub fn unionGetLayout(ty: Type, pt: Zcu.PerThread) Module.UnionLayout { + const union_obj = pt.zcu.intern_pool.loadUnionType(ty.toIntern()); + return pt.getUnionLayout(union_obj); } pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { @@ -2509,7 +2509,8 @@ pub fn isNumeric(ty: Type, mod: *const Module) bool { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. -pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { +pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { + const mod = pt.zcu; var ty = starting_type; const ip = &mod.intern_pool; while (true) switch (ty.toIntern()) { @@ -2518,7 +2519,7 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) { - return try mod.intValue(ty, 0); + return try pt.intValue(ty, 0); } else { return null; } @@ -2534,21 +2535,21 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { inline .array_type, .vector_type => |seq_type, seq_tag| { const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; - if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, - } }))); - if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + } })); + if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| { + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = opv.toIntern() }, - } }))); + } })); } return null; }, .opt_type => |child| { if (child == .noreturn_type) { - return try mod.nullValue(ty); + return try pt.nullValue(ty); } else { return null; } @@ -2615,17 +2616,17 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.onePossibleValue(mod)) |field_opv| { + if (try field_ty.onePossibleValue(pt)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; } // In this case the struct has no runtime-known fields and // therefore has one possible value. 
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = field_vals }, - } }))); + } })); }, .anon_struct_type => |tuple| { @@ -2637,24 +2638,24 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { // TODO: write something like getCoercedInts to avoid needing to dupe const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); defer mod.gpa.free(duped_values); - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = duped_values }, - } }))); + } })); }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); - const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse + const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(pt)) orelse return null; if (union_obj.field_types.len == 0) { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); return Value.fromInterned(only); } const only_field_ty = union_obj.field_types.get(ip)[0]; - const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse + const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(pt)) orelse return null; - const only = try mod.intern(.{ .un = .{ + const only = try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = tag_val.toIntern(), .val = val_val.toIntern(), @@ -2668,8 +2669,8 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { .nonexhaustive => { if (enum_type.tag_ty == .comptime_int_type) return null; - if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { - const only = try mod.intern(.{ .enum_tag = .{ + if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(pt)) |int_opv| { + const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = int_opv.toIntern(), } }); @@ -2679,18 +2680,18 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { return null; }, .auto, .explicit => { - if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; + if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null; switch (enum_type.names.len) { 0 => { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); return Value.fromInterned(only); }, 1 => { if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ + const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ + .int = try pt.intern(.{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = 0 }, } }), @@ -2733,13 +2734,14 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { /// During semantic analysis, instead call `Sema.typeRequiresComptime` which /// resolves field types rather than asserting they are already resolved. -pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable; +pub fn comptimeOnly(ty: Type, pt: Zcu.PerThread) bool { + return ty.comptimeOnlyAdvanced(pt, .normal) catch unreachable; } /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
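/// (The false negatives come from the in-progress `requires_comptime == .wip`
/// marker, which is used below to break recursion through self-referential types.)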
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { +pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool { + const mod = pt.zcu; const ip = &mod.intern_pool; return switch (ty.toIntern()) { .empty_struct_type => false, @@ -2749,19 +2751,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr .ptr_type => |ptr_type| { const child_ty = Type.fromInterned(ptr_type.child); switch (child_ty.zigTypeTag(mod)) { - .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat), + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(pt, strat), .Opaque => return false, - else => return child_ty.comptimeOnlyAdvanced(mod, strat), + else => return child_ty.comptimeOnlyAdvanced(pt, strat), } }, .anyframe_type => |child| { if (child == .none) return false; - return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat); + return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat); }, - .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat), - .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat), - .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat), - .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat), + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(pt, strat), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(pt, strat), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(pt, strat), .error_set_type, .inferred_error_set_type, @@ -2836,13 +2838,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr struct_type.flagsPtr(ip).requires_comptime = .wip; errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; - try ty.resolveFields(mod); + try ty.resolveFields(pt); for (0..struct_type.field_types.len) |i_usize| { const i: u32 = @intCast(i_usize); if (struct_type.fieldIsComptime(ip, i)) continue; const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) { // Note that this does not cause the layout to // be considered resolved. 
Comptime-only types // still maintain a layout of their @@ -2861,7 +2863,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr .anon_struct_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { const have_comptime_val = val != .none; - if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) return true; } return false; }, @@ -2880,11 +2882,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr union_type.flagsPtr(ip).requires_comptime = .wip; errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; - try ty.resolveFields(mod); + try ty.resolveFields(pt); for (0..union_type.field_types.len) |field_idx| { const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) { union_type.flagsPtr(ip).requires_comptime = .yes; return true; } @@ -2898,7 +2900,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr .opaque_type => false, - .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat), + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(pt, strat), // values, not types .undef, @@ -2930,10 +2932,10 @@ pub fn isVector(ty: Type, mod: *const Module) bool { } /// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. -pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { - if (!ty.isVector(zcu)) return 0; - const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; - return v.len * Type.fromInterned(v.child).bitSize(zcu); +pub fn totalVectorBits(ty: Type, pt: Zcu.PerThread) u64 { + if (!ty.isVector(pt.zcu)) return 0; + const v = pt.zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; + return v.len * Type.fromInterned(v.child).bitSize(pt); } pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { @@ -3013,23 +3015,25 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex { } // Works for vectors and vectors of integers. -pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); - return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ +pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { + const mod = pt.zcu; + const scalar = try minIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod)); + return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, - } }))) else scalar; + } })) else scalar; } /// Asserts that the type is an integer. 
-pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { +pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { + const mod = pt.zcu; const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0); - if (info.bits == 0) return mod.intValue(dest_ty, -1); + if (info.signedness == .unsigned) return pt.intValue(dest_ty, 0); + if (info.bits == 0) return pt.intValue(dest_ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); - return mod.intValue(dest_ty, n); + return pt.intValue(dest_ty, n); } var res = try std.math.big.int.Managed.init(mod.gpa); @@ -3037,31 +3041,32 @@ pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { try res.setTwosCompIntLimit(.min, info.signedness, info.bits); - return mod.intValue_big(dest_ty, res.toConst()); + return pt.intValue_big(dest_ty, res.toConst()); } // Works for vectors and vectors of integers. /// The returned Value will have type dest_ty. -pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); - return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ +pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { + const mod = pt.zcu; + const scalar = try maxIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod)); + return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, - } }))) else scalar; + } })) else scalar; } /// The returned Value will have type dest_ty. -pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { - const info = ty.intInfo(mod); +pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { + const info = ty.intInfo(pt.zcu); switch (info.bits) { 0 => return switch (info.signedness) { - .signed => try mod.intValue(dest_ty, -1), - .unsigned => try mod.intValue(dest_ty, 0), + .signed => try pt.intValue(dest_ty, -1), + .unsigned => try pt.intValue(dest_ty, 0), }, 1 => return switch (info.signedness) { - .signed => try mod.intValue(dest_ty, 0), - .unsigned => try mod.intValue(dest_ty, 1), + .signed => try pt.intValue(dest_ty, 0), + .unsigned => try pt.intValue(dest_ty, 1), }, else => {}, } @@ -3069,20 +3074,20 @@ pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) { .signed => { const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift); - return mod.intValue(dest_ty, n); + return pt.intValue(dest_ty, n); }, .unsigned => { const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift); - return mod.intValue(dest_ty, n); + return pt.intValue(dest_ty, n); }, }; - var res = try std.math.big.int.Managed.init(mod.gpa); + var res = try std.math.big.int.Managed.init(pt.zcu.gpa); defer res.deinit(); try res.setTwosCompIntLimit(.max, info.signedness, info.bits); - return mod.intValue_big(dest_ty, res.toConst()); + return pt.intValue_big(dest_ty, res.toConst()); } /// Asserts the type is an enum or a union. 
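(Aside: the `std.math.cast(u6, info.bits - 1)` fast path above deserves a
standalone sketch. The following is illustrative Zig, not part of this patch;
it assumes 1 <= bits <= 64 and shows why arithmetic-shifting the i64 limits
yields the two's-complement limits of an N-bit signed integer, which is what
`minIntScalar`/`maxIntScalar` rely on before falling back to a big integer.)

const std = @import("std");

// Minimum value of a signed `bits`-bit integer, for 1 <= bits <= 64:
// arithmetic right shift of i64's minimum replicates the sign bit,
// leaving -(2^(bits-1)).
fn minIntFast(bits: u7) i64 {
    const shift: u6 = @intCast(bits - 1);
    return @as(i64, std.math.minInt(i64)) >> (63 - shift);
}

// Maximum value of a signed `bits`-bit integer: (2^(bits-1)) - 1.
fn maxIntFast(bits: u7) i64 {
    const shift: u6 = @intCast(bits - 1);
    return @as(i64, std.math.maxInt(i64)) >> (63 - shift);
}

test "shift-based two's-complement limits" {
    try std.testing.expectEqual(@as(i64, -128), minIntFast(8));
    try std.testing.expectEqual(@as(i64, 127), maxIntFast(8));
    try std.testing.expectEqual(@as(i64, std.math.minInt(i64)), minIntFast(64));
}
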
@@ -3188,26 +3193,26 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { }; } -pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable; +pub fn structFieldAlign(ty: Type, index: usize, pt: Zcu.PerThread) Alignment { + return ty.structFieldAlignAdvanced(index, pt, .normal) catch unreachable; } -pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment { - const ip = &zcu.intern_pool; +pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment { + const ip = &pt.zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); assert(struct_type.layout != .@"packed"); const explicit_align = struct_type.fieldAlign(ip, index); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat); + return pt.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat); }, .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(pt, strat.toLazy())).scalar; }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); - return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat); + return pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat); }, else => unreachable, } @@ -3233,7 +3238,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { } } -pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { +pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Value { + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3242,13 +3248,13 @@ pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { assert(struct_type.haveFieldInits(ip)); return Value.fromInterned(struct_type.field_inits.get(ip)[index]); } else { - return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); + return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt); } }, .anon_struct_type => |tuple| { const val = tuple.values.get(ip)[index]; if (val == .none) { - return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); + return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt); } else { return Value.fromInterned(val); } @@ -3272,7 +3278,8 @@ pub const FieldOffset = struct { }; /// Supports structs and unions. -pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { +pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 { + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3287,17 +3294,17 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { var big_align: Alignment = .none; for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) { // comptime field if (i == index) return offset; continue; } - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); offset = field_align.forward(offset); if (i == index) return offset; - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } offset = big_align.max(.@"1").forward(offset); return offset; @@ -3307,7 +3314,7 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { const union_type = ip.loadUnionType(ty.toIntern()); if (!union_type.hasTag(ip)) return 0; - const layout = mod.getUnionLayout(union_type); + const layout = pt.getUnionLayout(union_type); if (layout.tag_align.compare(.gte, layout.payload_align)) { // {Tag, Payload} return layout.payload_align.forward(layout.tag_size); @@ -3421,12 +3428,13 @@ pub fn optEuBaseType(ty: Type, mod: *Module) Type { }; } -pub fn toUnsigned(ty: Type, mod: *Module) !Type { +pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type { + const mod = pt.zcu; return switch (ty.zigTypeTag(mod)) { - .Int => mod.intType(.unsigned, ty.intInfo(mod).bits), - .Vector => try mod.vectorType(.{ + .Int => pt.intType(.unsigned, ty.intInfo(mod).bits), + .Vector => try pt.vectorType(.{ .len = ty.vectorLen(mod), - .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(), + .child = (try ty.childType(mod).toUnsigned(pt)).toIntern(), }), else => unreachable, }; @@ -3492,7 +3500,7 @@ pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } { return .{ cur_ty, cur_len }; } -pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) { +pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, pt: Zcu.PerThread) union(enum) { /// The result is a bit-pointer with the same value and a new packed offset. bit_ptr: InternPool.Key.PtrType.PackedOffset, /// The result is a standard pointer. @@ -3505,6 +3513,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: } { comptime assert(Type.packed_struct_layout_version == 2); + const zcu = pt.zcu; const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); const field_ty = struct_ty.structFieldType(field_idx, zcu); @@ -3515,7 +3524,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: if (i == field_idx) { bit_offset = running_bits; } - running_bits += @intCast(f_ty.bitSize(zcu)); + running_bits += @intCast(f_ty.bitSize(pt)); } const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) @@ -3532,9 +3541,9 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: // targets before adding the necessary complications to this code. This will not // cause miscompilations; it only means the field pointer uses bit masking when it // might not be strictly necessary. 
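     // Two concrete cases (illustrative, not from the original source), assuming
     // a little-endian target: in `packed struct { a: u8, b: u16 }`, field `b`
     // lands at bit offset 8, and @bitSizeOf(u16) == @sizeOf(u16) * 8, so it gets
     // a plain byte pointer at byte offset 1. In `packed struct { a: u3, b: u16 }`,
     // `b` sits at bit offset 3, so it must remain a bit-pointer.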
- if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { + if (res_bit_offset % 8 == 0 and field_ty.bitSize(pt) == field_ty.abiSize(pt) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { const byte_offset = res_bit_offset / 8; - const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); + const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(pt).toByteUnits().?)); return .{ .byte_ptr = .{ .offset = byte_offset, .alignment = new_align, @@ -3547,34 +3556,35 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: } }; } -pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void { +pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void { + const zcu = pt.zcu; const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + .simple_type => |simple_type| return resolveSimpleType(simple_type, pt), else => {}, } switch (ty.zigTypeTag(zcu)) { .Struct => switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); - try field_ty.resolveLayout(zcu); + try field_ty.resolveLayout(pt); }, - .struct_type => return ty.resolveStructInner(zcu, .layout), + .struct_type => return ty.resolveStructInner(pt, .layout), else => unreachable, }, - .Union => return ty.resolveUnionInner(zcu, .layout), + .Union => return ty.resolveUnionInner(pt, .layout), .Array => { if (ty.arrayLenIncludingSentinel(zcu) == 0) return; const elem_ty = ty.childType(zcu); - return elem_ty.resolveLayout(zcu); + return elem_ty.resolveLayout(pt); }, .Optional => { const payload_ty = ty.optionalChild(zcu); - return payload_ty.resolveLayout(zcu); + return payload_ty.resolveLayout(pt); }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); - return payload_ty.resolveLayout(zcu); + return payload_ty.resolveLayout(pt); }, .Fn => { const info = zcu.typeToFunc(ty).?; @@ -3585,16 +3595,16 @@ pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void { } for (0..info.param_types.len) |i| { const param_ty = info.param_types.get(ip)[i]; - try Type.fromInterned(param_ty).resolveLayout(zcu); + try Type.fromInterned(param_ty).resolveLayout(pt); } - try Type.fromInterned(info.return_type).resolveLayout(zcu); + try Type.fromInterned(info.return_type).resolveLayout(pt); }, else => {}, } } -pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void { - const ip = &zcu.intern_pool; +pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { + const ip = &pt.zcu.intern_pool; const ty_ip = ty.toIntern(); switch (ty_ip) { @@ -3680,22 +3690,23 @@ pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void { .type_struct, .type_struct_packed, .type_struct_packed_inits, - => return ty.resolveStructInner(zcu, .fields), + => return ty.resolveStructInner(pt, .fields), - .type_union => return ty.resolveUnionInner(zcu, .fields), + .type_union => return ty.resolveUnionInner(pt, .fields), - .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu), + .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, pt), else => {}, }, } } -pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void { +pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void { + const zcu = pt.zcu; const ip = &zcu.intern_pool; 
switch (ip.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + .simple_type => |simple_type| return resolveSimpleType(simple_type, pt), else => {}, } @@ -3719,52 +3730,53 @@ pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void { .EnumLiteral, => {}, - .Pointer => return ty.childType(zcu).resolveFully(zcu), - .Array => return ty.childType(zcu).resolveFully(zcu), - .Optional => return ty.optionalChild(zcu).resolveFully(zcu), - .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu), + .Pointer => return ty.childType(zcu).resolveFully(pt), + .Array => return ty.childType(zcu).resolveFully(pt), + .Optional => return ty.optionalChild(zcu).resolveFully(pt), + .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(pt), .Fn => { const info = zcu.typeToFunc(ty).?; if (info.is_generic) return; for (0..info.param_types.len) |i| { const param_ty = info.param_types.get(ip)[i]; - try Type.fromInterned(param_ty).resolveFully(zcu); + try Type.fromInterned(param_ty).resolveFully(pt); } - try Type.fromInterned(info.return_type).resolveFully(zcu); + try Type.fromInterned(info.return_type).resolveFully(pt); }, .Struct => switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); - try field_ty.resolveFully(zcu); + try field_ty.resolveFully(pt); }, - .struct_type => return ty.resolveStructInner(zcu, .full), + .struct_type => return ty.resolveStructInner(pt, .full), else => unreachable, }, - .Union => return ty.resolveUnionInner(zcu, .full), + .Union => return ty.resolveUnionInner(pt, .full), } } -pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void { +pub fn resolveStructFieldInits(ty: Type, pt: Zcu.PerThread) SemaError!void { // TODO: stop calling this for tuples! - _ = zcu.typeToStruct(ty) orelse return; - return ty.resolveStructInner(zcu, .inits); + _ = pt.zcu.typeToStruct(ty) orelse return; + return ty.resolveStructInner(pt, .inits); } -pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void { - return ty.resolveStructInner(zcu, .alignment); +pub fn resolveStructAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void { + return ty.resolveStructInner(pt, .alignment); } -pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void { - return ty.resolveUnionInner(zcu, .alignment); +pub fn resolveUnionAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void { + return ty.resolveUnionInner(pt, .alignment); } /// `ty` must be a struct. fn resolveStructInner( ty: Type, - zcu: *Zcu, + pt: Zcu.PerThread, resolution: enum { fields, inits, alignment, layout, full }, ) SemaError!void { + const zcu = pt.zcu; const gpa = zcu.gpa; const struct_obj = zcu.typeToStruct(ty).?; @@ -3777,7 +3789,7 @@ fn resolveStructInner( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = analysis_arena.allocator(), .code = undefined, // This ZIR will not be used. @@ -3804,9 +3816,10 @@ fn resolveStructInner( /// `ty` must be a union. 
fn resolveUnionInner( ty: Type, - zcu: *Zcu, + pt: Zcu.PerThread, resolution: enum { fields, alignment, layout, full }, ) SemaError!void { + const zcu = pt.zcu; const gpa = zcu.gpa; const union_obj = zcu.typeToUnion(ty).?; @@ -3819,7 +3832,7 @@ fn resolveUnionInner( defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ - .mod = zcu, + .pt = pt, .gpa = gpa, .arena = analysis_arena.allocator(), .code = undefined, // This ZIR will not be used. @@ -3845,7 +3858,7 @@ fn resolveUnionInner( /// Fully resolves a simple type. This is usually a nop, but for builtin types with /// special InternPool indices (such as std.builtin.Type) it will analyze and fully /// resolve the type. -fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void { +fn resolveSimpleType(simple_type: InternPool.SimpleType, pt: Zcu.PerThread) Allocator.Error!void { const builtin_type_name: []const u8 = switch (simple_type) { .atomic_order => "AtomicOrder", .atomic_rmw_op => "AtomicRmwOp", @@ -3861,7 +3874,7 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er else => return, }; // This will fully resolve the type. - _ = try zcu.getBuiltinType(builtin_type_name); + _ = try pt.getBuiltinType(builtin_type_name); } /// Returns the type of a pointer to an element. @@ -3874,7 +3887,8 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er /// Handles const-ness and address spaces in particular. /// This code is duplicated in `Sema.analyzePtrArithmetic`. /// May perform type resolution and return a transitive `error.AnalysisFail`. -pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type { +pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type { + const zcu = pt.zcu; const ptr_info = ptr_ty.ptrInfo(zcu); const elem_ty = ptr_ty.elemType2(zcu); const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0; @@ -3887,14 +3901,14 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type { alignment: Alignment = .none, vector_index: VI = .none, } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: { - const elem_bits = elem_ty.bitSize(zcu); + const elem_bits = elem_ty.bitSize(pt); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ .host_size = @intCast(parent_ty.arrayLen(zcu)), - .alignment = parent_ty.abiAlignment(zcu), + .alignment = parent_ty.abiAlignment(pt), .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, }; } else .{}; @@ -3908,7 +3922,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type { } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. 
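         // Worked example (illustrative): for a `[*]u32` (element size 4) with a
         // 4-aligned base pointer and a comptime offset of 3, the addend is
         // 12 bytes; @ctz(12) == 2, so the element pointer keeps alignment 4.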
- const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar; + const elem_size = (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar; const addend = if (offset) |off| elem_size * off else elem_size; // The resulting pointer is aligned to the lcd between the offset (an @@ -3921,7 +3935,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type { assert(new_align != .none); break :a new_align; }; - return zcu.ptrTypeSema(.{ + return pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .flags = .{ .alignment = alignment, @@ -3944,6 +3958,7 @@ pub const @"u16": Type = .{ .ip_index = .u16_type }; pub const @"u29": Type = .{ .ip_index = .u29_type }; pub const @"u32": Type = .{ .ip_index = .u32_type }; pub const @"u64": Type = .{ .ip_index = .u64_type }; +pub const @"u80": Type = .{ .ip_index = .u80_type }; pub const @"u128": Type = .{ .ip_index = .u128_type }; pub const @"i8": Type = .{ .ip_index = .i8_type }; diff --git a/src/Value.zig b/src/Value.zig index 34a0472c169b..21bb207b59c7 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -40,10 +40,10 @@ pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) { return .{ .data = val }; } -pub fn fmtValue(val: Value, mod: *Module, opt_sema: ?*Sema) std.fmt.Formatter(print_value.format) { +pub fn fmtValue(val: Value, pt: Zcu.PerThread, opt_sema: ?*Sema) std.fmt.Formatter(print_value.format) { return .{ .data = .{ .val = val, - .mod = mod, + .pt = pt, .opt_sema = opt_sema, .depth = 3, } }; @@ -55,15 +55,16 @@ pub fn fmtValueFull(ctx: print_value.FormatContext) std.fmt.Formatter(print_valu /// Converts `val` to a null-terminated string stored in the InternPool. /// Asserts `val` is an array of `u8` -pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { +pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + const mod = pt.zcu; assert(ty.zigTypeTag(mod) == .Array); assert(ty.childType(mod).toIntern() == .u8_type); const ip = &mod.intern_pool; switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) { .bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(mod), ip), - .elems => return arrayToIpString(val, ty.arrayLen(mod), mod), + .elems => return arrayToIpString(val, ty.arrayLen(mod), pt), .repeated_elem => |elem| { - const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod)); + const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); const len: usize = @intCast(ty.arrayLen(mod)); try ip.string_bytes.appendNTimes(mod.gpa, byte, len); return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls); @@ -73,16 +74,17 @@ pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminated /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
-pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { +pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) ![]u8 { + const mod = pt.zcu; const ip = &mod.intern_pool; return switch (ip.indexToKey(val.toIntern())) { .enum_literal => |enum_literal| allocator.dupe(u8, enum_literal.toSlice(ip)), - .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(mod), allocator, mod), + .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(pt), allocator, pt), .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(mod), ip)), - .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, pt), .repeated_elem => |elem| { - const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod)); + const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); const result = try allocator.alloc(u8, @intCast(ty.arrayLen(mod))); @memset(result, byte); return result; @@ -92,16 +94,17 @@ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module }; } -fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { +fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, pt: Zcu.PerThread) ![]u8 { const result = try allocator.alloc(u8, @intCast(len)); for (result, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - elem.* = @intCast(elem_val.toUnsignedInt(mod)); + const elem_val = try val.elemValue(pt, i); + elem.* = @intCast(elem_val.toUnsignedInt(pt)); } return result; } -fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { +fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + const mod = pt.zcu; const gpa = mod.gpa; const ip = &mod.intern_pool; const len: usize = @intCast(len_u64); @@ -110,9 +113,9 @@ fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTermi // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. const prev = ip.string_bytes.items.len; - const elem_val = try val.elemValue(mod, i); + const elem_val = try val.elemValue(pt, i); assert(ip.string_bytes.items.len == prev); - const byte: u8 = @intCast(elem_val.toUnsignedInt(mod)); + const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); ip.string_bytes.appendAssumeCapacity(byte); } return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls); @@ -133,14 +136,14 @@ pub fn toType(self: Value) Type { return Type.fromInterned(self.toIntern()); } -pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { - const ip = &mod.intern_pool; +pub fn intFromEnum(val: Value, ty: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const ip = &pt.zcu.intern_pool; const enum_ty = ip.typeOf(val.toIntern()); return switch (ip.indexToKey(enum_ty)) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(enum_literal, mod).?; + const field_index = ty.enumFieldIndex(enum_literal, pt.zcu).?; switch (ip.indexToKey(ty.toIntern())) { // Assume it is already an integer and return it directly. 
.simple_type, .int_type => return val, @@ -150,13 +153,13 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { return Value.fromInterned(enum_type.values.get(ip)[field_index]); } else { // Field index and integer values are the same. - return mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index); + return pt.intValue(Type.fromInterned(enum_type.tag_ty), field_index); } }, else => unreachable, } }, - .enum_type => try mod.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)), + .enum_type => try pt.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)), else => unreachable, }; } @@ -164,38 +167,38 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { pub const ResolveStrat = Type.ResolveStrat; /// Asserts the value is an integer. -pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { - return val.toBigIntAdvanced(space, mod, .normal) catch unreachable; +pub fn toBigInt(val: Value, space: *BigIntSpace, pt: Zcu.PerThread) BigIntConst { + return val.toBigIntAdvanced(space, pt, .normal) catch unreachable; } /// Asserts the value is an integer. pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStrat, ) Module.CompileError!BigIntConst { return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), .lazy_align, .lazy_size => |ty| { - if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod); + if (strat == .sema) try Type.fromInterned(ty).resolveLayout(pt); const x = switch (int.storage) { else => unreachable, - .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, - .lazy_size => Type.fromInterned(ty).abiSize(mod), + .lazy_align => Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0, + .lazy_size => Type.fromInterned(ty).abiSize(pt), }; return BigIntMutable.init(&space.limbs, x).toConst(); }, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat), + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, pt, strat), .opt, .ptr => BigIntMutable.init( &space.limbs, - (try val.getUnsignedIntAdvanced(mod, strat)).?, + (try val.getUnsignedIntAdvanced(pt, strat)).?, ).toConst(), else => unreachable, }, @@ -229,13 +232,14 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. -pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { - return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable; +pub fn getUnsignedInt(val: Value, pt: Zcu.PerThread) ?u64 { + return getUnsignedIntAdvanced(val, pt, .normal) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
-pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 { +pub fn getUnsignedIntAdvanced(val: Value, pt: Zcu.PerThread, strat: ResolveStrat) !?u64 { + const mod = pt.zcu; return switch (val.toIntern()) { .undef => unreachable, .bool_false => 0, @@ -246,22 +250,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), - .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, - .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, + .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0, + .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar, }, .ptr => |ptr| switch (ptr.base_addr) { .int => ptr.byte_offset, .field => |field| { - const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null; + const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(pt, strat)) orelse return null; const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod); - if (strat == .sema) try struct_ty.resolveLayout(mod); - return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset; + if (strat == .sema) try struct_ty.resolveLayout(pt); + return base_addr + struct_ty.structFieldOffset(@intCast(field.index), pt) + ptr.byte_offset; }, else => null, }, .opt => |opt| switch (opt.val) { .none => 0, - else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat), + else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(pt, strat), }, else => null, }, @@ -269,27 +273,27 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 { - return getUnsignedInt(val, zcu).?; +pub fn toUnsignedInt(val: Value, pt: Zcu.PerThread) u64 { + return getUnsignedInt(val, pt).?; } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 { - return (try getUnsignedIntAdvanced(val, zcu, .sema)).?; +pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 { + return (try getUnsignedIntAdvanced(val, pt, .sema)).?; } /// Asserts the value is an integer and it fits in a i64 -pub fn toSignedInt(val: Value, mod: *Module) i64 { +pub fn toSignedInt(val: Value, pt: Zcu.PerThread) i64 { return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, .u64 => |x| @intCast(x), - .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0), - .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)), + .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0), + .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(pt)), }, else => unreachable, }, @@ -321,16 +325,17 @@ fn ptrHasIntAddr(val: Value, mod: *Module) bool { /// /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past /// the end of the value in memory. 
-pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ +pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) error{ ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented, OutOfMemory, }!void { + const mod = pt.zcu; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const size: usize = @intCast(ty.abiSize(mod)); + const size: usize = @intCast(ty.abiSize(pt)); @memset(buffer[0..size], 0xaa); return; } @@ -346,41 +351,41 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); var bigint_buffer: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buffer, mod); + const bigint = val.toBigInt(&bigint_buffer, pt); bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, mod)), endian), - 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, mod)), endian), - 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, mod)), endian), - 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, mod)), endian), - 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, mod)), endian), + 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, pt)), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, pt)), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, pt)), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, pt)), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, pt)), endian), else => unreachable, }, .Array => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); - const elem_size: usize = @intCast(elem_ty.abiSize(mod)); + const elem_size: usize = @intCast(elem_ty.abiSize(pt)); var elem_i: usize = 0; var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { - const elem_val = try val.elemValue(mod, elem_i); - try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); + const elem_val = try val.elemValue(pt, elem_i); + try elem_val.writeToMemory(elem_ty, pt, buffer[buf_off..]); buf_off += elem_size; } }, .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. 
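             // For example (illustrative): a `@Vector(3, u8)` has a bit size of 24,
             // so byte_count is 3 even if the vector's ABI size is rounded up
             // (commonly to 4); writing only those 3 bytes keeps any padding after
             // the data on both endiannesses.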
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8; + return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0); }, .Struct => { const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout; switch (struct_type.layout) { .auto => return error.IllDefinedMemoryLayout, .@"extern" => for (0..struct_type.field_types.len) |field_index| { - const off: usize = @intCast(ty.structFieldOffset(field_index, mod)); + const off: usize = @intCast(ty.structFieldOffset(field_index, pt)); const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) { .bytes => |bytes| { buffer[off] = bytes.at(field_index, ip); @@ -390,11 +395,11 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ .repeated_elem => |elem| elem, }); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - try writeToMemory(field_val, field_ty, mod, buffer[off..]); + try writeToMemory(field_val, field_ty, pt, buffer[off..]); }, .@"packed" => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8; + return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0); }, } }, @@ -421,34 +426,34 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ const union_obj = mod.typeToUnion(ty).?; const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]); - const field_val = try val.fieldValue(mod, field_index); - const byte_count: usize = @intCast(field_type.abiSize(mod)); - return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]); + const field_val = try val.fieldValue(pt, field_index); + const byte_count: usize = @intCast(field_type.abiSize(pt)); + return writeToMemory(field_val, field_type, pt, buffer[0..byte_count]); } else { - const backing_ty = try ty.unionBackingType(mod); - const byte_count: usize = @intCast(backing_ty.abiSize(mod)); - return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]); + const backing_ty = try ty.unionBackingType(pt); + const byte_count: usize = @intCast(backing_ty.abiSize(pt)); + return writeToMemory(val.unionValue(mod), backing_ty, pt, buffer[0..byte_count]); } }, .@"packed" => { - const backing_ty = try ty.unionBackingType(mod); - const byte_count: usize = @intCast(backing_ty.abiSize(mod)); - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + const backing_ty = try ty.unionBackingType(pt); + const byte_count: usize = @intCast(backing_ty.abiSize(pt)); + return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0); }, }, .Pointer => { if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef; - return val.writeToMemory(Type.usize, mod, buffer); + return val.writeToMemory(Type.usize, pt, buffer); }, .Optional => { if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { - return some.writeToMemory(child, mod, buffer); + return some.writeToMemory(child, pt, buffer); } else { - return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, 
mod, buffer); + return writeToMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer); } }, else => return error.Unimplemented, @@ -462,15 +467,16 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ pub fn writeToPackedMemory( val: Value, ty: Type, - mod: *Module, + pt: Zcu.PerThread, buffer: []u8, bit_offset: usize, ) error{ ReinterpretDeclRef, OutOfMemory }!void { + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef(mod)) { - const bit_size: usize = @intCast(ty.bitSize(mod)); + const bit_size: usize = @intCast(ty.bitSize(pt)); if (bit_size != 0) { std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); } @@ -494,30 +500,30 @@ pub fn writeToPackedMemory( const bits = ty.intInfo(mod).bits; if (bits == 0) return; - switch (ip.indexToKey((try val.intFromEnum(ty, mod)).toIntern()).int.storage) { + switch (ip.indexToKey((try val.intFromEnum(ty, pt)).toIntern()).int.storage) { inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian), .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian), .lazy_align => |lazy_align| { - const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits() orelse 0; + const num = Type.fromInterned(lazy_align).abiAlignment(pt).toByteUnits() orelse 0; std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); }, .lazy_size => |lazy_size| { - const num = Type.fromInterned(lazy_size).abiSize(mod); + const num = Type.fromInterned(lazy_size).abiSize(pt); std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); }, } }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, mod)), endian), - 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, mod)), endian), - 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, mod)), endian), - 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, mod)), endian), - 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, mod)), endian), + 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, pt)), endian), + 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, pt)), endian), + 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, pt)), endian), + 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, pt)), endian), + 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, pt)), endian), else => unreachable, }, .Vector => { const elem_ty = ty.childType(mod); - const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod)); + const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt)); const len: usize = @intCast(ty.arrayLen(mod)); var bits: u16 = 0; @@ -525,8 +531,8 @@ pub fn writeToPackedMemory( while (elem_i < len) : (elem_i += 1) { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i; - const elem_val = try val.elemValue(mod, tgt_elem_i); - try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits); + const elem_val = try val.elemValue(pt, tgt_elem_i); + try elem_val.writeToPackedMemory(elem_ty, pt, buffer, bit_offset + bits); bits += elem_bit_size; } }, @@ -543,8 +549,8 @@ pub fn writeToPackedMemory( 
.repeated_elem => |elem| elem, }); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - const field_bits: u16 = @intCast(field_ty.bitSize(mod)); - try field_val.writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits); + const field_bits: u16 = @intCast(field_ty.bitSize(pt)); + try field_val.writeToPackedMemory(field_ty, pt, buffer, bit_offset + bits); bits += field_bits; } }, @@ -556,11 +562,11 @@ pub fn writeToPackedMemory( if (val.unionTag(mod)) |union_tag| { const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const field_val = try val.fieldValue(mod, field_index); - return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); + const field_val = try val.fieldValue(pt, field_index); + return field_val.writeToPackedMemory(field_type, pt, buffer, bit_offset); } else { - const backing_ty = try ty.unionBackingType(mod); - return val.unionValue(mod).writeToPackedMemory(backing_ty, mod, buffer, bit_offset); + const backing_ty = try ty.unionBackingType(pt); + return val.unionValue(mod).writeToPackedMemory(backing_ty, pt, buffer, bit_offset); } }, } @@ -568,16 +574,16 @@ pub fn writeToPackedMemory( .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef; - return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); + return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset); }, .Optional => { assert(ty.isPtrLikeOptional(mod)); const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { - return some.writeToPackedMemory(child, mod, buffer, bit_offset); + return some.writeToPackedMemory(child, pt, buffer, bit_offset); } else { - return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset); + return writeToPackedMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer, bit_offset); } }, else => @panic("TODO implement writeToPackedMemory for more types"), @@ -590,7 +596,7 @@ pub fn writeToPackedMemory( /// the end of the value in memory. 
pub fn readFromMemory( ty: Type, - mod: *Module, + pt: Zcu.PerThread, buffer: []const u8, arena: Allocator, ) error{ @@ -598,6 +604,7 @@ pub fn readFromMemory( Unimplemented, OutOfMemory, }!Value { + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); @@ -642,7 +649,7 @@ pub fn readFromMemory( return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty); } }, - .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ + .Float => return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) }, @@ -652,25 +659,25 @@ pub fn readFromMemory( 128 => .{ .f128 = @bitCast(std.mem.readInt(u128, buffer[0..16], endian)) }, else => unreachable, }, - } }))), + } })), .Array => { const elem_ty = ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { elem.* = (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).toIntern(); offset += @intCast(elem_size); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, - } }))); + } })); }, .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, .Struct => { @@ -683,16 +690,16 @@ pub fn readFromMemory( for (field_vals, 0..) 
|*field_val, i| { const field_ty = Type.fromInterned(field_types.get(ip)[i]); const off: usize = @intCast(ty.structFieldOffset(i, mod)); - const sz: usize = @intCast(field_ty.abiSize(mod)); + const sz: usize = @intCast(field_ty.abiSize(pt)); field_val.* = (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = field_vals }, - } }))); + } })); }, .@"packed" => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, } @@ -704,49 +711,49 @@ pub fn readFromMemory( const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); const name = mod.global_error_set.keys()[@intCast(index)]; - return Value.fromInterned((try mod.intern(.{ .err = .{ + return Value.fromInterned(try pt.intern(.{ .err = .{ .ty = ty.toIntern(), .name = name, - } }))); + } })); }, .Union => switch (ty.containerLayout(mod)) { .auto => return error.IllDefinedMemoryLayout, .@"extern" => { - const union_size = ty.abiSize(mod); + const union_size = ty.abiSize(pt); const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type }); const val = (try readFromMemory(array_ty, mod, buffer, arena)).toIntern(); - return Value.fromInterned((try mod.intern(.{ .un = .{ + return Value.fromInterned(try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = .none, .val = val, - } }))); + } })); }, .@"packed" => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, }, .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. const int_val = try readFromMemory(Type.usize, mod, buffer, arena); - return Value.fromInterned((try mod.intern(.{ .ptr = .{ + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = ty.toIntern(), .base_addr = .int, - .byte_offset = int_val.toUnsignedInt(mod), - } }))); + .byte_offset = int_val.toUnsignedInt(pt), + } })); }, .Optional => { assert(ty.isPtrLikeOptional(mod)); const child_ty = ty.optionalChild(mod); const child_val = try readFromMemory(child_ty, mod, buffer, arena); - return Value.fromInterned((try mod.intern(.{ .opt = .{ + return Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = ty.toIntern(), - .val = switch (child_val.orderAgainstZero(mod)) { + .val = switch (child_val.orderAgainstZero(pt)) { .lt => unreachable, .eq => .none, .gt => child_val.toIntern(), }, - } }))); + } })); }, else => return error.Unimplemented, } @@ -758,7 +765,7 @@ pub fn readFromMemory( /// big-endian packed memory layouts start at the end of the buffer. 
pub fn readFromPackedMemory( ty: Type, - mod: *Module, + pt: Zcu.PerThread, buffer: []const u8, bit_offset: usize, arena: Allocator, @@ -766,6 +773,7 @@ pub fn readFromPackedMemory( IllDefinedMemoryLayout, OutOfMemory, }!Value { + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); @@ -783,35 +791,35 @@ pub fn readFromPackedMemory( } }, .Int => { - if (buffer.len == 0) return mod.intValue(ty, 0); + if (buffer.len == 0) return pt.intValue(ty, 0); const int_info = ty.intInfo(mod); const bits = int_info.bits; - if (bits == 0) return mod.intValue(ty, 0); + if (bits == 0) return pt.intValue(ty, 0); // Fast path for integers <= u64 if (bits <= 64) switch (int_info.signedness) { // Use different backing types for unsigned vs signed to avoid the need to go via // a larger type like `i128`. - .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), - .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), + .unsigned => return pt.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), + .signed => return pt.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), }; // Slow path, we have to construct a big-int - const abi_size: usize = @intCast(ty.abiSize(mod)); + const abi_size: usize = @intCast(ty.abiSize(pt)); const Limb = std.math.big.Limb; const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); const limbs_buffer = try arena.alloc(Limb, limb_count); var bigint = BigIntMutable.init(limbs_buffer, 0); bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); + return pt.intValue_big(ty, bigint.toConst()); }, .Enum => { const int_ty = ty.intTagType(mod); - const int_val = try Value.readFromPackedMemory(int_ty, mod, buffer, bit_offset, arena); - return mod.getCoerced(int_val, ty); + const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena); + return pt.getCoerced(int_val, ty); }, - .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ + .Float => return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, @@ -821,23 +829,23 @@ pub fn readFromPackedMemory( 128 => .{ .f128 = @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian)) }, else => unreachable, }, - } }))), + } })), .Vector => { const elem_ty = ty.childType(mod); const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod))); var bits: u16 = 0; - const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod)); + const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt)); for (elems, 0..) 
|_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i; - elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).toIntern(); + elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern(); bits += elem_bit_size; } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = elems }, - } }))); + } })); }, .Struct => { // Sema is supposed to have emitted a compile error already for Auto layout structs, @@ -847,43 +855,43 @@ pub fn readFromPackedMemory( const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len); for (field_vals, 0..) |*field_val, i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - const field_bits: u16 = @intCast(field_ty.bitSize(mod)); - field_val.* = (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).toIntern(); + const field_bits: u16 = @intCast(field_ty.bitSize(pt)); + field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern(); bits += field_bits; } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = field_vals }, - } }))); + } })); }, .Union => switch (ty.containerLayout(mod)) { .auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory .@"packed" => { - const backing_ty = try ty.unionBackingType(mod); - const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern(); - return Value.fromInterned((try mod.intern(.{ .un = .{ + const backing_ty = try ty.unionBackingType(pt); + const val = (try readFromPackedMemory(backing_ty, pt, buffer, bit_offset, arena)).toIntern(); + return Value.fromInterned(try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = .none, .val = val, - } }))); + } })); }, }, .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. - const int_val = try readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); - return Value.fromInterned(try mod.intern(.{ .ptr = .{ + const int_val = try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = ty.toIntern(), .base_addr = .int, - .byte_offset = int_val.toUnsignedInt(mod), + .byte_offset = int_val.toUnsignedInt(pt), } })); }, .Optional => { assert(ty.isPtrLikeOptional(mod)); const child_ty = ty.optionalChild(mod); - const child_val = try readFromPackedMemory(child_ty, mod, buffer, bit_offset, arena); - return Value.fromInterned(try mod.intern(.{ .opt = .{ + const child_val = try readFromPackedMemory(child_ty, pt, buffer, bit_offset, arena); + return Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = ty.toIntern(), - .val = switch (child_val.orderAgainstZero(mod)) { + .val = switch (child_val.orderAgainstZero(pt)) { .lt => unreachable, .eq => .none, .gt => child_val.toIntern(), @@ -895,8 +903,8 @@ pub fn readFromPackedMemory( } /// Asserts that the value is a float or an integer. 
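/// Usage sketch (illustration, not part of the patch; `v` is a hypothetical
/// interned numeric value and `pt` the per-thread context that replaces `mod`):
///
///     const approx = v.toFloat(f64, pt);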
-pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { +pub fn toFloat(val: Value, comptime T: type, pt: Zcu.PerThread) T { + return switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)), inline .u64, .i64 => |x| { @@ -905,8 +913,8 @@ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { } return @floatFromInt(x); }, - .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0), - .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)), + .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0), + .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(pt)), }, .float => |float| switch (float.storage) { inline else => |x| @floatCast(x), @@ -934,29 +942,30 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 { } } -pub fn clz(val: Value, ty: Type, mod: *Module) u64 { +pub fn clz(val: Value, ty: Type, pt: Zcu.PerThread) u64 { var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return bigint.clz(ty.intInfo(mod).bits); + const bigint = val.toBigInt(&bigint_buf, pt); + return bigint.clz(ty.intInfo(pt.zcu).bits); } -pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { +pub fn ctz(val: Value, ty: Type, pt: Zcu.PerThread) u64 { var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return bigint.ctz(ty.intInfo(mod).bits); + const bigint = val.toBigInt(&bigint_buf, pt); + return bigint.ctz(ty.intInfo(pt.zcu).bits); } -pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { +pub fn popCount(val: Value, ty: Type, pt: Zcu.PerThread) u64 { var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return @intCast(bigint.popCount(ty.intInfo(mod).bits)); + const bigint = val.toBigInt(&bigint_buf, pt); + return @intCast(bigint.popCount(ty.intInfo(pt.zcu).bits)); } -pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { +pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value { + const mod = pt.zcu; const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; - const operand_bigint = val.toBigInt(&buffer, mod); + const operand_bigint = val.toBigInt(&buffer, pt); const limbs = try arena.alloc( std.math.big.Limb, @@ -965,17 +974,18 @@ pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitReverse(operand_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } -pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { +pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value { + const mod = pt.zcu; const info = ty.intInfo(mod); // Bit count must be evenly divisible by 8 assert(info.bits % 8 == 0); var buffer: Value.BigIntSpace = undefined; - const operand_bigint = val.toBigInt(&buffer, mod); + const operand_bigint = val.toBigInt(&buffer, pt); const limbs = try arena.alloc( std.math.big.Limb, @@ -984,33 +994,33 @@ pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { var result_bigint = BigIntMutable{ .limbs = limbs, 
.positive = undefined, .len = undefined }; result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. -pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { +pub fn intBitCountTwosComp(self: Value, pt: Zcu.PerThread) usize { var buffer: BigIntSpace = undefined; - const big_int = self.toBigInt(&buffer, mod); + const big_int = self.toBigInt(&buffer, pt); return big_int.bitCountTwosComp(); } /// Converts an integer or a float to a float. May result in a loss of information. /// Caller can find out by equality checking the result against the operand. -pub fn floatCast(val: Value, dest_ty: Type, zcu: *Zcu) !Value { - const target = zcu.getTarget(); - if (val.isUndef(zcu)) return zcu.undefValue(dest_ty); - return Value.fromInterned((try zcu.intern(.{ .float = .{ +pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value { + const target = pt.zcu.getTarget(); + if (val.isUndef(pt.zcu)) return pt.undefValue(dest_ty); + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = dest_ty.toIntern(), .storage = switch (dest_ty.floatBits(target)) { - 16 => .{ .f16 = val.toFloat(f16, zcu) }, - 32 => .{ .f32 = val.toFloat(f32, zcu) }, - 64 => .{ .f64 = val.toFloat(f64, zcu) }, - 80 => .{ .f80 = val.toFloat(f80, zcu) }, - 128 => .{ .f128 = val.toFloat(f128, zcu) }, + 16 => .{ .f16 = val.toFloat(f16, pt) }, + 32 => .{ .f32 = val.toFloat(f32, pt) }, + 64 => .{ .f64 = val.toFloat(f64, pt) }, + 80 => .{ .f80 = val.toFloat(f80, pt) }, + 128 => .{ .f128 = val.toFloat(f128, pt) }, else => unreachable, }, - } }))); + } })); } /// Asserts the value is a float @@ -1023,19 +1033,19 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool { }; } -pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { - return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable; +pub fn orderAgainstZero(lhs: Value, pt: Zcu.PerThread) std.math.Order { + return orderAgainstZeroAdvanced(lhs, pt, .normal) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStrat, ) Module.CompileError!std.math.Order { return switch (lhs.toIntern()) { .bool_false => .eq, .bool_true => .gt, - else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + else => switch (pt.zcu.intern_pool.indexToKey(lhs.toIntern())) { .ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) { .decl, .comptime_alloc, .comptime_field => .gt, .int => .eq, @@ -1046,7 +1056,7 @@ pub fn orderAgainstZeroAdvanced( inline .u64, .i64 => |x| std.math.order(x, 0), .lazy_align => .gt, // alignment is never 0 .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( - mod, + pt, false, strat.toLazy(), ) catch |err| switch (err) { @@ -1054,7 +1064,7 @@ pub fn orderAgainstZeroAdvanced( else => |e| return e, }) .gt else .eq, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat), + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(pt, strat), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1064,14 +1074,14 @@ pub fn orderAgainstZeroAdvanced( } /// Asserts the value is comparable. 
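/// Migrated-call sketch (hypothetical values `a` and `b`):
///
///     const ord = a.order(b, pt); // std.math.Order: .lt, .eq, or .gt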
-pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { - return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable; +pub fn order(lhs: Value, rhs: Value, pt: Zcu.PerThread) std.math.Order { + return orderAdvanced(lhs, rhs, pt, .normal) catch unreachable; } /// Asserts the value is comparable. -pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order { - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat); +pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, strat: ResolveStrat) !std.math.Order { + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(pt, strat); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(pt, strat); switch (lhs_against_zero) { .lt => if (rhs_against_zero != .lt) return .lt, .eq => return rhs_against_zero.invert(), @@ -1083,34 +1093,34 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) .gt => {}, } - if (lhs.isFloat(mod) or rhs.isFloat(mod)) { - const lhs_f128 = lhs.toFloat(f128, mod); - const rhs_f128 = rhs.toFloat(f128, mod); + if (lhs.isFloat(pt.zcu) or rhs.isFloat(pt.zcu)) { + const lhs_f128 = lhs.toFloat(f128, pt); + const rhs_f128 = rhs.toFloat(f128, pt); return std.math.order(lhs_f128, rhs_f128); } var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, pt, strat); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, pt, strat); return lhs_bigint.order(rhs_bigint); } /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. -pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { - return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable; +pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, pt: Zcu.PerThread) bool { + return compareHeteroAdvanced(lhs, op, rhs, pt, .normal) catch unreachable; } pub fn compareHeteroAdvanced( lhs: Value, op: std.math.CompareOperator, rhs: Value, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStrat, ) !bool { - if (lhs.pointerDecl(mod)) |lhs_decl| { - if (rhs.pointerDecl(mod)) |rhs_decl| { + if (lhs.pointerDecl(pt.zcu)) |lhs_decl| { + if (rhs.pointerDecl(pt.zcu)) |rhs_decl| { switch (op) { .eq => return lhs_decl == rhs_decl, .neq => return lhs_decl != rhs_decl, @@ -1123,31 +1133,32 @@ pub fn compareHeteroAdvanced( else => {}, } } - } else if (rhs.pointerDecl(mod)) |_| { + } else if (rhs.pointerDecl(pt.zcu)) |_| { switch (op) { .eq => return false, .neq => return true, else => {}, } } - return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op); + return (try orderAdvanced(lhs, rhs, pt, strat)).compare(op); } /// Asserts the values are comparable. Both operands have type `ty`. /// For vectors, returns true if comparison is true for ALL elements. 
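/// Sketch (hypothetical `lhs`/`rhs` of vector type `ty`): true only when every
/// element of `lhs` is <= its counterpart in `rhs`:
///
///     const all_le = try lhs.compareAll(.lte, rhs, ty, pt);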
-pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool { +pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, pt: Zcu.PerThread) !bool { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const scalar_ty = ty.scalarType(mod); for (0..ty.vectorLen(mod)) |i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) { + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, pt)) { return false; } } return true; } - return compareScalar(lhs, op, rhs, ty, mod); + return compareScalar(lhs, op, rhs, ty, pt); } /// Asserts the values are comparable. Both operands have type `ty`. @@ -1156,12 +1167,12 @@ pub fn compareScalar( op: std.math.CompareOperator, rhs: Value, ty: Type, - mod: *Module, + pt: Zcu.PerThread, ) bool { return switch (op) { - .eq => lhs.eql(rhs, ty, mod), - .neq => !lhs.eql(rhs, ty, mod), - else => compareHetero(lhs, op, rhs, mod), + .eq => lhs.eql(rhs, ty, pt.zcu), + .neq => !lhs.eql(rhs, ty, pt.zcu), + else => compareHetero(lhs, op, rhs, pt), }; } @@ -1170,24 +1181,25 @@ pub fn compareScalar( /// Returns `false` if the value or any vector element is undefined. /// /// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)` -pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { - return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable; +pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, pt: Zcu.PerThread) bool { + return compareAllWithZeroAdvancedExtra(lhs, op, pt, .normal) catch unreachable; } pub fn compareAllWithZeroSema( lhs: Value, op: std.math.CompareOperator, - zcu: *Zcu, + pt: Zcu.PerThread, ) Module.CompileError!bool { - return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema); + return compareAllWithZeroAdvancedExtra(lhs, op, pt, .sema); } pub fn compareAllWithZeroAdvancedExtra( lhs: Value, op: std.math.CompareOperator, - mod: *Module, + pt: Zcu.PerThread, strat: ResolveStrat, ) Module.CompileError!bool { + const mod = pt.zcu; if (lhs.isInf(mod)) { switch (op) { .neq => return true, @@ -1206,14 +1218,14 @@ pub fn compareAllWithZeroAdvancedExtra( if (!std.math.order(byte, 0).compare(op)) break false; } else true, .elems => |elems| for (elems) |elem| { - if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false; + if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat)) break false; } else true, - .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat), + .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat), }, .undef => return false, else => {}, } - return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op); + return (try orderAgainstZeroAdvanced(lhs, pt, strat)).compare(op); } pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { @@ -1275,21 +1287,22 @@ pub fn slicePtr(val: Value, mod: *Module) Value { /// Gets the `len` field of a slice value as a `u64`. /// Resolves the length using `Sema` if necessary. 
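/// Sketch (hypothetical slice value `slice_val`):
///
///     const len: u64 = try slice_val.sliceLen(pt);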
-pub fn sliceLen(val: Value, zcu: *Zcu) !u64 { - return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu); +pub fn sliceLen(val: Value, pt: Zcu.PerThread) !u64 { + return Value.fromInterned(pt.zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(pt); } /// Asserts the value is an aggregate, and returns the element value at the given index. -pub fn elemValue(val: Value, zcu: *Zcu, index: usize) Allocator.Error!Value { +pub fn elemValue(val: Value, pt: Zcu.PerThread, index: usize) Allocator.Error!Value { + const zcu = pt.zcu; const ip = &zcu.intern_pool; switch (zcu.intern_pool.indexToKey(val.toIntern())) { .undef => |ty| { - return Value.fromInterned(try zcu.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() })); + return Value.fromInterned(try pt.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() })); }, .aggregate => |aggregate| { const len = ip.aggregateTypeLen(aggregate.ty); if (index < len) return Value.fromInterned(switch (aggregate.storage) { - .bytes => |bytes| try zcu.intern(.{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(index, ip) }, } }), @@ -1330,17 +1343,17 @@ pub fn sliceArray( start: usize, end: usize, ) error{OutOfMemory}!Value { - const mod = sema.mod; - const ip = &mod.intern_pool; - return Value.fromInterned(try mod.intern(.{ + const pt = sema.pt; + const ip = &pt.zcu.intern_pool; + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ - .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { - .array_type => |array_type| try mod.arrayType(.{ + .ty = switch (pt.zcu.intern_pool.indexToKey(pt.zcu.intern_pool.typeOf(val.toIntern()))) { + .array_type => |array_type| try pt.arrayType(.{ .len = @intCast(end - start), .child = array_type.child, .sentinel = if (end == array_type.len) array_type.sentinel else .none, }), - .vector_type => |vector_type| try mod.vectorType(.{ + .vector_type => |vector_type| try pt.vectorType(.{ .len = @intCast(end - start), .child = vector_type.child, }), @@ -1363,13 +1376,14 @@ pub fn sliceArray( })); } -pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { +pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { + const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => |ty| Value.fromInterned((try mod.intern(.{ + .undef => |ty| Value.fromInterned(try pt.intern(.{ .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(), - }))), + })), .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) { - .bytes => |bytes| try mod.intern(.{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(index, &mod.intern_pool) }, } }), @@ -1483,40 +1497,49 @@ pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, }; } -pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { +pub fn floatFromIntAdvanced( + val: Value, + arena: Allocator, + int_ty: Type, + float_ty: Type, + pt: Zcu.PerThread, + strat: ResolveStrat, +) !Value { + const mod = pt.zcu; if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatFromIntScalar(val, float_ty, mod, strat); + return floatFromIntScalar(val, float_ty, pt, strat); } -pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { +pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Value { + const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => try mod.undefValue(float_ty), + .undef => try pt.undefValue(float_ty), .int => |int| switch (int.storage) { .big_int => |big_int| { const float = bigIntToFloat(big_int.limbs, big_int.positive); - return mod.floatValue(float_ty, float); + return pt.floatValue(float_ty, float); }, - inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), - .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod), - .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod), + inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, pt), + .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, pt), + .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar, float_ty, pt), }, else => unreachable, }; } -fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { - const target = mod.getTarget(); +fn floatFromIntInner(x: anytype, dest_ty: Type, pt: Zcu.PerThread) !Value { + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { 16 => .{ .f16 = @floatFromInt(x) }, 32 => .{ .f32 = @floatFromInt(x) }, @@ -1525,10 +1548,10 @@ fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { 128 => .{ .f128 = @floatFromInt(x) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = dest_ty.toIntern(), .storage = storage, - } }))); + } })); } fn calcLimbLenFloat(scalar: anytype) usize { @@ -1551,22 +1574,22 @@ pub fn intAddSat( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intAddSatScalar(lhs, rhs, ty, arena, mod); + return intAddSatScalar(lhs, rhs, ty, arena, pt); } /// Supports integers only; asserts neither operand is undefined. @@ -1575,24 +1598,24 @@ pub fn intAddSatScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); + assert(!lhs.isUndef(pt.zcu)); + assert(!rhs.isUndef(pt.zcu)); - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// Supports (vectors of) integers only; asserts neither operand is undefined. @@ -1601,22 +1624,22 @@ pub fn intSubSat( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intSubSatScalar(lhs, rhs, ty, arena, mod); + return intSubSatScalar(lhs, rhs, ty, arena, pt); } /// Supports integers only; asserts neither operand is undefined. 
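/// Worked example (hypothetical `i8` operands): -128 - 1 does not fit, so the
/// result saturates to -128 instead of wrapping, matching the `-|` operator.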
@@ -1625,24 +1648,24 @@ pub fn intSubSatScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); + assert(!lhs.isUndef(pt.zcu)); + assert(!rhs.isUndef(pt.zcu)); - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } pub fn intMulWithOverflow( @@ -1650,32 +1673,33 @@ pub fn intMulWithOverflow( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !OverflowArithmeticResult { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); const overflowed_data = try arena.alloc(InternPool.Index, vec_len); const result_data = try arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); for (overflowed_data, result_data, 0..) |*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + } })), + .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))), + } })), }; } - return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); + return intMulWithOverflowScalar(lhs, rhs, ty, arena, pt); } pub fn intMulWithOverflowScalar( @@ -1683,21 +1707,22 @@ pub fn intMulWithOverflowScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !OverflowArithmeticResult { + const mod = pt.zcu; const info = ty.intInfo(mod); if (lhs.isUndef(mod) or rhs.isUndef(mod)) { return .{ - .overflow_bit = try mod.undefValue(Type.u1), - .wrapped_result = try mod.undefValue(ty), + .overflow_bit = try pt.undefValue(Type.u1), + .wrapped_result = try pt.undefValue(ty), }; } var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try 
arena.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -1715,8 +1740,8 @@ pub fn intMulWithOverflowScalar( } return OverflowArithmeticResult{ - .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), - .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), + .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), + .wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()), }; } @@ -1726,22 +1751,23 @@ pub fn numberMulWrap( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return numberMulWrapScalar(lhs, rhs, ty, arena, mod); + return numberMulWrapScalar(lhs, rhs, ty, arena, pt); } /// Supports both floats and ints; handles undefined. @@ -1750,19 +1776,20 @@ pub fn numberMulWrapScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { - return intMul(lhs, rhs, ty, undefined, arena, mod); + return intMul(lhs, rhs, ty, undefined, arena, pt); } if (ty.isAnyFloat()) { - return floatMul(lhs, rhs, ty, arena, mod); + return floatMul(lhs, rhs, ty, arena, pt); } - const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod); + const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, pt); return overflow_result.wrapped_result; } @@ -1772,22 +1799,22 @@ pub fn intMulSat( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intMulSatScalar(lhs, rhs, ty, arena, mod); + return intMulSatScalar(lhs, rhs, ty, arena, pt); } /// Supports (vectors of) integers only; asserts neither operand is undefined. 
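/// Worked example (hypothetical `u8` operands): 16 * 32 = 512 does not fit, so
/// the result saturates to 255, matching the `*|` operator.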
@@ -1796,17 +1823,17 @@ pub fn intMulSatScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); + assert(!lhs.isUndef(pt.zcu)); + assert(!rhs.isUndef(pt.zcu)); - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try arena.alloc( std.math.big.Limb, @max( @@ -1822,53 +1849,55 @@ pub fn intMulSatScalar( ); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. -pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; - if (lhs.isNan(mod)) return rhs; - if (rhs.isNan(mod)) return lhs; +pub fn numberMax(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value { + if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef; + if (lhs.isNan(pt.zcu)) return rhs; + if (rhs.isNan(pt.zcu)) return lhs; - return switch (order(lhs, rhs, mod)) { + return switch (order(lhs, rhs, pt)) { .lt => rhs, .gt, .eq => lhs, }; } /// Supports both floats and ints; handles undefined. -pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; - if (lhs.isNan(mod)) return rhs; - if (rhs.isNan(mod)) return lhs; +pub fn numberMin(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value { + if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef; + if (lhs.isNan(pt.zcu)) return rhs; + if (rhs.isNan(pt.zcu)) return lhs; - return switch (order(lhs, rhs, mod)) { + return switch (order(lhs, rhs, pt)) { .lt => lhs, .gt, .eq => rhs, }; } /// operands must be (vectors of) integers; handles undefined scalars. -pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { +pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return bitwiseNotScalar(val, ty, arena, mod); + return bitwiseNotScalar(val, ty, arena, pt); } /// operands must be integers; handles undefined. 
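/// Sketch (hypothetical `u8` value `v` holding 0b1010_1010):
///
///     const flipped = try v.bitwiseNotScalar(ty, arena, pt); // 0b0101_0101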
-pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); +pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; + if (val.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() })); if (ty.toIntern() == .bool_type) return makeBool(!val.toBool()); const info = ty.intInfo(mod); @@ -1880,7 +1909,7 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !V // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, mod); + const val_bigint = val.toBigInt(&val_space, pt); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -1888,29 +1917,31 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !V var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. -pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); + return bitwiseAndScalar(lhs, rhs, ty, allocator, pt); } /// operands must be integers; handles undefined. -pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value { +pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const zcu = pt.zcu; // If one operand is defined, we turn the other into `0xAA` so the bitwise AND can // still zero out some bits. // TODO: ideally we'd still like tracking for the undef bits. Related: #19634. 
@@ -1919,9 +1950,9 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
         const rhs_undef = orig_rhs.isUndef(zcu);
         break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
             0b00 => .{ orig_lhs, orig_rhs },
-            0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
-            0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
-            0b11 => return zcu.undefValue(ty),
+            0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
+            0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
+            0b11 => return pt.undefValue(ty),
         };
     };
 
@@ -1931,8 +1962,8 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         // + 1 for negatives
@@ -1940,12 +1971,13 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
     );
     var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.bitAnd(lhs_bigint, rhs_bigint);
-    return zcu.intValue_big(ty, result_bigint.toConst());
+    return pt.intValue_big(ty, result_bigint.toConst());
 }
 
 /// Given an integer or boolean type, creates a value of that type with the bit pattern 0xAA.
 /// This is used to convert undef values into 0xAA when performing e.g. bitwise operations.
-fn intValueAa(ty: Type, arena: Allocator, zcu: *Zcu) !Value {
+fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+    const zcu = pt.zcu;
     if (ty.toIntern() == .bool_type) return Value.true;
     const info = ty.intInfo(zcu);
 
@@ -1958,68 +1990,71 @@ fn intValueAa(ty: Type, arena: Allocator, zcu: *Zcu) !Value {
     );
     var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.readTwosComplement(buf, info.bits, zcu.getTarget().cpu.arch.endian(), info.signedness);
-    return zcu.intValue_big(ty, result_bigint.toConst());
+    return pt.intValue_big(ty, result_bigint.toConst());
 }
 
 /// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+    const mod = pt.zcu;
     if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
         const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(mod, i);
-            const rhs_elem = try rhs.elemValue(mod, i);
-            scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+            const lhs_elem = try lhs.elemValue(pt, i);
+            const rhs_elem = try rhs.elemValue(pt, i);
+            scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
         }
-        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
             .ty = ty.toIntern(),
             .storage = .{ .elems = result_data },
-        } })));
+        } }));
     }
-    return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
+    return bitwiseNandScalar(lhs, rhs, ty, arena, pt);
 }
 
 /// operands must be integers; handles undefined.
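/// Computed below as `~(lhs & rhs)`: an AND, then an XOR against an all-ones
/// value of the same type. Worked example (hypothetical `u8` operands):
/// 0b1111_0000 NAND 0b1010_1010 == ~0b1010_0000 == 0b0101_1111.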
-pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
+pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+    const mod = pt.zcu;
+    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
     if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
 
-    const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
-    const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
-    return bitwiseXor(anded, all_ones, ty, arena, mod);
+    const anded = try bitwiseAnd(lhs, rhs, ty, arena, pt);
+    const all_ones = if (ty.isSignedInt(mod)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
+    return bitwiseXor(anded, all_ones, ty, arena, pt);
 }
 
 /// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+    const mod = pt.zcu;
    if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
         const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            const lhs_elem = try lhs.elemValue(mod, i);
-            const rhs_elem = try rhs.elemValue(mod, i);
-            scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+            const lhs_elem = try lhs.elemValue(pt, i);
+            const rhs_elem = try rhs.elemValue(pt, i);
+            scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
         }
-        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
             .ty = ty.toIntern(),
             .storage = .{ .elems = result_data },
-        } })));
+        } }));
     }
-    return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
+    return bitwiseOrScalar(lhs, rhs, ty, allocator, pt);
 }
 
 /// operands must be integers; handles undefined.
-pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value {
+pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
    // If one operand is defined, we turn the other into `0xAA` so the bitwise OR can
    // still set some bits.
    // TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
    const lhs: Value, const rhs: Value = make_defined: {
-        const lhs_undef = orig_lhs.isUndef(zcu);
-        const rhs_undef = orig_rhs.isUndef(zcu);
+        const lhs_undef = orig_lhs.isUndef(pt.zcu);
+        const rhs_undef = orig_rhs.isUndef(pt.zcu);
         break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
             0b00 => .{ orig_lhs, orig_rhs },
-            0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
-            0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
-            0b11 => return zcu.undefValue(ty),
+            0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
+            0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
+            0b11 => return pt.undefValue(ty),
         };
     };
 
@@ -2029,46 +2064,48 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
    // resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, zcu); - const rhs_bigint = rhs.toBigInt(&rhs_space, zcu); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); - return zcu.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. -pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); + return bitwiseXorScalar(lhs, rhs, ty, allocator, pt); } /// operands must be integers; handles undefined. -pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); +pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() })); if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool()); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -2076,22 +2113,22 @@ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
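/// The classic overflowing case for truncated division is `minInt(T) / -1`;
/// worked example for `i8`: -128 / -1 would be 128, which does not fit, so the
/// result comes back as a `comptime_int` and `overflow_idx.*` is set.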
-pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { +pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value { var overflow: usize = undefined; - return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + return intDivInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) { error.Overflow => { - const is_vec = ty.isVector(mod); + const is_vec = ty.isVector(pt.zcu); overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try mod.vectorType(.{ - .len = ty.vectorLen(mod), + const safe_ty = if (is_vec) try pt.vectorType(.{ + .len = ty.vectorLen(pt.zcu), .child = .comptime_int_type, }) else Type.comptime_int; - return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + return intDivInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) { error.Overflow => unreachable, else => |e| return e, }; @@ -2100,14 +2137,14 @@ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator }; } -fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); +fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value { + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; return error.Overflow; @@ -2116,21 +2153,21 @@ fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator }; scalar.* = val.toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intDivScalar(lhs, rhs, ty, allocator, mod); + return intDivScalar(lhs, rhs, ty, allocator, pt); } -pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -2147,38 +2184,38 @@ pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); if (ty.toIntern() != .comptime_int_type) { - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) { return error.Overflow; } } - return mod.intValue_big(ty, result_q.toConst()); + return pt.intValue_big(ty, result_q.toConst()); } -pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); +pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intDivFloorScalar(lhs, rhs, ty, allocator, mod); + return intDivFloorScalar(lhs, rhs, ty, allocator, pt); } -pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -2194,33 +2231,33 @@ pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return mod.intValue_big(ty, result_q.toConst()); + return pt.intValue_big(ty, result_q.toConst()); } -pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); +pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intModScalar(lhs, rhs, ty, allocator, mod); + return intModScalar(lhs, rhs, ty, allocator, pt); } -pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -2236,7 +2273,7 @@ pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return mod.intValue_big(ty, result_r.toConst()); + return pt.intValue_big(ty, result_r.toConst()); } /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. 
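/// Guard sketch (hypothetical float value `v`; non-float values report false,
/// so this is safe on any `Value`):
///
///     if (v.isNan(mod)) return error.UnexpectedNan; // hypothetical error name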
@@ -2268,85 +2305,86 @@ pub fn isNegativeInf(val: Value, mod: *const Module) bool { }; } -pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); +pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatRemScalar(lhs, rhs, float_type, mod); + return floatRemScalar(lhs, rhs, float_type, pt); } -pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { - const target = mod.getTarget(); +pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value { + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + 16 => .{ .f16 = @rem(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) }, + 32 => .{ .f32 = @rem(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) }, + 64 => .{ .f64 = @rem(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) }, + 80 => .{ .f80 = @rem(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) }, + 128 => .{ .f128 = @rem(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); +pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatModScalar(lhs, rhs, float_type, mod); + return floatModScalar(lhs, rhs, float_type, pt); } -pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { - const target = mod.getTarget(); +pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value { + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + 16 => .{ .f16 = @mod(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) }, + 32 => .{ .f32 = @mod(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) }, + 64 => .{ .f64 = @mod(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) }, + 80 => .{ .f80 = @mod(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) }, + 128 => .{ .f128 = @mod(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
-pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { +pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; var overflow: usize = undefined; - return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + return intMulInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) { error.Overflow => { const is_vec = ty.isVector(mod); overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try mod.vectorType(.{ + const safe_ty = if (is_vec) try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .comptime_int_type, }) else Type.comptime_int; - return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + return intMulInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) { error.Overflow => unreachable, else => |e| return e, }; @@ -2355,14 +2393,15 @@ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator }; } -fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { +fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; return error.Overflow; @@ -2371,26 +2410,26 @@ fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator }; scalar.* = val.toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intMulScalar(lhs, rhs, ty, allocator, mod); + return intMulScalar(lhs, rhs, ty, allocator, pt); } -pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { if (ty.toIntern() != .comptime_int_type) { - const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod); - if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, pt); + if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow; return res.wrapped_result; } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const rhs_bigint = rhs.toBigInt(&rhs_space, pt); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -2402,23 +2441,24 @@ pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: ); defer allocator.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } -pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { +pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intTruncScalar(val, ty, allocator, signedness, bits, mod); + return intTruncScalar(val, ty, allocator, signedness, bits, pt); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -2428,22 +2468,22 @@ pub fn intTruncBitsAsValue( allocator: Allocator, signedness: std.builtin.Signedness, bits: Value, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - const bits_elem = try bits.elemValue(mod, i); - scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(mod)), mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + const bits_elem = try bits.elemValue(pt, i); + scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(pt)), pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(mod)), mod); + return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(pt)), pt); } pub fn intTruncScalar( @@ -2452,14 +2492,15 @@ pub fn intTruncScalar( allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, - zcu: *Zcu, + pt: Zcu.PerThread, ) !Value { - if (bits == 0) return zcu.intValue(ty, 0); + const zcu = pt.zcu; + if (bits == 0) return pt.intValue(ty, 0); - if (val.isUndef(zcu)) return zcu.undefValue(ty); + if (val.isUndef(zcu)) return pt.undefValue(ty); var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, zcu); + const val_bigint = val.toBigInt(&val_space, pt); const limbs = try allocator.alloc( std.math.big.Limb, @@ -2468,32 +2509,33 @@ pub fn intTruncScalar( var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.truncate(val_bigint, signedness, bits); - return zcu.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } -pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return shlScalar(lhs, rhs, ty, allocator, mod); + return shlScalar(lhs, rhs, ty, allocator, pt); } -pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift: usize = @intCast(rhs.toUnsignedInt(mod)); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const shift: usize = @intCast(rhs.toUnsignedInt(pt)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2505,11 +2547,11 @@ pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M }; result_bigint.shiftLeft(lhs_bigint, shift); if (ty.toIntern() != .comptime_int_type) { - const int_info = ty.intInfo(mod); + const int_info = ty.intInfo(pt.zcu); result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); } - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } pub fn shlWithOverflow( @@ -2517,32 +2559,32 @@ pub fn shlWithOverflow( rhs: Value, ty: Type, allocator: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !OverflowArithmeticResult { - if (ty.zigTypeTag(mod) == .Vector) { - const vec_len = ty.vectorLen(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const vec_len = ty.vectorLen(pt.zcu); const overflowed_data = try allocator.alloc(InternPool.Index, vec_len); const result_data = try allocator.alloc(InternPool.Index, vec_len); - const scalar_ty = ty.scalarType(mod); + const scalar_ty = ty.scalarType(pt.zcu); for (overflowed_data, result_data, 0..) |*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ + .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + } })), + .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))), + } })), }; } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); + return shlWithOverflowScalar(lhs, rhs, ty, allocator, pt); } pub fn shlWithOverflowScalar( @@ -2550,12 +2592,12 @@ pub fn shlWithOverflowScalar( rhs: Value, ty: Type, allocator: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !OverflowArithmeticResult { - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift: usize = @intCast(rhs.toUnsignedInt(mod)); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const shift: usize = @intCast(rhs.toUnsignedInt(pt)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -2571,8 +2613,8 @@ pub fn shlWithOverflowScalar( result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); } return OverflowArithmeticResult{ - .overflow_bit = try 
mod.intValue(Type.u1, @intFromBool(overflowed)), - .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), + .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), + .wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()), }; } @@ -2581,22 +2623,22 @@ pub fn shlSat( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return shlSatScalar(lhs, rhs, ty, arena, mod); + return shlSatScalar(lhs, rhs, ty, arena, pt); } pub fn shlSatScalar( @@ -2604,15 +2646,15 @@ pub fn shlSatScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift: usize = @intCast(rhs.toUnsignedInt(mod)); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const shift: usize = @intCast(rhs.toUnsignedInt(pt)); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -2623,7 +2665,7 @@ pub fn shlSatScalar( .len = undefined, }; result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } pub fn shlTrunc( @@ -2631,22 +2673,22 @@ pub fn shlTrunc( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return shlTruncScalar(lhs, rhs, ty, arena, mod); + return shlTruncScalar(lhs, rhs, ty, arena, pt); } pub fn shlTruncScalar( @@ -2654,46 +2696,46 @@ pub fn shlTruncScalar( rhs: Value, ty: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod); - const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); + const shifted = try lhs.shl(rhs, ty, arena, pt); + const int_info = ty.intInfo(pt.zcu); + const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, pt); return truncated; } -pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); +pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { + if (ty.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); + const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return shrScalar(lhs, rhs, ty, allocator, mod); + return shrScalar(lhs, rhs, ty, allocator, pt); } -pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { +pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift: usize = @intCast(rhs.toUnsignedInt(mod)); + const lhs_bigint = lhs.toBigInt(&lhs_space, pt); + const shift: usize = @intCast(rhs.toUnsignedInt(pt)); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. 
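    // E.g. shifting -5 right by 100 bits floors to -1, while shifting 5 yields 0.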
if (lhs_bigint.positive) { - return mod.intValue(ty, 0); + return pt.intValue(ty, 0); } else { - return mod.intValue(ty, -1); + return pt.intValue(ty, -1); } } @@ -2707,48 +2749,45 @@ pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M .len = undefined, }; result_bigint.shiftRight(lhs_bigint, shift); - return mod.intValue_big(ty, result_bigint.toConst()); + return pt.intValue_big(ty, result_bigint.toConst()); } pub fn floatNeg( val: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try floatNegScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try floatNegScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatNegScalar(val, float_type, mod); + return floatNegScalar(val, float_type, pt); } -pub fn floatNegScalar( - val: Value, - float_type: Type, - mod: *Module, -) !Value { - const target = mod.getTarget(); +pub fn floatNegScalar(val: Value, float_type: Type, pt: Zcu.PerThread) !Value { + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = -val.toFloat(f16, mod) }, - 32 => .{ .f32 = -val.toFloat(f32, mod) }, - 64 => .{ .f64 = -val.toFloat(f64, mod) }, - 80 => .{ .f80 = -val.toFloat(f80, mod) }, - 128 => .{ .f128 = -val.toFloat(f128, mod) }, + 16 => .{ .f16 = -val.toFloat(f16, pt) }, + 32 => .{ .f32 = -val.toFloat(f32, pt) }, + 64 => .{ .f64 = -val.toFloat(f64, pt) }, + 80 => .{ .f80 = -val.toFloat(f80, pt) }, + 128 => .{ .f128 = -val.toFloat(f128, pt) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatAdd( @@ -2756,43 +2795,45 @@ pub fn floatAdd( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatAddScalar(lhs, rhs, float_type, mod); + return floatAddScalar(lhs, rhs, float_type, pt); } pub fn floatAddScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) }, + 16 => .{ .f16 = lhs.toFloat(f16, pt) + rhs.toFloat(f16, pt) }, + 32 => .{ .f32 = lhs.toFloat(f32, pt) + rhs.toFloat(f32, pt) }, + 64 => .{ .f64 = lhs.toFloat(f64, pt) + rhs.toFloat(f64, pt) }, + 80 => .{ .f80 = lhs.toFloat(f80, pt) + rhs.toFloat(f80, pt) }, + 128 => .{ .f128 = lhs.toFloat(f128, pt) + rhs.toFloat(f128, pt) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatSub( @@ -2800,43 +2841,45 @@ pub fn floatSub( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatSubScalar(lhs, rhs, float_type, mod); + return floatSubScalar(lhs, rhs, float_type, pt); } pub fn floatSubScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) }, + 16 => .{ .f16 = lhs.toFloat(f16, pt) - rhs.toFloat(f16, pt) }, + 32 => .{ .f32 = lhs.toFloat(f32, pt) - rhs.toFloat(f32, pt) }, + 64 => .{ .f64 = lhs.toFloat(f64, pt) - rhs.toFloat(f64, pt) }, + 80 => .{ .f80 = lhs.toFloat(f80, pt) - rhs.toFloat(f80, pt) }, + 128 => .{ .f128 = lhs.toFloat(f128, pt) - rhs.toFloat(f128, pt) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatDiv( @@ -2844,43 +2887,43 @@ pub fn floatDiv( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatDivScalar(lhs, rhs, float_type, mod); + return floatDivScalar(lhs, rhs, float_type, pt); } pub fn floatDivScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) / rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) }, + 16 => .{ .f16 = lhs.toFloat(f16, pt) / rhs.toFloat(f16, pt) }, + 32 => .{ .f32 = lhs.toFloat(f32, pt) / rhs.toFloat(f32, pt) }, + 64 => .{ .f64 = lhs.toFloat(f64, pt) / rhs.toFloat(f64, pt) }, + 80 => .{ .f80 = lhs.toFloat(f80, pt) / rhs.toFloat(f80, pt) }, + 128 => .{ .f128 = lhs.toFloat(f128, pt) / rhs.toFloat(f128, pt) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatDivFloor( @@ -2888,43 +2931,43 @@ pub fn floatDivFloor( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatDivFloorScalar(lhs, rhs, float_type, mod); + return floatDivFloorScalar(lhs, rhs, float_type, pt); } pub fn floatDivFloorScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) }, + 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) }, + 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) }, + 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) }, + 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatDivTrunc( @@ -2932,43 +2975,43 @@ pub fn floatDivTrunc( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatDivTruncScalar(lhs, rhs, float_type, mod); + return floatDivTruncScalar(lhs, rhs, float_type, pt); } pub fn floatDivTruncScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { - const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) }, + 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) }, + 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) }, + 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) }, + 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn floatMul( @@ -2976,510 +3019,539 @@ pub fn floatMul( rhs: Value, float_type: Type, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern(); + const lhs_elem = try lhs.elemValue(pt, i); + const rhs_elem = try rhs.elemValue(pt, i); + scalar.* = (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floatMulScalar(lhs, rhs, float_type, mod); + return floatMulScalar(lhs, rhs, float_type, pt); } pub fn floatMulScalar( lhs: Value, rhs: Value, float_type: Type, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) }, + 16 => .{ .f16 = lhs.toFloat(f16, pt) * rhs.toFloat(f16, pt) }, + 32 => .{ .f32 = lhs.toFloat(f32, pt) * rhs.toFloat(f32, pt) }, + 64 => .{ .f64 = lhs.toFloat(f64, pt) * rhs.toFloat(f64, pt) }, + 80 => .{ .f80 = lhs.toFloat(f80, pt) * rhs.toFloat(f80, pt) }, + 128 => .{ .f128 = lhs.toFloat(f128, pt) * rhs.toFloat(f128, pt) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); +pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + if (float_type.zigTypeTag(pt.zcu) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu)); + const scalar_ty = float_type.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try sqrtScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try sqrtScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return sqrtScalar(val, float_type, mod); + return sqrtScalar(val, float_type, pt); } -pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @sqrt(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @sqrt(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @sqrt(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @sqrt(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @sqrt(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try sinScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return sinScalar(val, float_type, mod); + return sinScalar(val, float_type, pt); } -pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @sin(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @sin(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @sin(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @sin(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @sin(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @sin(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @sin(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @sin(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @sin(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @sin(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try cosScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return cosScalar(val, float_type, mod); + return cosScalar(val, float_type, pt); } -pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @cos(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @cos(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @cos(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @cos(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @cos(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @cos(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @cos(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @cos(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @cos(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @cos(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try tanScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return tanScalar(val, float_type, mod); + return tanScalar(val, float_type, pt); } -pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @tan(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @tan(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @tan(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @tan(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @tan(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @tan(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @tan(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @tan(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @tan(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @tan(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try expScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return expScalar(val, float_type, mod); + return expScalar(val, float_type, pt); } -pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @exp(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @exp(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @exp(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @exp(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @exp(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @exp(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @exp(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @exp(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @exp(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @exp(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try exp2Scalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return exp2Scalar(val, float_type, mod); + return exp2Scalar(val, float_type, pt); } -pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @exp2(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @exp2(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @exp2(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @exp2(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @exp2(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try logScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return logScalar(val, float_type, mod); + return logScalar(val, float_type, pt); } -pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @log(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @log(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @log(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @log(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @log(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try log2Scalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return log2Scalar(val, float_type, mod); + return log2Scalar(val, float_type, pt); } -pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log2(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log2(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log2(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log2(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log2(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @log2(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @log2(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @log2(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @log2(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @log2(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try log10Scalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return log10Scalar(val, float_type, mod); + return log10Scalar(val, float_type, pt); } -pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log10(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log10(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log10(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log10(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log10(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @log10(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @log10(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @log10(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @log10(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @log10(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { +pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try absScalar(elem_val, scalar_ty, mod, arena)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return absScalar(val, ty, mod, arena); + return absScalar(val, ty, pt, arena); } -pub fn absScalar(val: Value, ty: Type, mod: *Module, arena: Allocator) Allocator.Error!Value { +pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value { + const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Int => { var buffer: Value.BigIntSpace = undefined; - var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); + var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena); operand_bigint.abs(); - return mod.intValue_big(try ty.toUnsigned(mod), operand_bigint.toConst()); + return pt.intValue_big(try ty.toUnsigned(pt), operand_bigint.toConst()); }, .ComptimeInt => { var buffer: Value.BigIntSpace = undefined; - var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); + var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena); operand_bigint.abs(); - return mod.intValue_big(ty, operand_bigint.toConst()); + return pt.intValue_big(ty, operand_bigint.toConst()); }, .ComptimeFloat, .Float => { const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @abs(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @abs(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @abs(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @abs(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @abs(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @abs(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @abs(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @abs(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @abs(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @abs(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = ty.toIntern(), .storage = storage, - } }))); + } })); }, else => unreachable, } } -pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try floorScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return floorScalar(val, float_type, mod); + return floorScalar(val, float_type, pt); } -pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @floor(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @floor(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @floor(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @floor(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @floor(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @floor(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @floor(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @floor(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @floor(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @floor(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try ceilScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return ceilScalar(val, float_type, mod); + return ceilScalar(val, float_type, pt); } -pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @ceil(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @ceil(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @ceil(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @ceil(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @ceil(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try roundScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return roundScalar(val, float_type, mod); + return roundScalar(val, float_type, pt); } -pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @round(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @round(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @round(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @round(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @round(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @round(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @round(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @round(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @round(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @round(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } -pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { +pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = (try truncScalar(elem_val, scalar_ty, mod)).toIntern(); + const elem_val = try val.elemValue(pt, i); + scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return truncScalar(val, float_type, mod); + return truncScalar(val, float_type, pt); } -pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { +pub fn truncScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) }, + 16 => .{ .f16 = @trunc(val.toFloat(f16, pt)) }, + 32 => .{ .f32 = @trunc(val.toFloat(f32, pt)) }, + 64 => .{ .f64 = @trunc(val.toFloat(f64, pt)) }, + 80 => .{ .f80 = @trunc(val.toFloat(f80, pt)) }, + 128 => .{ .f128 = @trunc(val.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } pub fn mulAdd( @@ -3488,23 +3560,24 @@ pub fn mulAdd( mulend2: Value, addend: Value, arena: Allocator, - mod: *Module, + pt: Zcu.PerThread, ) !Value { + const mod = pt.zcu; if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - const mulend1_elem = try mulend1.elemValue(mod, i); - const mulend2_elem = try mulend2.elemValue(mod, i); - const addend_elem = try addend.elemValue(mod, i); - scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).toIntern(); + const mulend1_elem = try mulend1.elemValue(pt, i); + const mulend2_elem = try mulend2.elemValue(pt, i); + const addend_elem = try addend.elemValue(pt, i); + scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, pt)).toIntern(); } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = float_type.toIntern(), .storage = .{ .elems = result_data }, - } }))); + } })); } - return mulAddScalar(float_type, mulend1, mulend2, addend, mod); + return mulAddScalar(float_type, mulend1, mulend2, addend, pt); } pub fn mulAddScalar( @@ -3512,32 +3585,33 @@ pub fn mulAddScalar( mulend1: Value, mulend2: Value, addend: Value, - mod: *Module, + pt: Zcu.PerThread, ) Allocator.Error!Value { + const mod = pt.zcu; const target = mod.getTarget(); const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) }, - 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), addend.toFloat(f32, mod)) }, - 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) }, - 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) }, - 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) }, + 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, pt), mulend2.toFloat(f16, pt), addend.toFloat(f16, pt)) }, + 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, pt), mulend2.toFloat(f32, pt), addend.toFloat(f32, pt)) }, + 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, pt), mulend2.toFloat(f64, pt), addend.toFloat(f64, pt)) }, + 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, pt), mulend2.toFloat(f80, pt), addend.toFloat(f80, pt)) }, + 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, pt), mulend2.toFloat(f128, pt), addend.toFloat(f128, pt)) }, else => unreachable, }; - return Value.fromInterned((try mod.intern(.{ .float = .{ + return Value.fromInterned(try pt.intern(.{ .float = .{ .ty = float_type.toIntern(), .storage = storage, - } }))); + } })); } /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. 
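// Note: the float helpers above (log2, log10, abs, floor, ceil, round,
// trunc, mulAdd) all share one shape: the vector case maps the scalar
// routine over each element and re-aggregates, while the scalar case
// switches on floatBits and interns the result. A minimal standalone sketch
// of that shape, using plain slices instead of interned values (all names
// below are illustrative, not the compiler's):
const std = @import("std");

fn mapScalar(comptime op: fn (f64) f64, out: []f64, in: []const f64) void {
    // Vector case: apply the scalar routine element-wise.
    for (out, in) |*o, v| o.* = op(v);
}

fn floorOp(v: f64) f64 {
    return @floor(v);
}

test "vector case maps the scalar routine over each element" {
    var out: [3]f64 = undefined;
    mapScalar(floorOp, &out, &.{ 1.5, -1.5, 2.0 });
    try std.testing.expectEqualSlices(f64, &.{ 1.0, -2.0, 2.0 }, &out);
}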
-pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?u8 { - const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; +pub fn hasRepeatedByteRepr(val: Value, ty: Type, pt: Zcu.PerThread) !?u8 { + const abi_size = std.math.cast(usize, ty.abiSize(pt)) orelse return null; assert(abi_size >= 1); - const byte_buffer = try mod.gpa.alloc(u8, abi_size); - defer mod.gpa.free(byte_buffer); + const byte_buffer = try pt.zcu.gpa.alloc(u8, abi_size); + defer pt.zcu.gpa.free(byte_buffer); - writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) { + writeToMemory(val, ty, pt, byte_buffer) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, // TODO: The writeToMemory function was originally created for the purpose @@ -3567,13 +3641,13 @@ pub fn typeOf(val: Value, zcu: *const Zcu) Type { /// If `val` is not undef, the bounds are both `val`. /// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type. /// If `val` is undef and is a `comptime_int`, returns null. -pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value { - if (!val.isUndef(mod)) return .{ val, val }; - const ty = mod.intern_pool.typeOf(val.toIntern()); +pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value { + if (!val.isUndef(pt.zcu)) return .{ val, val }; + const ty = pt.zcu.intern_pool.typeOf(val.toIntern()); if (ty == .comptime_int_type) return null; return .{ - try Type.fromInterned(ty).minInt(mod, Type.fromInterned(ty)), - try Type.fromInterned(ty).maxInt(mod, Type.fromInterned(ty)), + try Type.fromInterned(ty).minInt(pt, Type.fromInterned(ty)), + try Type.fromInterned(ty).maxInt(pt, Type.fromInterned(ty)), }; } @@ -3604,14 +3678,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex; /// `parent_ptr` must be a single-pointer to some optional. /// Returns a pointer to the payload of the optional. /// May perform type resolution. -pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { +pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value { + const zcu = pt.zcu; const parent_ptr_ty = parent_ptr.typeOf(zcu); const opt_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(opt_ty.zigTypeTag(zcu) == .Optional); - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an optional has the same // natural alignment as its child type. @@ -3619,15 +3694,15 @@ pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { break :info new; }); - if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); + if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty); if (opt_ty.isPtrLikeOptional(zcu)) { // Just reinterpret the pointer, since the layout is well-defined - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); } - const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, opt_ty, zcu); - return Value.fromInterned(try zcu.intern(.{ .ptr = .{ + const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, opt_ty, pt); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = result_ty.toIntern(), .base_addr = .{ .opt_payload = base_ptr.toIntern() }, .byte_offset = 0, @@ -3637,14 +3712,15 @@ pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { /// `parent_ptr` must be a single-pointer to some error union. /// Returns a pointer to the payload of the error union. 
/// May perform type resolution. -pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { +pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value { + const zcu = pt.zcu; const parent_ptr_ty = parent_ptr.typeOf(zcu); const eu_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion); - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an error union has a // natural alignment greater than or equal to that of its payload type. @@ -3652,10 +3728,10 @@ pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { break :info new; }); - if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); + if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty); - const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, eu_ty, zcu); - return Value.fromInterned(try zcu.intern(.{ .ptr = .{ + const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, eu_ty, pt); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = result_ty.toIntern(), .base_addr = .{ .eu_payload = base_ptr.toIntern() }, .byte_offset = 0, @@ -3666,7 +3742,8 @@ pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { /// Returns a pointer to the aggregate field at the specified index. /// For slices, uses `slice_ptr_index` and `slice_len_index`. /// May perform type resolution. -pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { +pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { + const zcu = pt.zcu; const parent_ptr_ty = parent_ptr.typeOf(zcu); const aggregate_ty = parent_ptr_ty.childType(zcu); @@ -3679,39 +3756,39 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { .Struct => field: { const field_ty = aggregate_ty.structFieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. 
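// Worked example for the alignment computation just below: with a parent
// alignment of 8 bytes (3 log2 units) and a field at byte_off 4
// (@ctz(4) = 2), the resulting field alignment is min(3, 2) = 2 log2 units,
// i.e. the field pointer may only be assumed 4-byte aligned.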
- const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); + const byte_off = aggregate_ty.structFieldOffset(field_idx, pt); const field_align = a: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { - break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; + break :pa (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar; } else parent_ptr_info.flags.alignment; break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off))); }; - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = field_align; break :info new; }); - return parent_ptr.getOffsetPtr(byte_off, result_ty, zcu); + return parent_ptr.getOffsetPtr(byte_off, result_ty, pt); }, - .@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, zcu)) { + .@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt)) { .bit_ptr => |packed_offset| { - const result_ty = try zcu.ptrType(info: { + const result_ty = try pt.ptrType(info: { var new = parent_ptr_info; new.packed_offset = packed_offset; new.child = field_ty.toIntern(); if (new.flags.alignment == .none) { - new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; + new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar; } break :info new; }); - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); }, .byte_ptr => |ptr_info| { - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.packed_offset = .{ @@ -3721,7 +3798,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { new.flags.alignment = ptr_info.alignment; break :info new; }); - return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, zcu); + return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, pt); }, }, } @@ -3730,46 +3807,46 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) }, .@"extern" => { // Point to the same address. - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); break :info new; }); - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); }, .@"packed" => { // If the field has an ABI size matching its bit size, then we can continue to use a // non-bit pointer if the parent pointer is also a non-bit pointer. - if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) { + if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(pt, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(pt, .sema)) { // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely. 
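// Worked example (hypothetical sizes): for a packed aggregate of ABI size 8
// whose field of ABI size 2 occupies the lowest-order bits, those bits start
// at byte offset 0 on little-endian but at 8 - 2 = 6 on big-endian, which is
// exactly what the endian switch below computes.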
const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) { .little => 0, - .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar, + .big => (try aggregate_ty.abiSizeAdvanced(pt, .sema)).scalar - (try field_ty.abiSizeAdvanced(pt, .sema)).scalar, }; - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = InternPool.Alignment.fromLog2Units( - @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?), + @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema)).toByteUnits().?), ); break :info new; }); - return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); + return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt); } else { // The result must be a bit-pointer if it is not already. - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); if (new.packed_offset.host_size == 0) { - new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8); + new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(pt, .sema)) + 7) / 8); assert(new.packed_offset.bit_offset == 0); } break :info new; }); - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); } }, } @@ -3777,8 +3854,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { .Pointer => field_ty: { assert(aggregate_ty.isSlice(zcu)); break :field_ty switch (field_idx) { - Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) }, - Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(zcu) }, + Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(pt) }, + Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(pt) }, else => unreachable, }; }, @@ -3786,24 +3863,24 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { }; const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: { - const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; + const ty_align = (try field_ty.abiAlignmentAdvanced(pt, .sema)).scalar; const true_field_align = if (field_align == .none) ty_align else field_align; const new_align = true_field_align.min(parent_ptr_info.flags.alignment); if (new_align == ty_align) break :a .none; break :a new_align; } else field_align; - const result_ty = try zcu.ptrTypeSema(info: { + const result_ty = try pt.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = new_align; break :info new; }); - if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); + if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty); - const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, aggregate_ty, zcu); - return Value.fromInterned(try zcu.intern(.{ .ptr = .{ + const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, aggregate_ty, pt); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = result_ty.toIntern(), .base_addr = .{ .field = .{ .base = base_ptr.toIntern(), @@ -3816,7 +3893,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice. 
/// Returns a pointer to the element at the specified index. /// May perform type resolution. -pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { +pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value { + const zcu = pt.zcu; const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) { .One, .Many, .C => orig_parent_ptr, .Slice => orig_parent_ptr.slicePtr(zcu), @@ -3824,14 +3902,14 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const elem_ty = parent_ptr_ty.childType(zcu); - const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu); + const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), pt); - if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); + if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty); if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) { // Since we have a bit-pointer, the pointer address should be unchanged. assert(elem_ty.zigTypeTag(zcu) == .Vector); - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); } const PtrStrat = union(enum) { @@ -3841,31 +3919,31 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) { .One => switch (elem_ty.zigTypeTag(zcu)) { - .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) }, + .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(pt, .sema), 8) }, .Array => strat: { const arr_elem_ty = elem_ty.childType(zcu); - if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) { + if (try arr_elem_ty.comptimeOnlyAdvanced(pt, .sema)) { break :strat .{ .elem_ptr = arr_elem_ty }; } - break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }; + break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(pt, .sema)).scalar }; }, else => unreachable, }, - .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema)) + .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(pt, .sema)) .{ .elem_ptr = elem_ty } else - .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }, + .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar }, .Slice => unreachable, }; switch (strat) { .offset => |byte_offset| { - return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); + return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt); }, .elem_ptr => |manyptr_elem_ty| if (field_idx == 0) { - return zcu.getCoerced(parent_ptr, result_ty); + return pt.getCoerced(parent_ptr, result_ty); } else { const arr_base_ty, const arr_base_len = manyptr_elem_ty.arrayBase(zcu); const base_idx = arr_base_len * field_idx; @@ -3875,7 +3953,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { if (Value.fromInterned(arr_elem.base).typeOf(zcu).childType(zcu).toIntern() == arr_base_ty.toIntern()) { // We already have a pointer to an element of an array of this type. // Just modify the index. 
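// Worked example: with `manyptr_elem_ty = [4]u8`, `arrayBase` yields
// `arr_base_ty = u8` and `arr_base_len = 4`, so `field_idx = 3` gives
// `base_idx = 12`; the existing `arr_elem` base pointer below simply has its
// index bumped by 12.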
- return Value.fromInterned(try zcu.intern(.{ .ptr = ptr: { + return Value.fromInterned(try pt.intern(.{ .ptr = ptr: { var new = parent_info; new.base_addr.arr_elem.index += base_idx; new.ty = result_ty.toIntern(); @@ -3885,8 +3963,8 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { }, else => {}, } - const base_ptr = try parent_ptr.canonicalizeBasePtr(.Many, arr_base_ty, zcu); - return Value.fromInterned(try zcu.intern(.{ .ptr = .{ + const base_ptr = try parent_ptr.canonicalizeBasePtr(.Many, arr_base_ty, pt); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = result_ty.toIntern(), .base_addr = .{ .arr_elem = .{ .base = base_ptr.toIntern(), @@ -3898,9 +3976,9 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { } } -fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, zcu: *Zcu) !Value { - const ptr_ty = base_ptr.typeOf(zcu); - const ptr_info = ptr_ty.ptrInfo(zcu); +fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, pt: Zcu.PerThread) !Value { + const ptr_ty = base_ptr.typeOf(pt.zcu); + const ptr_info = ptr_ty.ptrInfo(pt.zcu); if (ptr_info.flags.size == want_size and ptr_info.child == want_child.toIntern() and @@ -3914,7 +3992,7 @@ fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size return base_ptr; } - const new_ty = try zcu.ptrType(.{ + const new_ty = try pt.ptrType(.{ .child = want_child.toIntern(), .sentinel = .none, .flags = .{ @@ -3926,15 +4004,15 @@ fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size .address_space = ptr_info.flags.address_space, }, }); - return zcu.getCoerced(base_ptr, new_ty); + return pt.getCoerced(base_ptr, new_ty); } -pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, zcu: *Zcu) !Value { - if (ptr_val.isUndef(zcu)) return ptr_val; - var ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr; +pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, pt: Zcu.PerThread) !Value { + if (ptr_val.isUndef(pt.zcu)) return ptr_val; + var ptr = pt.zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr; ptr.ty = new_ty.toIntern(); ptr.byte_offset += byte_off; - return Value.fromInterned(try zcu.intern(.{ .ptr = ptr })); + return Value.fromInterned(try pt.intern(.{ .ptr = ptr })); } pub const PointerDeriveStep = union(enum) { @@ -3977,21 +4055,21 @@ pub const PointerDeriveStep = union(enum) { new_ptr_ty: Type, }, - pub fn ptrType(step: PointerDeriveStep, zcu: *Zcu) !Type { + pub fn ptrType(step: PointerDeriveStep, pt: Zcu.PerThread) !Type { return switch (step) { .int => |int| int.ptr_ty, - .decl_ptr => |decl| try zcu.declPtr(decl).declPtrType(zcu), + .decl_ptr => |decl| try pt.zcu.declPtr(decl).declPtrType(pt), .anon_decl_ptr => |ad| Type.fromInterned(ad.orig_ty), .comptime_alloc_ptr => |info| info.ptr_ty, - .comptime_field_ptr => |val| try zcu.singleConstPtrType(val.typeOf(zcu)), + .comptime_field_ptr => |val| try pt.singleConstPtrType(val.typeOf(pt.zcu)), .offset_and_cast => |oac| oac.new_ptr_ty, inline .eu_payload_ptr, .opt_payload_ptr, .field_ptr, .elem_ptr => |x| x.result_ptr_ty, }; } }; -pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep { - return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) { +pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Allocator.Error!PointerDeriveStep { + return 
ptr_val.pointerDerivationAdvanced(arena, pt, null) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.AnalysisFail => unreachable, }; @@ -4001,7 +4079,8 @@ pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator. /// only field and element pointers with no casts. This can be used by codegen backends /// which prefer field/elem accesses when lowering constant pointer values. /// It is also used by the Value printing logic for pointers. -pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, opt_sema: ?*Sema) !PointerDeriveStep { +pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, opt_sema: ?*Sema) !PointerDeriveStep { + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr; const base_derive: PointerDeriveStep = switch (ptr.base_addr) { .int => return .{ .int = .{ @@ -4012,7 +4091,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .anon_decl => |ad| base: { // A slight tweak: `orig_ty` here is sometimes not `const`, but it ought to be. // TODO: fix this in the sites interning anon decls! - const const_ty = try zcu.ptrType(info: { + const const_ty = try pt.ptrType(info: { var info = Type.fromInterned(ad.orig_ty).ptrInfo(zcu); info.flags.is_const = true; break :info info; @@ -4024,11 +4103,11 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op }, .comptime_alloc => |idx| base: { const alloc = opt_sema.?.getComptimeAlloc(idx); - const val = try alloc.val.intern(zcu, opt_sema.?.arena); + const val = try alloc.val.intern(pt, opt_sema.?.arena); const ty = val.typeOf(zcu); break :base .{ .comptime_alloc_ptr = .{ .val = val, - .ptr_ty = try zcu.ptrType(.{ + .ptr_ty = try pt.ptrType(.{ .child = ty.toIntern(), .flags = .{ .alignment = alloc.alignment, @@ -4041,20 +4120,20 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op const base_ptr = Value.fromInterned(eu_ptr); const base_ptr_ty = base_ptr.typeOf(zcu); const parent_step = try arena.create(PointerDeriveStep); - parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, zcu, opt_sema); + parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, pt, opt_sema); break :base .{ .eu_payload_ptr = .{ .parent = parent_step, - .result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)), + .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)), } }; }, .opt_payload => |opt_ptr| base: { const base_ptr = Value.fromInterned(opt_ptr); const base_ptr_ty = base_ptr.typeOf(zcu); const parent_step = try arena.create(PointerDeriveStep); - parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, zcu, opt_sema); + parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, pt, opt_sema); break :base .{ .opt_payload_ptr = .{ .parent = parent_step, - .result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)), + .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)), } }; }, .field => |field| base: { @@ -4062,22 +4141,22 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct 
=> .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, + .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, .sema) }, + .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, .sema) }, .Pointer => .{ switch (field.index) { Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu), Value.slice_len_index => Type.usize, else => unreachable, - }, Type.usize.abiAlignment(zcu) }, + }, Type.usize.abiAlignment(pt) }, else => unreachable, }; - const base_align = base_ptr_ty.ptrAlignment(zcu); + const base_align = base_ptr_ty.ptrAlignment(pt); const result_align = field_align.minStrict(base_align); - const result_ty = try zcu.ptrType(.{ + const result_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = flags: { var flags = base_ptr_ty.ptrInfo(zcu).flags; - if (result_align == field_ty.abiAlignment(zcu)) { + if (result_align == field_ty.abiAlignment(pt)) { flags.alignment = .none; } else { flags.alignment = result_align; @@ -4086,7 +4165,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op }, }); const parent_step = try arena.create(PointerDeriveStep); - parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, zcu, opt_sema); + parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, pt, opt_sema); break :base .{ .field_ptr = .{ .parent = parent_step, .field_idx = @intCast(field.index), @@ -4095,9 +4174,9 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op }, .arr_elem => |arr_elem| base: { const parent_step = try arena.create(PointerDeriveStep); - parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, zcu, opt_sema); - const parent_ptr_info = (try parent_step.ptrType(zcu)).ptrInfo(zcu); - const result_ptr_ty = try zcu.ptrType(.{ + parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, pt, opt_sema); + const parent_ptr_info = (try parent_step.ptrType(pt)).ptrInfo(zcu); + const result_ptr_ty = try pt.ptrType(.{ .child = parent_ptr_info.child, .flags = flags: { var flags = parent_ptr_info.flags; @@ -4113,12 +4192,12 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op }, }; - if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(zcu)).toIntern()) { + if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(pt)).toIntern()) { return base_derive; } const need_child = Type.fromInterned(ptr.ty).childType(zcu); - if (need_child.comptimeOnly(zcu)) { + if (need_child.comptimeOnly(pt)) { // No refinement can happen - this pointer is presumably invalid. // Just offset it. const parent = try arena.create(PointerDeriveStep); @@ -4129,7 +4208,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .new_ptr_ty = Type.fromInterned(ptr.ty), } }; } - const need_bytes = need_child.abiSize(zcu); + const need_bytes = need_child.abiSize(pt); var cur_derive = base_derive; var cur_offset = ptr.byte_offset; @@ -4137,7 +4216,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op // Refine through fields and array elements as much as possible. 
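// Worked example (hypothetical layout): refining byte offset 10 into
// `extern struct { a: u64, arr: [4]u16 }` with a needed child type of u16
// first selects the `arr` field (start_off 8, end_off 16), leaving offset 2,
// then selects element index 2 / 2 = 1, leaving offset 0 -- at which point
// the loop below terminates with a field_ptr + elem_ptr derivation chain.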
if (need_bytes > 0) while (true) { - const cur_ty = (try cur_derive.ptrType(zcu)).childType(zcu); + const cur_ty = (try cur_derive.ptrType(pt)).childType(zcu); if (cur_ty.toIntern() == need_child.toIntern() and cur_offset == 0) { break; } @@ -4168,7 +4247,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .Array => { const elem_ty = cur_ty.childType(zcu); - const elem_size = elem_ty.abiSize(zcu); + const elem_size = elem_ty.abiSize(pt); const start_idx = cur_offset / elem_size; const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size; if (end_idx == start_idx + 1) { @@ -4177,7 +4256,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op cur_derive = .{ .elem_ptr = .{ .parent = parent, .elem_idx = start_idx, - .result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty), + .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty), } }; cur_offset -= start_idx * elem_size; } else { @@ -4188,7 +4267,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op cur_derive = .{ .elem_ptr = .{ .parent = parent, .elem_idx = start_idx, - .result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty), + .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty), } }; cur_offset -= start_idx * elem_size; } @@ -4199,19 +4278,19 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .auto, .@"packed" => break, .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { const field_ty = cur_ty.structFieldType(field_idx, zcu); - const start_off = cur_ty.structFieldOffset(field_idx, zcu); - const end_off = start_off + field_ty.abiSize(zcu); + const start_off = cur_ty.structFieldOffset(field_idx, pt); + const end_off = start_off + field_ty.abiSize(pt); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { - const old_ptr_ty = try cur_derive.ptrType(zcu); - const parent_align = old_ptr_ty.ptrAlignment(zcu); + const old_ptr_ty = try cur_derive.ptrType(pt); + const parent_align = old_ptr_ty.ptrAlignment(pt); const field_align = InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(start_off))); const parent = try arena.create(PointerDeriveStep); parent.* = cur_derive; - const new_ptr_ty = try zcu.ptrType(.{ + const new_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = flags: { var flags = old_ptr_ty.ptrInfo(zcu).flags; - if (field_align == field_ty.abiAlignment(zcu)) { + if (field_align == field_ty.abiAlignment(pt)) { flags.alignment = .none; } else { flags.alignment = field_align; @@ -4232,7 +4311,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op } }; - if (cur_offset == 0 and (try cur_derive.ptrType(zcu)).toIntern() == ptr.ty) { + if (cur_offset == 0 and (try cur_derive.ptrType(pt)).toIntern() == ptr.ty) { return cur_derive; } @@ -4245,20 +4324,20 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op } }; } -pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value { - switch (zcu.intern_pool.indexToKey(val.toIntern())) { +pub fn resolveLazy(val: Value, arena: Allocator, pt: Zcu.PerThread) Zcu.SemaError!Value { + switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .u64, .i64, .big_int => return val, - .lazy_align, .lazy_size => return zcu.intValue( + .lazy_align, .lazy_size => return pt.intValue( Type.fromInterned(int.ty), - 
(try val.getUnsignedIntAdvanced(zcu, .sema)).?, + (try val.getUnsignedIntAdvanced(pt, .sema)).?, ), }, .slice => |slice| { - const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu); - const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu); + const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, pt); + const len = try Value.fromInterned(slice.len).resolveLazy(arena, pt); if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; - return Value.fromInterned(try zcu.intern(.{ .slice = .{ + return Value.fromInterned(try pt.intern(.{ .slice = .{ .ty = slice.ty, .ptr = ptr.toIntern(), .len = len.toIntern(), @@ -4268,22 +4347,22 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value switch (ptr.base_addr) { .decl, .comptime_alloc, .anon_decl, .int => return val, .comptime_field => |field_val| { - const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern(); + const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, pt)).toIntern(); return if (resolved_field_val == field_val) val else - Value.fromInterned((try zcu.intern(.{ .ptr = .{ + Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = ptr.ty, .base_addr = .{ .comptime_field = resolved_field_val }, .byte_offset = ptr.byte_offset, - } }))); + } })); }, .eu_payload, .opt_payload => |base| { - const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern(); + const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, pt)).toIntern(); return if (resolved_base == base) val else - Value.fromInterned((try zcu.intern(.{ .ptr = .{ + Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = ptr.ty, .base_addr = switch (ptr.base_addr) { .eu_payload => .{ .eu_payload = resolved_base }, @@ -4291,14 +4370,14 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value else => unreachable, }, .byte_offset = ptr.byte_offset, - } }))); + } })); }, .arr_elem, .field => |base_index| { - const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern(); + const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, pt)).toIntern(); return if (resolved_base == base_index.base) val else - Value.fromInterned((try zcu.intern(.{ .ptr = .{ + Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = ptr.ty, .base_addr = switch (ptr.base_addr) { .arr_elem => .{ .arr_elem = .{ @@ -4312,7 +4391,7 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value else => unreachable, }, .byte_offset = ptr.byte_offset, - } }))); + } })); }, } }, @@ -4321,40 +4400,40 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value .elems => |elems| { var resolved_elems: []InternPool.Index = &.{}; for (elems, 0..) 
|elem, i| { - const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern(); if (resolved_elems.len == 0 and resolved_elem != elem) { resolved_elems = try arena.alloc(InternPool.Index, elems.len); @memcpy(resolved_elems[0..i], elems[0..i]); } if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; } - return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + return if (resolved_elems.len == 0) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = aggregate.ty, .storage = .{ .elems = resolved_elems }, - } }))); + } })); }, .repeated_elem => |elem| { - const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); - return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern(); + return if (resolved_elem == elem) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = aggregate.ty, .storage = .{ .repeated_elem = resolved_elem }, - } }))); + } })); }, }, .un => |un| { const resolved_tag = if (un.tag == .none) .none else - (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern(); - const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern(); + (try Value.fromInterned(un.tag).resolveLazy(arena, pt)).toIntern(); + const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, pt)).toIntern(); return if (resolved_tag == un.tag and resolved_val == un.val) val else - Value.fromInterned((try zcu.intern(.{ .un = .{ + Value.fromInterned(try pt.intern(.{ .un = .{ .ty = un.ty, .tag = resolved_tag, .val = resolved_val, - } }))); + } })); }, else => return val, } diff --git a/src/Zcu.zig b/src/Zcu.zig index 203238c6334a..bfc70815dfc5 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -6,7 +6,6 @@ const std = @import("std"); const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; -const ArrayListUnmanaged = std.ArrayListUnmanaged; const assert = std.debug.assert; const log = std.log.scoped(.module); const BigIntConst = std.math.big.int.Const; @@ -75,10 +74,10 @@ local_zir_cache: Compilation.Directory, /// This is where all `Export` values are stored. Not all values here are necessarily valid exports; /// to enumerate all exports, `single_exports` and `multi_exports` must be consulted. -all_exports: ArrayListUnmanaged(Export) = .{}, +all_exports: std.ArrayListUnmanaged(Export) = .{}, /// This is a list of free indices in `all_exports`. These indices may be reused by exports from /// future semantic analysis. -free_exports: ArrayListUnmanaged(u32) = .{}, +free_exports: std.ArrayListUnmanaged(u32) = .{}, /// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of /// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit` /// whose analysis triggered the export. 
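// Note: the hunks below introduce `Zcu.PerThread` and begin routing APIs
// through it in place of a bare `*Zcu`. The patch itself only shows that
// `PerThread` lives in Zcu/PerThread.zig and exposes a `zcu` field; the
// following standalone reduction of the handle pattern is a sketch under
// that assumption (the `tid` field is invented for illustration):
const Zcu = struct {
    pub const PerThread = struct {
        zcu: *Zcu,
        tid: usize, // hypothetical per-thread identifier; not shown in this patch
    };
};

// Callers that previously took `zcu: *Zcu` now take the handle and reach
// shared compilation state through `pt.zcu`:
fn sketch(pt: Zcu.PerThread) void {
    _ = pt.zcu; // shared state
    _ = pt.tid; // thread-local routing (assumed)
}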
@@ -179,7 +178,7 @@ stage1_flags: packed struct { reserved: u2 = 0, } = .{}, -compile_log_text: ArrayListUnmanaged(u8) = .{}, +compile_log_text: std.ArrayListUnmanaged(u8) = .{}, emit_h: ?*GlobalEmitH, @@ -203,6 +202,8 @@ panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, +pub const PerThread = @import("Zcu/PerThread.zig"); + pub const PanicId = enum { unreach, unwrap_null, @@ -519,24 +520,24 @@ pub const Decl = struct { return decl.getExternDecl(zcu) != .none; } - pub fn getAlignment(decl: Decl, zcu: *Zcu) Alignment { + pub fn getAlignment(decl: Decl, pt: Zcu.PerThread) Alignment { assert(decl.has_tv); if (decl.alignment != .none) return decl.alignment; - return decl.typeOf(zcu).abiAlignment(zcu); + return decl.typeOf(pt.zcu).abiAlignment(pt); } - pub fn declPtrType(decl: Decl, zcu: *Zcu) !Type { + pub fn declPtrType(decl: Decl, pt: Zcu.PerThread) !Type { assert(decl.has_tv); - const decl_ty = decl.typeOf(zcu); - return zcu.ptrType(.{ + const decl_ty = decl.typeOf(pt.zcu); + return pt.ptrType(.{ .child = decl_ty.toIntern(), .flags = .{ - .alignment = if (decl.alignment == decl_ty.abiAlignment(zcu)) + .alignment = if (decl.alignment == decl_ty.abiAlignment(pt)) .none else decl.alignment, .address_space = decl.@"addrspace", - .is_const = decl.getOwnedVariable(zcu) == null, + .is_const = decl.getOwnedVariable(pt.zcu) == null, }, }); } @@ -589,7 +590,7 @@ pub const Decl = struct { /// This state is attached to every Decl when Module emit_h is non-null. pub const EmitH = struct { - fwd_decl: ArrayListUnmanaged(u8) = .{}, + fwd_decl: std.ArrayListUnmanaged(u8) = .{}, }; pub const DeclAdapter = struct { @@ -622,8 +623,8 @@ pub const Namespace = struct { /// Value is whether the usingnamespace decl is marked `pub`. usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, - const Index = InternPool.NamespaceIndex; - const OptionalIndex = InternPool.OptionalNamespaceIndex; + pub const Index = InternPool.NamespaceIndex; + pub const OptionalIndex = InternPool.OptionalNamespaceIndex; const DeclContext = struct { zcu: *Zcu, @@ -3079,7 +3080,7 @@ pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { } } -fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { +pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { @@ -3279,7 +3280,7 @@ pub fn mapOldZirToNew( old_inst: Zir.Inst.Index, new_inst: Zir.Inst.Index, }; - var match_stack: ArrayListUnmanaged(MatchedZirDecl) = .{}; + var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{}; defer match_stack.deinit(gpa); // Main struct inst is always matched @@ -3394,357 +3395,6 @@ pub fn mapOldZirToNew( } } -/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl. -pub fn ensureFileAnalyzed(zcu: *Zcu, file_index: File.Index) SemaError!void { - if (zcu.fileRootDecl(file_index).unwrap()) |existing_root| { - return zcu.ensureDeclAnalyzed(existing_root); - } else { - return zcu.semaFile(file_index); - } -} - -/// This ensures that the Decl will have an up-to-date Type and Value populated. -/// However the resolution status of the Type may not be fully resolved. -/// For example an inferred error set is not resolved until after `analyzeFnBody`. -/// is called. 
-pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { - const tracy = trace(@src()); - defer tracy.end(); - - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); - - log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{ - @intFromEnum(decl_index), - decl.name.fmt(ip), - }); - - // Determine whether or not this Decl is outdated, i.e. requires re-analysis - // even if `complete`. If a Decl is PO, we pessismistically assume that it - // *does* require re-analysis, to ensure that the Decl is definitely - // up-to-date when this function returns. - - // If analysis occurs in a poor order, this could result in over-analysis. - // We do our best to avoid this by the other dependency logic in this file - // which tries to limit re-analysis to Decls whose previously listed - // dependencies are all up-to-date. - - const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index }); - const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or - mod.potentially_outdated.swapRemove(decl_as_depender); - - if (decl_was_outdated) { - _ = mod.outdated_ready.swapRemove(decl_as_depender); - } - - const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated; - - switch (decl.analysis) { - .in_progress => unreachable, - - .file_failure => return error.AnalysisFail, - - .sema_failure, - .dependency_failure, - .codegen_failure, - => if (!was_outdated) return error.AnalysisFail, - - .complete => if (!was_outdated) return, - - .unreferenced => {}, - } - - if (was_outdated) { - // The exports this Decl performs will be re-discovered, so we remove them here - // prior to re-analysis. - if (build_options.only_c) unreachable; - mod.deleteUnitExports(decl_as_depender); - mod.deleteUnitReferences(decl_as_depender); - } - - const sema_result: SemaDeclResult = blk: { - if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) { - // Anonymous decl. We don't semantically analyze these. - break :blk .{ - .invalidate_decl_val = false, - .invalidate_decl_ref = false, - }; - } - - if (mod.declIsRoot(decl_index)) { - const changed = try mod.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated); - break :blk .{ - .invalidate_decl_val = changed, - .invalidate_decl_ref = changed, - }; - } - - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); - defer decl_prog_node.end(); - - break :blk mod.semaDecl(decl_index) catch |err| switch (err) { - error.AnalysisFail => { - if (decl.analysis == .in_progress) { - // If this decl caused the compile error, the analysis field would - // be changed to indicate it was this Decl's fault. Because this - // did not happen, we infer here that it was a dependency failure. - decl.analysis = .dependency_failure; - } - return error.AnalysisFail; - }, - error.GenericPoison => unreachable, - else => |e| { - decl.analysis = .sema_failure; - try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1); - try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( - mod.gpa, - decl.navSrcLoc(mod), - "unable to analyze: {s}", - .{@errorName(e)}, - )); - return error.AnalysisFail; - }, - }; - }; - - // TODO: we do not yet have separate dependencies for decl values vs types. 
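// Note: because of that TODO, the branch below collapses the two flags --
// either `invalidate_decl_val` or `invalidate_decl_ref` marks the single
// `.decl_val` dependee outdated.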
- if (decl_was_outdated) { - if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) { - log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)}); - // This dependency was marked as PO, meaning dependees were waiting - // on its analysis result, and it has turned out to be outdated. - // Update dependees accordingly. - try mod.markDependeeOutdated(.{ .decl_val = decl_index }); - } else { - log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)}); - // This dependency was previously PO, but turned out to be up-to-date. - // We do not need to queue successive analysis. - try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index }); - } - } -} - -pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.Index) SemaError!void { - const tracy = trace(@src()); - defer tracy.end(); - - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - - // We only care about the uncoerced function. - // We need to do this for the "orphaned function" check below to be valid. - const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index); - - const func = zcu.funcInfo(maybe_coerced_func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - - log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{ - @intFromEnum(func_index), - decl.name.fmt(ip), - }); - - // First, our owner decl must be up-to-date. This will always be the case - // during the first update, but may not on successive updates if we happen - // to get analyzed before our parent decl. - try zcu.ensureDeclAnalyzed(decl_index); - - // On an update, it's possible this function changed such that our owner - // decl now refers to a different function, making this one orphaned. If - // that's the case, we should remove this function from the binary. - if (decl.val.ip_index != func_index) { - try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); - ip.remove(func_index); - @panic("TODO: remove orphaned function from binary"); - } - - // We'll want to remember what the IES used to be before the update for - // dependency invalidation purposes. 
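// Note: "IES" is the function's inferred error set. The saved
// `old_resolved_ies` feeds the `new_resolved_ies != old_resolved_ies` check
// further down, which picks between `markDependeeOutdated` and
// `markPoDependeeUpToDate` for `.func_ies` dependencies.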
- const old_resolved_ies = if (func.analysis(ip).inferred_error_set) - func.resolvedErrorSet(ip).* - else - .none; - - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - - .codegen_failure => unreachable, // functions do not perform constant value generation - - .file_failure, - .sema_failure, - .dependency_failure, - => return error.AnalysisFail, - - .complete => {}, - } - - const func_as_depender = AnalUnit.wrap(.{ .func = func_index }); - const was_outdated = zcu.outdated.swapRemove(func_as_depender) or - zcu.potentially_outdated.swapRemove(func_as_depender); - - if (was_outdated) { - if (build_options.only_c) unreachable; - _ = zcu.outdated_ready.swapRemove(func_as_depender); - zcu.deleteUnitExports(func_as_depender); - zcu.deleteUnitReferences(func_as_depender); - } - - switch (func.analysis(ip).state) { - .success => if (!was_outdated) return, - .sema_failure, - .dependency_failure, - .codegen_failure, - => if (!was_outdated) return error.AnalysisFail, - .none, .queued => {}, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - } - - log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{ - @intFromEnum(func_index), - if (was_outdated) "outdated" else "never analyzed", - }); - - var tmp_arena = std.heap.ArenaAllocator.init(gpa); - defer tmp_arena.deinit(); - const sema_arena = tmp_arena.allocator(); - - var air = zcu.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { - error.AnalysisFail => { - if (func.analysis(ip).state == .in_progress) { - // If this decl caused the compile error, the analysis field would - // be changed to indicate it was this Decl's fault. Because this - // did not happen, we infer here that it was a dependency failure. - func.analysis(ip).state = .dependency_failure; - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, - }; - errdefer air.deinit(gpa); - - const invalidate_ies_deps = i: { - if (!was_outdated) break :i false; - if (!func.analysis(ip).inferred_error_set) break :i true; - const new_resolved_ies = func.resolvedErrorSet(ip).*; - break :i new_resolved_ies != old_resolved_ies; - }; - if (invalidate_ies_deps) { - log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - } else if (was_outdated) { - log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index }); - } - - const comp = zcu.comp; - - const dump_air = build_options.enable_debug_extensions and comp.verbose_air; - const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); - - if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { - air.deinit(gpa); - return; - } - - try comp.work_queue.writeItem(.{ .codegen_func = .{ - .func = func_index, - .air = air, - } }); -} - -/// Takes ownership of `air`, even on error. -/// If any types referenced by `air` are unresolved, marks the codegen as failed. 
-pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void { - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - const comp = zcu.comp; - - defer { - var air_mut = air; - air_mut.deinit(gpa); - } - - const func = zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - - var liveness = try Liveness.analyze(gpa, air, ip); - defer liveness.deinit(gpa); - - if (build_options.enable_debug_extensions and comp.verbose_air) { - const fqn = try decl.fullyQualifiedName(zcu); - std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); - @import("print_air.zig").dump(zcu, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); - } - - if (std.debug.runtime_safety) { - var verify: Liveness.Verify = .{ - .gpa = gpa, - .air = air, - .liveness = liveness, - .intern_pool = ip, - }; - defer verify.deinit(); - - verify.verify() catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - else => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber( - AnalUnit.wrap(.{ .func = func_index }), - try Module.ErrorMsg.create( - gpa, - decl.navSrcLoc(zcu), - "invalid liveness: {s}", - .{@errorName(err)}, - ), - ); - func.analysis(ip).state = .codegen_failure; - return; - }, - }; - } - - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); - defer codegen_prog_node.end(); - - if (!air.typesFullyResolved(zcu)) { - // A type we depend on failed to resolve. This is a transitive failure. - // Correcting this failure will involve changing a type this function - // depends on, hence triggering re-analysis of this function, so this - // interacts correctly with incremental compilation. - func.analysis(ip).state = .codegen_failure; - } else if (comp.bin_file) |lf| { - lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - func.analysis(ip).state = .codegen_failure; - }, - else => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create( - gpa, - decl.navSrcLoc(zcu), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - func.analysis(ip).state = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index })); - }, - }; - } else if (zcu.llvm_object) |llvm_object| { - if (build_options.only_c) unreachable; - llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - }; - } -} - /// Ensure this function's body is or will be analyzed and emitted. This should /// be called whenever a potential runtime call of a function is seen. 
 ///
@@ -3804,608 +3454,105 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
     func.analysis(ip).state = .queued;
 }
 
-pub fn semaPkg(zcu: *Zcu, pkg: *Package.Module) !void {
-    const import_file_result = try zcu.importPkg(pkg);
-    const root_decl_index = zcu.fileRootDecl(import_file_result.file_index);
-    if (root_decl_index == .none) {
-        return zcu.semaFile(import_file_result.file_index);
-    }
-}
-
-fn getFileRootStruct(
-    zcu: *Zcu,
-    decl_index: Decl.Index,
-    namespace_index: Namespace.Index,
-    file_index: File.Index,
-) Allocator.Error!InternPool.Index {
-    const gpa = zcu.gpa;
-    const ip = &zcu.intern_pool;
-    const file = zcu.fileByIndex(file_index);
-    const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
-    assert(extended.opcode == .struct_decl);
-    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
-    assert(!small.has_captures_len);
-    assert(!small.has_backing_int);
-    assert(small.layout == .auto);
-    var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
-    const fields_len = if (small.has_fields_len) blk: {
-        const fields_len = file.zir.extra[extra_index];
-        extra_index += 1;
-        break :blk fields_len;
-    } else 0;
-    const decls_len = if (small.has_decls_len) blk: {
-        const decls_len = file.zir.extra[extra_index];
-        extra_index += 1;
-        break :blk decls_len;
-    } else 0;
-    const decls = file.zir.bodySlice(extra_index, decls_len);
-    extra_index += decls_len;
-
-    const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst);
-    const wip_ty = switch (try ip.getStructType(gpa, .{
-        .layout = .auto,
-        .fields_len = fields_len,
-        .known_non_opv = small.known_non_opv,
-        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
-        .is_tuple = small.is_tuple,
-        .any_comptime_fields = small.any_comptime_fields,
-        .any_default_inits = small.any_default_inits,
-        .inits_resolved = false,
-        .any_aligned_fields = small.any_aligned_fields,
-        .has_namespace = true,
-        .key = .{ .declared = .{
-            .zir_index = tracked_inst,
-            .captures = &.{},
-        } },
-    })) {
-        .existing => unreachable, // we wouldn't be analysing the file root if this type existed
-        .wip => |wip| wip,
-    };
-    errdefer wip_ty.cancel(ip);
-
-    if (zcu.comp.debug_incremental) {
-        try ip.addDependency(
-            gpa,
-            AnalUnit.wrap(.{ .decl = decl_index }),
-            .{ .src_hash = tracked_inst },
-        );
-    }
+pub const SemaDeclResult = packed struct {
+    /// Whether the value of a `decl_val` of this Decl changed.
+    invalidate_decl_val: bool,
+    /// Whether the type of a `decl_ref` of this Decl changed.
+    invalidate_decl_ref: bool,
+};
 
+pub fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
     const decl = zcu.declPtr(decl_index);
-    decl.val = Value.fromInterned(wip_ty.index);
-    decl.has_tv = true;
-    decl.owns_tv = true;
-    decl.analysis = .complete;
-
-    try zcu.scanNamespace(namespace_index, decls, decl);
-    try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
-    return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
-}
-
-/// Re-analyze the root Decl of a file on an incremental update.
-/// If `type_outdated`, the struct type itself is considered outdated and is
-/// reconstructed at a new InternPool index. Otherwise, the namespace is just
-/// re-analyzed. Returns whether the decl's tyval was invalidated.
-fn semaFileUpdate(zcu: *Zcu, file_index: File.Index, type_outdated: bool) SemaError!bool { - const file = zcu.fileByIndex(file_index); - const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?); - - log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ - file.mod.fully_qualified_name, - file.sub_file_path, - type_outdated, - }); - - if (file.status != .success_zir) { - if (decl.analysis == .file_failure) { - return false; - } else { - decl.analysis = .file_failure; - return true; - } - } - - if (decl.analysis == .file_failure) { - // No struct type currently exists. Create one! - const root_decl = zcu.fileRootDecl(file_index); - _ = try zcu.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index); - return true; - } assert(decl.has_tv); assert(decl.owns_tv); - if (type_outdated) { - // Invalidate the existing type, reusing the decl and namespace. - const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?; - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ - .decl = file_root_decl, - })); - zcu.intern_pool.remove(decl.val.toIntern()); - decl.val = undefined; - _ = try zcu.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); - return true; - } - - // Only the struct's namespace is outdated. - // Preserve the type - just scan the namespace again. - - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; - const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; - extra_index += 1; - break :blk decls_len; - } else 0; - const decls = file.zir.bodySlice(extra_index, decls_len); + log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); - if (!type_outdated) { - try zcu.scanNamespace(decl.src_namespace, decls, decl); + switch (decl.typeOf(zcu).zigTypeTag(zcu)) { + .Fn => @panic("TODO: update fn instance"), + .Type => {}, + else => unreachable, } - return false; + // We are the owner Decl of a type, and we were marked as outdated. That means the *structure* + // of this type changed; not just its namespace. Therefore, we need a new InternPool index. + // + // However, as soon as we make that, the context that created us will require re-analysis anyway + // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction + // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, + // why should we bother implementing it here too when the Sema logic will be hit right after? + // + // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely + // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type + // with a new Decl. + // + // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.remove(decl.val.toIntern()); + decl.analysis = .dependency_failure; + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; } -/// Regardless of the file status, will create a `Decl` if none exists so that we can track -/// dependencies and re-analyze when the file becomes outdated. 
-fn semaFile(zcu: *Zcu, file_index: File.Index) SemaError!void { - const tracy = trace(@src()); - defer tracy.end(); - - const file = zcu.fileByIndex(file_index); - assert(zcu.fileRootDecl(file_index) == .none); +pub const ImportFileResult = struct { + file: *File, + file_index: File.Index, + is_new: bool, + is_pkg: bool, +}; +pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { const gpa = zcu.gpa; - log.debug("semaFile zcu={s} sub_file_path={s}", .{ - file.mod.fully_qualified_name, file.sub_file_path, - }); - // Because these three things each reference each other, `undefined` - // placeholders are used before being set after the struct type gains an - // InternPool index. - const new_namespace_index = try zcu.createNamespace(.{ - .parent = .none, - .decl_index = undefined, - .file_scope = file_index, + // The resolved path is used as the key in the import table, to detect if + // an import refers to the same as another, despite different relative paths + // or differently mapped package names. + const resolved_path = try std.fs.path.resolve(gpa, &.{ + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + mod.root_src_path, }); - errdefer zcu.destroyNamespace(new_namespace_index); + var keep_resolved_path = false; + defer if (!keep_resolved_path) gpa.free(resolved_path); - const new_decl_index = try zcu.allocateNewDecl(new_namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - errdefer @panic("TODO error handling"); + const gop = try zcu.import_table.getOrPut(gpa, resolved_path); + errdefer _ = zcu.import_table.pop(); + if (gop.found_existing) { + try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod }); + return .{ + .file = gop.value_ptr.*, + .file_index = @enumFromInt(gop.index), + .is_new = false, + .is_pkg = true, + }; + } - zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); - zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; + const ip = &zcu.intern_pool; - new_decl.name = try file.fullyQualifiedName(zcu); - new_decl.name_fully_qualified = true; - new_decl.is_pub = true; - new_decl.is_exported = false; - new_decl.alignment = .none; - new_decl.@"linksection" = .none; - new_decl.analysis = .in_progress; + try ip.files.ensureUnusedCapacity(gpa, 1); - if (file.status != .success_zir) { - new_decl.analysis = .file_failure; - return; + if (mod.builtin_file) |builtin_file| { + keep_resolved_path = true; // It's now owned by import_table. 
+ gop.value_ptr.* = builtin_file; + try builtin_file.addReference(zcu.*, .{ .root = mod }); + const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ + .file = builtin_file, + .file_index = @enumFromInt(ip.files.entries.len - 1), + .is_new = false, + .is_pkg = true, + }; } - assert(file.zir_loaded); - const struct_ty = try zcu.getFileRootStruct(new_decl_index, new_namespace_index, file_index); - errdefer zcu.intern_pool.remove(struct_ty); + const sub_file_path = try gpa.dupe(u8, mod.root_src_path); + errdefer gpa.free(sub_file_path); - switch (zcu.comp.cache_use) { - .whole => |whole| if (whole.cache_manifest) |man| { - const source = file.getSource(gpa) catch |err| { - try reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)}); - return error.AnalysisFail; - }; - - const resolved_path = std.fs.path.resolve(gpa, &.{ - file.mod.root.root_dir.path orelse ".", - file.mod.root.sub_path, - file.sub_file_path, - }) catch |err| { - try reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)}); - return error.AnalysisFail; - }; - errdefer gpa.free(resolved_path); - - whole.cache_manifest_mutex.lock(); - defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(resolved_path, source.bytes, source.stat); - }, - .incremental => {}, - } -} - -const SemaDeclResult = packed struct { - /// Whether the value of a `decl_val` of this Decl changed. - invalidate_decl_val: bool, - /// Whether the type of a `decl_ref` of this Decl changed. - invalidate_decl_ref: bool, -}; - -fn semaDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { - const tracy = trace(@src()); - defer tracy.end(); - - const decl = zcu.declPtr(decl_index); - const ip = &zcu.intern_pool; - - if (decl.getFileScope(zcu).status != .success_zir) { - return error.AnalysisFail; - } - - assert(!zcu.declIsRoot(decl_index)); - - if (decl.zir_decl_index == .none and decl.owns_tv) { - // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). - return zcu.semaAnonOwnerDecl(decl_index); - } - - log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); - defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); - } - - const old_has_tv = decl.has_tv; - // The following values are ignored if `!old_has_tv` - const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined; - const old_val = decl.val; - const old_align = decl.alignment; - const old_linksection = decl.@"linksection"; - const old_addrspace = decl.@"addrspace"; - const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func| - prev_func.analysis(ip).state == .inline_only - else - false; - - const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip); - - const gpa = zcu.gpa; - const zir = decl.getFileScope(zcu).zir; - - const builtin_type_target_index: InternPool.Index = ip_index: { - const std_mod = zcu.std_mod; - if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none; - // We're in the std module. 
- const std_file_imported = try zcu.importPkg(std_mod); - const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); - const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); - const std_namespace = std_decl.getInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); - const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); - const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; - if (decl.src_namespace != builtin_namespace) break :ip_index .none; - // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index. - for ([_][]const u8{ - "AtomicOrder", - "AtomicRmwOp", - "CallingConvention", - "AddressSpace", - "FloatMode", - "ReduceOp", - "CallModifier", - "PrefetchOptions", - "ExportOptions", - "ExternOptions", - "Type", - }, [_]InternPool.Index{ - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .call_modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - }) |type_name, type_ip| { - if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip; - } - break :ip_index .none; - }; - - zcu.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index })); - - decl.analysis = .in_progress; - - var analysis_arena = std.heap.ArenaAllocator.init(gpa); - defer analysis_arena.deinit(); - - var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); - defer comptime_err_ret_trace.deinit(); - - var sema: Sema = .{ - .mod = zcu, - .gpa = gpa, - .arena = analysis_arena.allocator(), - .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, - .func_index = .none, - .func_is_naked = false, - .fn_ret_ty = Type.void, - .fn_ret_ty_ies = null, - .owner_func_index = .none, - .comptime_err_ret_trace = &comptime_err_ret_trace, - .builtin_type_target_index = builtin_type_target_index, - }; - defer sema.deinit(); - - // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. - try sema.declareDependency(.{ .src_hash = try ip.trackZir( - gpa, - decl.getFileScopeIndex(zcu), - decl_inst, - ) }); - - var block_scope: Sema.Block = .{ - .parent = null, - .sema = &sema, - .namespace = decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = decl.zir_decl_index.unwrap().?, - .type_name_ctx = decl.name, - }; - defer block_scope.instructions.deinit(gpa); - - const decl_bodies = decl.zirBodies(zcu); - - const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst); - // We'll do some other bits with the Sema. Clear the type target index just - // in case they analyze any type. 
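[Aside: the lookup removed above pairs two parallel comptime arrays, well-known builtin type names and their fixed InternPool indices, and scans them linearly. A self-contained sketch of the same parallel-array idiom, with a hypothetical `Index` enum standing in for `InternPool.Index`; not part of the patch.]

const std = @import("std");

const Index = enum { none, atomic_order_type, type_info_type };

const names = [_][]const u8{ "AtomicOrder", "Type" };
const indices = [_]Index{ .atomic_order_type, .type_info_type };

/// Linear scan over parallel arrays; fine for a handful of well-known names.
fn wellKnownIndex(name: []const u8) Index {
    for (names, indices) |n, i| {
        if (std.mem.eql(u8, n, name)) return i;
    }
    return .none;
}

test "well-known name lookup" {
    try std.testing.expectEqual(Index.type_info_type, wellKnownIndex("Type"));
    try std.testing.expectEqual(Index.none, wellKnownIndex("Foo"));
}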
- sema.builtin_type_target_index = .none; - const align_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_align = 0 }); - const section_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_section = 0 }); - const address_space_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_addrspace = 0 }); - const ty_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_ty = 0 }); - const init_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_init = 0 }); - const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref); - const decl_ty = decl_val.typeOf(zcu); - - // Note this resolves the type of the Decl, not the value; if this Decl - // is a struct, for example, this resolves `type` (which needs no resolution), - // not the struct itself. - try decl_ty.resolveLayout(zcu); - - if (decl.kind == .@"usingnamespace") { - if (!decl_ty.eql(Type.type, zcu)) { - return sema.fail(&block_scope, ty_src, "expected type, found {}", .{ - decl_ty.fmt(zcu), - }); - } - const ty = decl_val.toType(); - if (ty.getNamespace(zcu) == null) { - return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(zcu)}); - } - - decl.val = ty.toValue(); - decl.alignment = .none; - decl.@"linksection" = .none; - decl.has_tv = true; - decl.owns_tv = false; - decl.analysis = .complete; - - // TODO: usingnamespace cannot currently participate in incremental compilation - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; - } - - var queue_linker_work = true; - var is_func = false; - var is_inline = false; - switch (decl_val.toIntern()) { - .generic_poison => unreachable, - .unreachable_value => unreachable, - else => switch (ip.indexToKey(decl_val.toIntern())) { - .variable => |variable| { - decl.owns_tv = variable.decl == decl_index; - queue_linker_work = decl.owns_tv; - }, - - .extern_func => |extern_func| { - decl.owns_tv = extern_func.decl == decl_index; - queue_linker_work = decl.owns_tv; - is_func = decl.owns_tv; - }, - - .func => |func| { - decl.owns_tv = func.owner_decl == decl_index; - queue_linker_work = false; - is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline; - is_func = decl.owns_tv; - }, - - else => {}, - }, - } - - decl.val = decl_val; - // Function linksection, align, and addrspace were already set by Sema - if (!is_func) { - decl.alignment = blk: { - const align_body = decl_bodies.align_body orelse break :blk .none; - const align_ref = try sema.resolveInlineBody(&block_scope, align_body, decl_inst); - break :blk try sema.analyzeAsAlign(&block_scope, align_src, align_ref); - }; - decl.@"linksection" = blk: { - const linksection_body = decl_bodies.linksection_body orelse break :blk .none; - const linksection_ref = try sema.resolveInlineBody(&block_scope, linksection_body, decl_inst); - const bytes = try sema.toConstString(&block_scope, section_src, linksection_ref, .{ - .needed_comptime_reason = "linksection must be comptime-known", - }); - if (mem.indexOfScalar(u8, bytes, 0) != null) { - return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); - } else if (bytes.len == 0) { - return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); - } - break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls); - }; - decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) { - .variable => .variable, - .extern_func, .func => .function, - else => .constant, - }; - - const target = 
sema.mod.getTarget(); - - const addrspace_body = decl_bodies.addrspace_body orelse break :blk switch (addrspace_ctx) { - .function => target_util.defaultAddressSpace(target, .function), - .variable => target_util.defaultAddressSpace(target, .global_mutable), - .constant => target_util.defaultAddressSpace(target, .global_constant), - else => unreachable, - }; - const addrspace_ref = try sema.resolveInlineBody(&block_scope, addrspace_body, decl_inst); - break :blk try sema.analyzeAsAddressSpace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx); - }; - } - decl.has_tv = true; - decl.analysis = .complete; - - const result: SemaDeclResult = if (old_has_tv) .{ - .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or - !decl.val.eql(old_val, decl_ty, zcu) or - is_inline != old_is_inline, - .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or - decl.alignment != old_align or - decl.@"linksection" != old_linksection or - decl.@"addrspace" != old_addrspace or - is_inline != old_is_inline, - } else .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; - - const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty)); - if (has_runtime_bits) { - // Needed for codegen_decl which will call updateDecl and then the - // codegen backend wants full access to the Decl Type. - try decl_ty.resolveFully(zcu); - - try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); - - if (result.invalidate_decl_ref and zcu.emit_h != null) { - try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); - } - } - - if (decl.is_exported) { - const export_src: LazySrcLoc = block_scope.src(.{ .token_offset = @intFromBool(decl.is_pub) }); - if (is_inline) return sema.fail(&block_scope, export_src, "export of inline function", .{}); - // The scope needs to have the decl in it. - try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); - } - - try sema.flushExports(); - - return result; -} - -fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { - const decl = zcu.declPtr(decl_index); - - assert(decl.has_tv); - assert(decl.owns_tv); - - log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); - - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => @panic("TODO: update fn instance"), - .Type => {}, - else => unreachable, - } - - // We are the owner Decl of a type, and we were marked as outdated. That means the *structure* - // of this type changed; not just its namespace. Therefore, we need a new InternPool index. - // - // However, as soon as we make that, the context that created us will require re-analysis anyway - // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction - // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, - // why should we bother implementing it here too when the Sema logic will be hit right after? - // - // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely - // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type - // with a new Decl. - // - // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. 
- zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(decl.val.toIntern()); - decl.analysis = .dependency_failure; - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; -} - -pub const ImportFileResult = struct { - file: *File, - file_index: File.Index, - is_new: bool, - is_pkg: bool, -}; - -pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { - const gpa = zcu.gpa; - - // The resolved path is used as the key in the import table, to detect if - // an import refers to the same as another, despite different relative paths - // or differently mapped package names. - const resolved_path = try std.fs.path.resolve(gpa, &.{ - mod.root.root_dir.path orelse ".", - mod.root.sub_path, - mod.root_src_path, - }); - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); - - const gop = try zcu.import_table.getOrPut(gpa, resolved_path); - errdefer _ = zcu.import_table.pop(); - if (gop.found_existing) { - try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod }); - return .{ - .file = gop.value_ptr.*, - .file_index = @enumFromInt(gop.index), - .is_new = false, - .is_pkg = true, - }; - } - - const ip = &zcu.intern_pool; - - try ip.files.ensureUnusedCapacity(gpa, 1); - - if (mod.builtin_file) |builtin_file| { - keep_resolved_path = true; // It's now owned by import_table. - gop.value_ptr.* = builtin_file; - try builtin_file.addReference(zcu.*, .{ .root = mod }); - const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); - return .{ - .file = builtin_file, - .file_index = @enumFromInt(ip.files.entries.len - 1), - .is_new = false, - .is_pkg = true, - }; - } - - const sub_file_path = try gpa.dupe(u8, mod.root_src_path); - errdefer gpa.free(sub_file_path); - - const new_file = try gpa.create(File); - errdefer gpa.destroy(new_file); + const new_file = try gpa.create(File); + errdefer gpa.destroy(new_file); keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = new_file; @@ -4533,78 +3680,6 @@ pub fn importFile( }; } -pub fn embedFile( - mod: *Module, - cur_file: *File, - import_string: []const u8, - src_loc: LazySrcLoc, -) !InternPool.Index { - const gpa = mod.gpa; - - if (cur_file.mod.deps.get(import_string)) |pkg| { - const resolved_path = try std.fs.path.resolve(gpa, &.{ - pkg.root.root_dir.path orelse ".", - pkg.root.sub_path, - pkg.root_src_path, - }); - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); - - const gop = try mod.embed_table.getOrPut(gpa, resolved_path); - errdefer { - assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path)); - keep_resolved_path = false; - } - if (gop.found_existing) return gop.value_ptr.*.val; - keep_resolved_path = true; - - const sub_file_path = try gpa.dupe(u8, pkg.root_src_path); - errdefer gpa.free(sub_file_path); - - return newEmbedFile(mod, pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc); - } - - // The resolved path is used as the key in the table, to detect if a file - // refers to the same as another, despite different relative paths. 
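[Aside: as the comment above notes, deduplication hinges on resolving every path to one canonical form and keying the table on it, so different spellings of the same file collapse to a single entry. A minimal demonstration of the keying idea; illustrative only, not part of the patch.]

const std = @import("std");

test "resolved paths collapse aliases to one key" {
    const gpa = std.testing.allocator;

    var table = std.StringHashMap(u32).init(gpa);
    defer {
        var it = table.keyIterator();
        while (it.next()) |k| gpa.free(k.*);
        table.deinit();
    }

    for ([_][]const u8{ "src/b.zig", "src/a/../b.zig" }) |raw| {
        // `resolve` normalizes "." and "..", so both spellings produce the
        // same canonical key.
        const key = try std.fs.path.resolve(gpa, &.{raw});
        const gop = try table.getOrPut(key);
        if (gop.found_existing) {
            gpa.free(key); // the canonical key is already stored
        } else {
            gop.value_ptr.* = 0;
        }
    }

    try std.testing.expectEqual(@as(u32, 1), table.count());
}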
- const resolved_path = try std.fs.path.resolve(gpa, &.{ - cur_file.mod.root.root_dir.path orelse ".", - cur_file.mod.root.sub_path, - cur_file.sub_file_path, - "..", - import_string, - }); - - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); - - const gop = try mod.embed_table.getOrPut(gpa, resolved_path); - errdefer { - assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path)); - keep_resolved_path = false; - } - if (gop.found_existing) return gop.value_ptr.*.val; - keep_resolved_path = true; - - const resolved_root_path = try std.fs.path.resolve(gpa, &.{ - cur_file.mod.root.root_dir.path orelse ".", - cur_file.mod.root.sub_path, - }); - defer gpa.free(resolved_root_path); - - const sub_file_path = p: { - const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); - errdefer gpa.free(relative); - - if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { - break :p relative; - } - return error.ImportOutsideModulePath; - }; - defer gpa.free(sub_file_path); - - return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); -} - fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest { const want_local_cache = mod == zcu.main_mod; var path_hash: Cache.HashHelper = .{}; @@ -4620,87 +3695,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) return bin; } -/// https://github.com/ziglang/zig/issues/14307 -fn newEmbedFile( - mod: *Module, - pkg: *Package.Module, - sub_file_path: []const u8, - resolved_path: []const u8, - result: **EmbedFile, - src_loc: LazySrcLoc, -) !InternPool.Index { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - - const new_file = try gpa.create(EmbedFile); - errdefer gpa.destroy(new_file); - - var file = try pkg.root.openFile(sub_file_path, .{}); - defer file.close(); - - const actual_stat = try file.stat(); - const stat: Cache.File.Stat = .{ - .size = actual_stat.size, - .inode = actual_stat.inode, - .mtime = actual_stat.mtime, - }; - const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow; - - const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1)); - const actual_read = try file.readAll(bytes[0..size]); - if (actual_read != size) return error.UnexpectedEndOfFile; - bytes[size] = 0; - - const comp = mod.comp; - switch (comp.cache_use) { - .whole => |whole| if (whole.cache_manifest) |man| { - const copied_resolved_path = try gpa.dupe(u8, resolved_path); - errdefer gpa.free(copied_resolved_path); - whole.cache_manifest_mutex.lock(); - defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat); - }, - .incremental => {}, - } - - const array_ty = try ip.get(gpa, .{ .array_type = .{ - .len = size, - .sentinel = .zero_u8, - .child = .u8_type, - } }); - const array_val = try ip.get(gpa, .{ .aggregate = .{ - .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) }, - } }); - - const ptr_ty = (try mod.ptrType(.{ - .child = array_ty, - .flags = .{ - .alignment = .none, - .is_const = true, - .address_space = .generic, - }, - })).toIntern(); - const ptr_val = try ip.get(gpa, .{ .ptr = .{ - .ty = ptr_ty, - .base_addr = .{ .anon_decl = .{ - .val = array_val, - .orig_ty = ptr_ty, - } }, - .byte_offset = 0, - } }); - - result.* = new_file; - new_file.* = .{ - .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, 
.no_embedded_nulls), - .owner = pkg, - .stat = stat, - .val = ptr_val, - .src_loc = src_loc, - }; - return ptr_val; -} - pub fn scanNamespace( zcu: *Zcu, namespace_index: Namespace.Index, @@ -4970,13 +3964,6 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyDecl(decl_index); } -/// Finalize the creation of an anon decl. -pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { - if (mod.declPtr(decl_index).typeOf(mod).isFnOrHasRuntimeBits(mod)) { - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); - } -} - /// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of /// this `AnalUnit` will cause them to be re-created (or not). pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { @@ -5019,7 +4006,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { /// Delete all references in `reference_table` which are caused by this `AnalUnit`. /// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated. -fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { +pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; @@ -5058,258 +4045,13 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit gop.value_ptr.* = @intCast(ref_idx); } -pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { - const tracy = trace(@src()); - defer tracy.end(); - - const gpa = mod.gpa; - const ip = &mod.intern_pool; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); - - log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)}); - defer blk: { - log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); - } - - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); - defer decl_prog_node.end(); - - mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); - - var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); - defer comptime_err_ret_trace.deinit(); +pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { + return mod.intern_pool.createNamespace(mod.gpa, initialization); +} - // In the case of a generic function instance, this is the type of the - // instance, which has comptime parameters elided. In other words, it is - // the runtime-known parameters only, not to be confused with the - // generic_owner function type, which potentially has more parameters, - // including comptime parameters. - const fn_ty = decl.typeOf(mod); - const fn_ty_info = mod.typeToFunc(fn_ty).?; - - var sema: Sema = .{ - .mod = mod, - .gpa = gpa, - .arena = arena, - .code = decl.getFileScope(mod).zir, - .owner_decl = decl, - .owner_decl_index = decl_index, - .func_index = func_index, - .func_is_naked = fn_ty_info.cc == .Naked, - .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type), - .fn_ret_ty_ies = null, - .owner_func_index = func_index, - .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), - .comptime_err_ret_trace = &comptime_err_ret_trace, - }; - defer sema.deinit(); - - // Every runtime function has a dependency on the source of the Decl it originates from. - // It also depends on the value of its owner Decl. 
- try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? }); - try sema.declareDependency(.{ .decl_val = decl_index }); - - if (func.analysis(ip).inferred_error_set) { - const ies = try arena.create(Sema.InferredErrorSet); - ies.* = .{ .func = func_index }; - sema.fn_ret_ty_ies = ies; - } - - // reset in case calls to errorable functions are removed. - func.analysis(ip).calls_or_awaits_errorable_fn = false; - - // First few indexes of extra are reserved and set at the end. - const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; - try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); - sema.air_extra.items.len += reserved_count; - - var inner_block: Sema.Block = .{ - .parent = null, - .sema = &sema, - .namespace = decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = false, - .src_base_inst = inst: { - const owner_info = if (func.generic_owner == .none) - func - else - mod.funcInfo(func.generic_owner); - const orig_decl = mod.declPtr(owner_info.owner_decl); - break :inst orig_decl.zir_decl_index.unwrap().?; - }, - .type_name_ctx = decl.name, - }; - defer inner_block.instructions.deinit(gpa); - - const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip)); - - // Here we are performing "runtime semantic analysis" for a function body, which means - // we must map the parameter ZIR instructions to `arg` AIR instructions. - // AIR requires the `arg` parameters to be the first N instructions. - // This could be a generic function instantiation, however, in which case we need to - // map the comptime parameters to constant values and only emit arg AIR instructions - // for the runtime ones. - const runtime_params_len = fn_ty_info.param_types.len; - try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); - try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len); - try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); - - // In the case of a generic function instance, pre-populate all the comptime args. - if (func.comptime_args.len != 0) { - for ( - fn_info.param_body[0..func.comptime_args.len], - func.comptime_args.get(ip), - ) |inst, comptime_arg| { - if (comptime_arg == .none) continue; - sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg)); - } - } - - const src_params_len = if (func.comptime_args.len != 0) - func.comptime_args.len - else - runtime_params_len; - - var runtime_param_index: usize = 0; - for (fn_info.param_body[0..src_params_len], 0..) 
|inst, src_param_index| { - const gop = sema.inst_map.getOrPutAssumeCapacity(inst); - if (gop.found_existing) continue; // provided above by comptime arg - - const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index]; - runtime_param_index += 1; - - const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) { - error.GenericPoison => unreachable, - error.ComptimeReturn => unreachable, - error.ComptimeBreak => unreachable, - else => |e| return e, - }; - if (opt_opv) |opv| { - gop.value_ptr.* = Air.internedToRef(opv.toIntern()); - continue; - } - const arg_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); - gop.value_ptr.* = arg_index.toRef(); - inner_block.instructions.appendAssumeCapacity(arg_index); - sema.air_instructions.appendAssumeCapacity(.{ - .tag = .arg, - .data = .{ .arg = .{ - .ty = Air.internedToRef(param_ty), - .src_index = @intCast(src_param_index), - } }, - }); - } - - func.analysis(ip).state = .in_progress; - - const last_arg_index = inner_block.instructions.items.len; - - // Save the error trace as our first action in the function. - // If this is unnecessary after all, Liveness will clean it up for us. - const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block); - sema.error_return_trace_index_on_fn_entry = error_return_trace_index; - inner_block.error_return_trace_index = error_return_trace_index; - - sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) { - // TODO make these unreachable instead of @panic - error.GenericPoison => @panic("zig compiler bug: GenericPoison"), - error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), - else => |e| return e, - }; - - for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| { - // The lack of a resolve_inferred_alloc means that this instruction - // is unused so it just has to be a no-op. - sema.air_instructions.set(@intFromEnum(ptr_inst), .{ - .tag = .alloc, - .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, - }); - } - - // If we don't get an error return trace from a caller, create our own. - if (func.analysis(ip).calls_or_awaits_errorable_fn and - mod.comp.config.any_error_tracing and - !sema.fn_ret_ty.isError(mod)) - { - sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { - // TODO make these unreachable instead of @panic - error.GenericPoison => @panic("zig compiler bug: GenericPoison"), - error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), - error.ComptimeBreak => @panic("zig compiler bug: ComptimeBreak"), - else => |e| return e, - }; - } - - // Copy the block into place and mark that as the main block. - try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + - inner_block.instructions.items.len); - const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @intCast(inner_block.instructions.items.len), - }); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(inner_block.instructions.items)); - sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; - - // Resolving inferred error sets is done *before* setting the function - // state to success, so that "unable to resolve inferred error set" errors - // can be emitted here. 
- if (sema.fn_ret_ty_ies) |ies| { - sema.resolveInferredErrorSetPtr(&inner_block, .{ - .base_node_inst = inner_block.src_base_inst, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, ies) catch |err| switch (err) { - error.GenericPoison => unreachable, - error.ComptimeReturn => unreachable, - error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. - // We should not try to lower this function. - decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => |e| return e, - }; - assert(ies.resolved != .none); - ip.funcIesResolved(func_index).* = ies.resolved; - } - - func.analysis(ip).state = .success; - - // Finally we must resolve the return type and parameter types so that backends - // have full access to type information. - // Crucially, this happens *after* we set the function state to success above, - // so that dependencies on the function body will now be satisfied rather than - // result in circular dependency errors. - sema.resolveFnTypes(fn_ty) catch |err| switch (err) { - error.GenericPoison => unreachable, - error.ComptimeReturn => unreachable, - error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. - // We should not try to lower this function. - decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => |e| return e, - }; - - try sema.flushExports(); - - return .{ - .instructions = sema.air_instructions.toOwnedSlice(), - .extra = try sema.air_extra.toOwnedSlice(gpa), - }; -} - -pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { - return mod.intern_pool.createNamespace(mod.gpa, initialization); -} - -pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { - return mod.intern_pool.destroyNamespace(mod.gpa, index); -} +pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { + return mod.intern_pool.destroyNamespace(mod.gpa, index); +} pub fn allocateNewDecl(zcu: *Zcu, namespace: Namespace.Index) !Decl.Index { const gpa = zcu.gpa; @@ -5420,117 +4162,7 @@ fn lockAndClearFileCompileError(mod: *Module, file: *File) void { } } -/// Called from `Compilation.update`, after everything is done, just before -/// reporting compile errors. In this function we emit exported symbol collision -/// errors and communicate exported symbols to the linker backend. -pub fn processExports(zcu: *Zcu) !void { - const gpa = zcu.gpa; - - // First, construct a mapping of every exported value and Decl to the indices of all its different exports. - var decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(u32)) = .{}; - var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(u32)) = .{}; - defer { - for (decl_exports.values()) |*exports| { - exports.deinit(gpa); - } - decl_exports.deinit(gpa); - for (value_exports.values()) |*exports| { - exports.deinit(gpa); - } - value_exports.deinit(gpa); - } - - // We note as a heuristic: - // * It is rare to export a value. - // * It is rare for one Decl to be exported multiple times. - // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization. 
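[Aside: the hunk continuing below builds a multimap from exported entity to the list of its export indices, using the getOrPut-then-append idiom that the capacity heuristic above is sizing for. A self-contained toy version of that idiom; not part of the patch.]

const std = @import("std");

test "getOrPut-then-append builds a multimap" {
    const gpa = std.testing.allocator;

    var map = std.AutoArrayHashMap(u32, std.ArrayListUnmanaged(u32)).init(gpa);
    defer {
        for (map.values()) |*list| list.deinit(gpa);
        map.deinit();
    }

    // Two exports of entity 7, one export of entity 9.
    for ([_][2]u32{ .{ 7, 0 }, .{ 9, 1 }, .{ 7, 2 } }) |pair| {
        const gop = try map.getOrPut(pair[0]);
        if (!gop.found_existing) gop.value_ptr.* = .{}; // first sighting: empty list
        try gop.value_ptr.append(gpa, pair[1]);
    }

    try std.testing.expectEqual(@as(usize, 2), map.get(7).?.items.len);
}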
- try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); - - for (zcu.single_exports.values()) |export_idx| { - const exp = zcu.all_exports.items[export_idx]; - const value_ptr, const found_existing = switch (exp.exported) { - .decl_index => |i| gop: { - const gop = try decl_exports.getOrPut(gpa, i); - break :gop .{ gop.value_ptr, gop.found_existing }; - }, - .value => |i| gop: { - const gop = try value_exports.getOrPut(gpa, i); - break :gop .{ gop.value_ptr, gop.found_existing }; - }, - }; - if (!found_existing) value_ptr.* = .{}; - try value_ptr.append(gpa, export_idx); - } - - for (zcu.multi_exports.values()) |info| { - for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| { - const value_ptr, const found_existing = switch (exp.exported) { - .decl_index => |i| gop: { - const gop = try decl_exports.getOrPut(gpa, i); - break :gop .{ gop.value_ptr, gop.found_existing }; - }, - .value => |i| gop: { - const gop = try value_exports.getOrPut(gpa, i); - break :gop .{ gop.value_ptr, gop.found_existing }; - }, - }; - if (!found_existing) value_ptr.* = .{}; - try value_ptr.append(gpa, @intCast(export_idx)); - } - } - - // Map symbol names to `Export` for name collision detection. - var symbol_exports: SymbolExports = .{}; - defer symbol_exports.deinit(gpa); - - for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| { - const exported: Exported = .{ .decl_index = exported_decl }; - try processExportsInner(zcu, &symbol_exports, exported, exports_list.items); - } - - for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| { - const exported: Exported = .{ .value = exported_value }; - try processExportsInner(zcu, &symbol_exports, exported, exports_list.items); - } -} - -const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32); - -fn processExportsInner( - zcu: *Zcu, - symbol_exports: *SymbolExports, - exported: Exported, - export_indices: []const u32, -) error{OutOfMemory}!void { - const gpa = zcu.gpa; - - for (export_indices) |export_idx| { - const new_export = &zcu.all_exports.items[export_idx]; - const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); - if (gop.found_existing) { - new_export.status = .failed_retryable; - try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const msg = try ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{ - new_export.opts.name.fmt(&zcu.intern_pool), - }); - errdefer msg.destroy(gpa); - const other_export = zcu.all_exports.items[gop.value_ptr.*]; - try zcu.errNote(other_export.src, msg, "other symbol here", .{}); - zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); - new_export.status = .failed; - } else { - gop.value_ptr.* = export_idx; - } - } - if (zcu.comp.bin_file) |lf| { - try handleUpdateExports(zcu, export_indices, lf.updateExports(zcu, exported, export_indices)); - } else if (zcu.llvm_object) |llvm_object| { - if (build_options.only_c) unreachable; - try handleUpdateExports(zcu, export_indices, llvm_object.updateExports(zcu, exported, export_indices)); - } -} - -fn handleUpdateExports( +pub fn handleUpdateExports( zcu: *Zcu, export_indices: []const u32, result: link.File.UpdateExportsError!void, @@ -5551,180 +4183,7 @@ fn handleUpdateExports( }; } -pub fn populateTestFunctions( - zcu: *Zcu, - main_progress_node: std.Progress.Node, -) !void { - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - const builtin_mod = 
zcu.root_mod.getBuiltinDependency(); - const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index; - const root_decl_index = zcu.fileRootDecl(builtin_file_index); - const root_decl = zcu.declPtr(root_decl_index.unwrap().?); - const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); - const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); - const decl_index = builtin_namespace.decls.getKeyAdapted( - test_functions_str, - DeclAdapter{ .zcu = zcu }, - ).?; - { - // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` - // was not referenced by start code. - zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - defer { - zcu.sema_prog_node.end(); - zcu.sema_prog_node = undefined; - } - try zcu.ensureDeclAnalyzed(decl_index); - } - - const decl = zcu.declPtr(decl_index); - const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu); - - const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: { - // Add zcu.test_functions to an array decl then make the test_functions - // decl reference it as a slice. - const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count()); - defer gpa.free(test_fn_vals); - - for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { - const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(zcu); - const test_decl_name_len = test_decl_name.length(ip); - const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { - const test_name_ty = try zcu.arrayType(.{ - .len = test_decl_name_len, - .child = .u8_type, - }); - const test_name_val = try zcu.intern(.{ .aggregate = .{ - .ty = test_name_ty.toIntern(), - .storage = .{ .bytes = test_decl_name.toString() }, - } }); - break :n .{ - .orig_ty = (try zcu.singleConstPtrType(test_name_ty)).toIntern(), - .val = test_name_val, - }; - }; - - const test_fn_fields = .{ - // name - try zcu.intern(.{ .slice = .{ - .ty = .slice_const_u8_type, - .ptr = try zcu.intern(.{ .ptr = .{ - .ty = .manyptr_const_u8_type, - .base_addr = .{ .anon_decl = test_name_anon_decl }, - .byte_offset = 0, - } }), - .len = try zcu.intern(.{ .int = .{ - .ty = .usize_type, - .storage = .{ .u64 = test_decl_name_len }, - } }), - } }), - // func - try zcu.intern(.{ .ptr = .{ - .ty = try zcu.intern(.{ .ptr_type = .{ - .child = test_decl.typeOf(zcu).toIntern(), - .flags = .{ - .is_const = true, - }, - } }), - .base_addr = .{ .decl = test_decl_index }, - .byte_offset = 0, - } }), - }; - test_fn_val.* = try zcu.intern(.{ .aggregate = .{ - .ty = test_fn_ty.toIntern(), - .storage = .{ .elems = &test_fn_fields }, - } }); - } - - const array_ty = try zcu.arrayType(.{ - .len = test_fn_vals.len, - .child = test_fn_ty.toIntern(), - .sentinel = .none, - }); - const array_val = try zcu.intern(.{ .aggregate = .{ - .ty = array_ty.toIntern(), - .storage = .{ .elems = test_fn_vals }, - } }); - break :array .{ - .orig_ty = (try zcu.singleConstPtrType(array_ty)).toIntern(), - .val = array_val, - }; - }; - - { - const new_ty = try zcu.ptrType(.{ - .child = test_fn_ty.toIntern(), - .flags = .{ - .is_const = true, - .size = .Slice, - }, - }); - const new_val = decl.val; - const new_init = try zcu.intern(.{ .slice = .{ - .ty = new_ty.toIntern(), - .ptr = try zcu.intern(.{ .ptr = .{ - .ty = new_ty.slicePtrFieldType(zcu).toIntern(), - .base_addr = .{ .anon_decl = array_anon_decl }, - .byte_offset = 0, - } }), - .len = (try zcu.intValue(Type.usize, 
zcu.test_functions.count())).toIntern(), - } }); - ip.mutateVarInit(decl.val.toIntern(), new_init); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. - decl.val = new_val; - decl.has_tv = true; - } - { - zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); - defer { - zcu.codegen_prog_node.end(); - zcu.codegen_prog_node = undefined; - } - - try zcu.linkerUpdateDecl(decl_index); - } -} - -pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { - const comp = zcu.comp; - - const decl = zcu.declPtr(decl_index); - - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); - defer codegen_prog_node.end(); - - if (comp.bin_file) |lf| { - lf.updateDecl(zcu, decl_index) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - }, - else => { - const gpa = zcu.gpa; - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( - gpa, - decl.navSrcLoc(zcu), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - }, - }; - } else if (zcu.llvm_object) |llvm_object| { - if (build_options.only_c) unreachable; - llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - }; - } -} - -fn reportRetryableFileError( +pub fn reportRetryableFileError( zcu: *Zcu, file_index: File.Index, comptime format: []const u8, @@ -5795,344 +4254,6 @@ pub fn backendSupportsFeature(zcu: Module, feature: Feature) bool { return target_util.backendSupportsFeature(cpu_arch, ofmt, use_llvm, feature); } -/// Shortcut for calling `intern_pool.get`. -pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { - return mod.intern_pool.get(mod.gpa, key); -} - -/// Shortcut for calling `intern_pool.getCoerced`. -pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value { - return Value.fromInterned((try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern()))); -} - -pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { - return Type.fromInterned((try intern(mod, .{ .int_type = .{ - .signedness = signedness, - .bits = bits, - } }))); -} - -pub fn errorIntType(mod: *Module) std.mem.Allocator.Error!Type { - return mod.intType(.unsigned, mod.errorSetBits()); -} - -pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { - const i = try intern(mod, .{ .array_type = info }); - return Type.fromInterned(i); -} - -pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type { - const i = try intern(mod, .{ .vector_type = info }); - return Type.fromInterned(i); -} - -pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type { - const i = try intern(mod, .{ .opt_type = child_type }); - return Type.fromInterned(i); -} - -pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { - var canon_info = info; - - if (info.flags.size == .C) canon_info.flags.is_allowzero = true; - - // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee - // type, we change it to 0 here. 
If this causes an assertion trip because the - // pointee type needs to be resolved more, that needs to be done before calling - // this ptr() function. - if (info.flags.alignment != .none and - info.flags.alignment == Type.fromInterned(info.child).abiAlignment(mod)) - { - canon_info.flags.alignment = .none; - } - - switch (info.flags.vector_index) { - // Canonicalize host_size. If it matches the bit size of the pointee type, - // we change it to 0 here. If this causes an assertion trip, the pointee type - // needs to be resolved before calling this ptr() function. - .none => if (info.packed_offset.host_size != 0) { - const elem_bit_size = Type.fromInterned(info.child).bitSize(mod); - assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8); - if (info.packed_offset.host_size * 8 == elem_bit_size) { - canon_info.packed_offset.host_size = 0; - } - }, - .runtime => {}, - _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size), - } - - return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info }))); -} - -/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer -/// child type's alignment is resolved so that an invalid alignment is not used. -/// In general, prefer this function during semantic analysis. -pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type { - if (info.flags.alignment != .none) { - _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema); - } - return zcu.ptrType(info); -} - -pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .child = child_type.toIntern() }); -} - -pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ - .child = child_type.toIntern(), - .flags = .{ - .is_const = true, - }, - }); -} - -pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ - .child = child_type.toIntern(), - .flags = .{ - .size = .Many, - .is_const = true, - }, - }); -} - -pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { - var info = ptr_ty.ptrInfo(mod); - info.child = new_child.toIntern(); - return mod.ptrType(info); -} - -pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type { - return Type.fromInterned((try mod.intern_pool.getFuncType(mod.gpa, key))); -} - -/// Use this for `anyframe->T` only. -/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly. -pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { - return Type.fromInterned((try intern(mod, .{ .anyframe_type = payload_ty.toIntern() }))); -} - -pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { - return Type.fromInterned((try intern(mod, .{ .error_union_type = .{ - .error_set_type = error_set_ty.toIntern(), - .payload_type = payload_ty.toIntern(), - } }))); -} - -pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { - const names: *const [1]InternPool.NullTerminatedString = &name; - const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names); - return Type.fromInterned(new_ty); -} - -/// Sorts `names` in place. 
-pub fn errorSetFromUnsortedNames( - mod: *Module, - names: []InternPool.NullTerminatedString, -) Allocator.Error!Type { - std.mem.sort( - InternPool.NullTerminatedString, - names, - {}, - InternPool.NullTerminatedString.indexLessThan, - ); - const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names); - return Type.fromInterned(new_ty); -} - -/// Supports only pointers, not pointer-like optionals. -pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { - assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod)); - assert(x != 0 or ty.isAllowzeroPtr(mod)); - const i = try intern(mod, .{ .ptr = .{ - .ty = ty.toIntern(), - .base_addr = .int, - .byte_offset = x, - } }); - return Value.fromInterned(i); -} - -/// Creates an enum tag value based on the integer tag value. -pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { - if (std.debug.runtime_safety) { - const tag = ty.zigTypeTag(mod); - assert(tag == .Enum); - } - const i = try intern(mod, .{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = tag_int, - } }); - return Value.fromInterned(i); -} - -/// Creates an enum tag value based on the field index according to source code -/// declaration order. -pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { - const ip = &mod.intern_pool; - const gpa = mod.gpa; - const enum_type = ip.loadEnumType(ty.toIntern()); - - if (enum_type.values.len == 0) { - // Auto-numbered fields. - return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try ip.get(gpa, .{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = field_index }, - } }), - } }))); - } - - return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = enum_type.values.get(ip)[field_index], - } }))); -} - -pub fn undefValue(mod: *Module, ty: Type) Allocator.Error!Value { - return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); -} - -pub fn undefRef(mod: *Module, ty: Type) Allocator.Error!Air.Inst.Ref { - return Air.internedToRef((try mod.undefValue(ty)).toIntern()); -} - -pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { - if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); - if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); - var limbs_buffer: [4]usize = undefined; - var big_int = BigIntMutable.init(&limbs_buffer, x); - return intValue_big(mod, ty, big_int.toConst()); -} - -pub fn intRef(mod: *Module, ty: Type, x: anytype) Allocator.Error!Air.Inst.Ref { - return Air.internedToRef((try mod.intValue(ty, x)).toIntern()); -} - -pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { - const i = try intern(mod, .{ .int = .{ - .ty = ty.toIntern(), - .storage = .{ .big_int = x }, - } }); - return Value.fromInterned(i); -} - -pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { - const i = try intern(mod, .{ .int = .{ - .ty = ty.toIntern(), - .storage = .{ .u64 = x }, - } }); - return Value.fromInterned(i); -} - -pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { - const i = try intern(mod, .{ .int = .{ - .ty = ty.toIntern(), - .storage = .{ .i64 = x }, - } }); - return Value.fromInterned(i); -} - -pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { - const i = try intern(mod, .{ .un = .{ - .ty = union_ty.toIntern(), - .tag = tag.toIntern(), - .val = val.toIntern(), - } 
}); - return Value.fromInterned(i); -} - -/// This function casts the float representation down to the representation of the type, potentially -/// losing data if the representation wasn't correct. -pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { - const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { - 16 => .{ .f16 = @as(f16, @floatCast(x)) }, - 32 => .{ .f32 = @as(f32, @floatCast(x)) }, - 64 => .{ .f64 = @as(f64, @floatCast(x)) }, - 80 => .{ .f80 = @as(f80, @floatCast(x)) }, - 128 => .{ .f128 = @as(f128, @floatCast(x)) }, - else => unreachable, - }; - const i = try intern(mod, .{ .float = .{ - .ty = ty.toIntern(), - .storage = storage, - } }); - return Value.fromInterned(i); -} - -pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { - const ip = &mod.intern_pool; - assert(ip.isOptionalType(opt_ty.toIntern())); - const result = try ip.get(mod.gpa, .{ .opt = .{ - .ty = opt_ty.toIntern(), - .val = .none, - } }); - return Value.fromInterned(result); -} - -pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { - return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); -} - -/// Returns the smallest possible integer type containing both `min` and -/// `max`. Asserts that neither value is undef. -/// TODO: if #3806 is implemented, this becomes trivial -pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { - assert(!min.isUndef(mod)); - assert(!max.isUndef(mod)); - - if (std.debug.runtime_safety) { - assert(Value.order(min, max, mod).compare(.lte)); - } - - const sign = min.orderAgainstZero(mod) == .lt; - - const min_val_bits = intBitsForValue(mod, min, sign); - const max_val_bits = intBitsForValue(mod, max, sign); - - return mod.intType( - if (sign) .signed else .unsigned, - @max(min_val_bits, max_val_bits), - ); -} - -/// Given a value representing an integer, returns the number of bits necessary to represent -/// this value in an integer. If `sign` is true, returns the number of bits necessary in a -/// twos-complement integer; otherwise in an unsigned integer. -/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. -pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { - assert(!val.isUndef(mod)); - - const key = mod.intern_pool.indexToKey(val.toIntern()); - switch (key.int.storage) { - .i64 => |x| { - if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign); - assert(sign); - // Protect against overflow in the following negation. 
- if (x == std.math.minInt(i64)) return 64; - return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1; - }, - .u64 => |x| { - return Type.smallestUnsignedBits(x) + @intFromBool(sign); - }, - .big_int => |big| { - if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign))); - - // Zero is still a possibility, in which case unsigned is fine - if (big.eqlZero()) return 0; - - return @as(u16, @intCast(big.bitCountTwosComp())); - }, - .lazy_align => |lazy_ty| { - return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits() orelse 0) + @intFromBool(sign); - }, - .lazy_size => |lazy_ty| { - return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign); - }, - } -} - pub const AtomicPtrAlignmentError = error{ FloatTooBig, IntTooBig, @@ -6371,101 +4492,6 @@ pub const UnionLayout = struct { padding: u32, }; -pub fn getUnionLayout(mod: *Module, loaded_union: InternPool.LoadedUnionType) UnionLayout { - const ip = &mod.intern_pool; - assert(loaded_union.haveLayout(ip)); - var most_aligned_field: u32 = undefined; - var most_aligned_field_size: u64 = undefined; - var biggest_field: u32 = undefined; - var payload_size: u64 = 0; - var payload_align: Alignment = .@"1"; - for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; - - const explicit_align = loaded_union.fieldAlign(ip, field_index); - const field_align = if (explicit_align != .none) - explicit_align - else - Type.fromInterned(field_ty).abiAlignment(mod); - const field_size = Type.fromInterned(field_ty).abiSize(mod); - if (field_size > payload_size) { - payload_size = field_size; - biggest_field = @intCast(field_index); - } - if (field_align.compare(.gte, payload_align)) { - payload_align = field_align; - most_aligned_field = @intCast(field_index); - most_aligned_field_size = field_size; - } - } - const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); - if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(mod)) { - return .{ - .abi_size = payload_align.forward(payload_size), - .abi_align = payload_align, - .most_aligned_field = most_aligned_field, - .most_aligned_field_size = most_aligned_field_size, - .biggest_field = biggest_field, - .payload_size = payload_size, - .payload_align = payload_align, - .tag_align = .none, - .tag_size = 0, - .padding = 0, - }; - } - - const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(mod); - const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod).max(.@"1"); - return .{ - .abi_size = loaded_union.size(ip).*, - .abi_align = tag_align.max(payload_align), - .most_aligned_field = most_aligned_field, - .most_aligned_field_size = most_aligned_field_size, - .biggest_field = biggest_field, - .payload_size = payload_size, - .payload_align = payload_align, - .tag_align = tag_align, - .tag_size = tag_size, - .padding = loaded_union.padding(ip).*, - }; -} - -pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 { - return mod.getUnionLayout(loaded_union).abi_size; -} - -/// Returns 0 if the union is represented with 0 bits at runtime. 
-pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) Alignment { - const ip = &mod.intern_pool; - const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); - var max_align: Alignment = .none; - if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod); - for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; - - const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index)); - max_align = max_align.max(field_align); - } - return max_align; -} - -/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. -pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment { - return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable; -} - -/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. -/// If `strat` is `.sema`, may perform type resolution. -pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment { - const ip = &zcu.intern_pool; - assert(loaded_union.flagsPtr(ip).layout != .@"packed"); - const field_align = loaded_union.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - if (field_ty.isNoReturn(zcu)) return .none; - return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; -} - /// Returns the index of the active field, given the current tag value pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 { const ip = &mod.intern_pool; @@ -6474,63 +4500,6 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern()); } -/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. -pub fn structFieldAlignment( - zcu: *Zcu, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) Alignment { - return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable; -} - -/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. -/// If `strat` is `.sema`, may perform type resolution. -pub fn structFieldAlignmentAdvanced( - zcu: *Zcu, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, - strat: Type.ResolveStrat, -) SemaError!Alignment { - assert(layout != .@"packed"); - if (explicit_alignment != .none) return explicit_alignment; - const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; - switch (layout) { - .@"packed" => unreachable, - .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align, - .@"extern" => {}, - } - // extern - if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) { - return ty_abi_align.maxStrict(.@"16"); - } - return ty_abi_align; -} - -/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets -/// into the packed struct InternPool data rather than computing this on the -/// fly, however it was found to perform worse when measured on real world -/// projects. 
-pub fn structPackedFieldBitOffset(
-    mod: *Module,
-    struct_type: InternPool.LoadedStructType,
-    field_index: u32,
-) u16 {
-    const ip = &mod.intern_pool;
-    assert(struct_type.layout == .@"packed");
-    assert(struct_type.haveLayout(ip));
-    var bit_sum: u64 = 0;
-    for (0..struct_type.field_types.len) |i| {
-        if (i == field_index) {
-            return @intCast(bit_sum);
-        }
-        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        bit_sum += field_ty.bitSize(mod);
-    }
-    unreachable; // index out of bounds
-}
-
 pub const ResolvedReference = struct {
     referencer: AnalUnit,
     src: LazySrcLoc,
@@ -6564,33 +4533,6 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved
     return result;
 }
 
-pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref {
-    const decl_index = try zcu.getBuiltinDecl(name);
-    zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
-    return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern());
-}
-
-pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex {
-    const gpa = zcu.gpa;
-    const ip = &zcu.intern_pool;
-    const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
-    const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?;
-    const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?;
-    const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
-    const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
-    zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
-    const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
-    const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
-    return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
-}
-
-pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type {
-    const ty_inst = try zcu.getBuiltin(name);
-    const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
-    ty.resolveFully(zcu) catch @panic("std.builtin is corrupt");
-    return ty;
-}
-
 pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File {
     return zcu.import_table.values()[@intFromEnum(i)];
 }
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
new file mode 100644
index 000000000000..785a5d52e098
--- /dev/null
+++ b/src/Zcu/PerThread.zig
@@ -0,0 +1,2102 @@
+zcu: *Zcu,
+
+/// Dense, per-thread unique index.
+tid: Id,
+
+pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ };
+
+/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
+pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
+    if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| {
+        return pt.ensureDeclAnalyzed(existing_root);
+    } else {
+        return pt.semaFile(file_index);
+    }
+}
+
+/// This ensures that the Decl will have an up-to-date Type and Value populated.
+/// However, the Type may not be fully resolved: for example, an inferred error
+/// set is not resolved until after `analyzeFnBody` is called.
+pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.SemaError!void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const mod = pt.zcu;
+    const ip = &mod.intern_pool;
+    const decl = mod.declPtr(decl_index);
+
+    log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{
+        @intFromEnum(decl_index),
+        decl.name.fmt(ip),
+    });
+
+    // Determine whether or not this Decl is outdated, i.e. requires re-analysis
+    // even if `complete`. If a Decl is PO, we pessimistically assume that it
+    // *does* require re-analysis, to ensure that the Decl is definitely
+    // up-to-date when this function returns.
+
+    // If analysis occurs in a poor order, this could result in over-analysis.
+    // We do our best to avoid this with the other dependency logic in this file,
+    // which tries to limit re-analysis to Decls whose previously listed
+    // dependencies are all up-to-date.
+
+    const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index });
+    const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or
+        mod.potentially_outdated.swapRemove(decl_as_depender);
+
+    if (decl_was_outdated) {
+        _ = mod.outdated_ready.swapRemove(decl_as_depender);
+    }
+
+    const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated;
+
+    switch (decl.analysis) {
+        .in_progress => unreachable,
+
+        .file_failure => return error.AnalysisFail,
+
+        .sema_failure,
+        .dependency_failure,
+        .codegen_failure,
+        => if (!was_outdated) return error.AnalysisFail,
+
+        .complete => if (!was_outdated) return,
+
+        .unreferenced => {},
+    }
+
+    if (was_outdated) {
+        // The exports this Decl performs will be re-discovered, so we remove them here
+        // prior to re-analysis.
+        if (build_options.only_c) unreachable;
+        mod.deleteUnitExports(decl_as_depender);
+        mod.deleteUnitReferences(decl_as_depender);
+    }
+
+    const sema_result: Zcu.SemaDeclResult = blk: {
+        if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) {
+            // Anonymous decl. We don't semantically analyze these.
+            break :blk .{
+                .invalidate_decl_val = false,
+                .invalidate_decl_ref = false,
+            };
+        }
+
+        if (mod.declIsRoot(decl_index)) {
+            const changed = try pt.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated);
+            break :blk .{
+                .invalidate_decl_val = changed,
+                .invalidate_decl_ref = changed,
+            };
+        }
+
+        const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+        defer decl_prog_node.end();
+
+        break :blk pt.semaDecl(decl_index) catch |err| switch (err) {
+            error.AnalysisFail => {
+                if (decl.analysis == .in_progress) {
+                    // If this decl caused the compile error, the analysis field would
+                    // be changed to indicate it was this Decl's fault. Because this
+                    // did not happen, we infer here that it was a dependency failure.
+                    decl.analysis = .dependency_failure;
+                }
+                return error.AnalysisFail;
+            },
+            error.GenericPoison => unreachable,
+            else => |e| {
+                decl.analysis = .sema_failure;
+                try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1);
+                try mod.retryable_failures.append(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
+                mod.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create(
+                    mod.gpa,
+                    decl.navSrcLoc(mod),
+                    "unable to analyze: {s}",
+                    .{@errorName(e)},
+                ));
+                return error.AnalysisFail;
+            },
+        };
+    };
+
+    // TODO: we do not yet have separate dependencies for decl values vs types.
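+    // For reference, a sketch of the result type built above, assuming the
+    // `Zcu.SemaDeclResult` declaration matches its uses here:
+    //
+    //     pub const SemaDeclResult = packed struct {
+    //         invalidate_decl_val: bool,
+    //         invalidate_decl_ref: bool,
+    //     };
+    //
+    // Until the TODO above is addressed, both flags collapse into the single
+    // `decl_val` dependency kind used below.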
+    if (decl_was_outdated) {
+        if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) {
+            log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)});
+            // This dependency was marked as PO, meaning dependers were waiting
+            // on its analysis result, and it has turned out to be outdated.
+            // Update those dependers accordingly.
+            try mod.markDependeeOutdated(.{ .decl_val = decl_index });
+        } else {
+            log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)});
+            // This dependency was previously PO, but turned out to be up-to-date.
+            // We do not need to queue successive analysis.
+            try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index });
+        }
+    }
+}
+
+pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+
+    // We only care about the uncoerced function.
+    // We need to do this for the "orphaned function" check below to be valid.
+    const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index);
+
+    const func = zcu.funcInfo(maybe_coerced_func_index);
+    const decl_index = func.owner_decl;
+    const decl = zcu.declPtr(decl_index);
+
+    log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{
+        @intFromEnum(func_index),
+        decl.name.fmt(ip),
+    });
+
+    // First, our owner decl must be up-to-date. This will always be the case
+    // during the first update, but may not be on successive updates if we
+    // happen to get analyzed before our parent decl.
+    try pt.ensureDeclAnalyzed(decl_index);
+
+    // On an update, it's possible this function changed such that our owner
+    // decl now refers to a different function, making this one orphaned. If
+    // that's the case, we should remove this function from the binary.
+    if (decl.val.ip_index != func_index) {
+        try zcu.markDependeeOutdated(.{ .func_ies = func_index });
+        ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
+        ip.remove(func_index);
+        @panic("TODO: remove orphaned function from binary");
+    }
+
+    // We'll want to remember what the IES used to be before the update for
+    // dependency invalidation purposes.
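+    // ("IES" is the function's inferred error set. If re-analysis changes the
+    // resolved set, `func_ies` dependencies must be invalidated; that decision
+    // is made by comparing against this snapshot after the body is analyzed.)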
+ const old_resolved_ies = if (func.analysis(ip).inferred_error_set) + func.resolvedErrorSet(ip).* + else + .none; + + switch (decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + + .codegen_failure => unreachable, // functions do not perform constant value generation + + .file_failure, + .sema_failure, + .dependency_failure, + => return error.AnalysisFail, + + .complete => {}, + } + + const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const was_outdated = zcu.outdated.swapRemove(func_as_depender) or + zcu.potentially_outdated.swapRemove(func_as_depender); + + if (was_outdated) { + if (build_options.only_c) unreachable; + _ = zcu.outdated_ready.swapRemove(func_as_depender); + zcu.deleteUnitExports(func_as_depender); + zcu.deleteUnitReferences(func_as_depender); + } + + switch (func.analysis(ip).state) { + .success => if (!was_outdated) return, + .sema_failure, + .dependency_failure, + .codegen_failure, + => if (!was_outdated) return error.AnalysisFail, + .none, .queued => {}, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + } + + log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{ + @intFromEnum(func_index), + if (was_outdated) "outdated" else "never analyzed", + }); + + var tmp_arena = std.heap.ArenaAllocator.init(gpa); + defer tmp_arena.deinit(); + const sema_arena = tmp_arena.allocator(); + + var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { + error.AnalysisFail => { + if (func.analysis(ip).state == .in_progress) { + // If this decl caused the compile error, the analysis field would + // be changed to indicate it was this Decl's fault. Because this + // did not happen, we infer here that it was a dependency failure. + func.analysis(ip).state = .dependency_failure; + } + return error.AnalysisFail; + }, + error.OutOfMemory => return error.OutOfMemory, + }; + errdefer air.deinit(gpa); + + const invalidate_ies_deps = i: { + if (!was_outdated) break :i false; + if (!func.analysis(ip).inferred_error_set) break :i true; + const new_resolved_ies = func.resolvedErrorSet(ip).*; + break :i new_resolved_ies != old_resolved_ies; + }; + if (invalidate_ies_deps) { + log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markDependeeOutdated(.{ .func_ies = func_index }); + } else if (was_outdated) { + log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index }); + } + + const comp = zcu.comp; + + const dump_air = build_options.enable_debug_extensions and comp.verbose_air; + const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); + + if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { + air.deinit(gpa); + return; + } + + try comp.work_queue.writeItem(.{ .codegen_func = .{ + .func = func_index, + .air = air, + } }); +} + +/// Takes ownership of `air`, even on error. +/// If any types referenced by `air` are unresolved, marks the codegen as failed. 
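+/// Runs liveness analysis on `air`, then hands the function off to whichever
+/// backend is active: the linker backend's `updateFunc` or the LLVM object
+/// emitter.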
+pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Air) Allocator.Error!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const comp = zcu.comp; + + defer { + var air_mut = air; + air_mut.deinit(gpa); + } + + const func = zcu.funcInfo(func_index); + const decl_index = func.owner_decl; + const decl = zcu.declPtr(decl_index); + + var liveness = try Liveness.analyze(gpa, air, ip); + defer liveness.deinit(gpa); + + if (build_options.enable_debug_extensions and comp.verbose_air) { + const fqn = try decl.fullyQualifiedName(zcu); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); + @import("../print_air.zig").dump(pt, air, liveness); + std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); + } + + if (std.debug.runtime_safety) { + var verify: Liveness.Verify = .{ + .gpa = gpa, + .air = air, + .liveness = liveness, + .intern_pool = ip, + }; + defer verify.deinit(); + + verify.verify() catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .func = func_index }), + try Zcu.ErrorMsg.create( + gpa, + decl.navSrcLoc(zcu), + "invalid liveness: {s}", + .{@errorName(err)}, + ), + ); + func.analysis(ip).state = .codegen_failure; + return; + }, + }; + } + + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + defer codegen_prog_node.end(); + + if (!air.typesFullyResolved(zcu)) { + // A type we depend on failed to resolve. This is a transitive failure. + // Correcting this failure will involve changing a type this function + // depends on, hence triggering re-analysis of this function, so this + // interacts correctly with incremental compilation. 
+ func.analysis(ip).state = .codegen_failure; + } else if (comp.bin_file) |lf| { + lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + func.analysis(ip).state = .codegen_failure; + }, + else => { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .func = func_index }), try Zcu.ErrorMsg.create( + gpa, + decl.navSrcLoc(zcu), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + func.analysis(ip).state = .codegen_failure; + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + }, + }; + } else if (zcu.llvm_object) |llvm_object| { + if (build_options.only_c) unreachable; + llvm_object.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + }; + } +} + +/// https://github.com/ziglang/zig/issues/14307 +pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void { + const import_file_result = try pt.zcu.importPkg(pkg); + const root_decl_index = pt.zcu.fileRootDecl(import_file_result.file_index); + if (root_decl_index == .none) { + return pt.semaFile(import_file_result.file_index); + } +} + +fn getFileRootStruct( + pt: Zcu.PerThread, + decl_index: Zcu.Decl.Index, + namespace_index: Zcu.Namespace.Index, + file_index: Zcu.File.Index, +) Allocator.Error!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const file = zcu.fileByIndex(file_index); + const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + assert(extended.opcode == .struct_decl); + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + assert(!small.has_captures_len); + assert(!small.has_backing_int); + assert(small.layout == .auto); + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + const fields_len = if (small.has_fields_len) blk: { + const fields_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + const decls_len = if (small.has_decls_len) blk: { + const decls_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + const decls = file.zir.bodySlice(extra_index, decls_len); + extra_index += decls_len; + + const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst); + const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ + .layout = .auto, + .fields_len = fields_len, + .known_non_opv = small.known_non_opv, + .requires_comptime = if (small.known_comptime_only) .yes else .unknown, + .is_tuple = small.is_tuple, + .any_comptime_fields = small.any_comptime_fields, + .any_default_inits = small.any_default_inits, + .inits_resolved = false, + .any_aligned_fields = small.any_aligned_fields, + .has_namespace = true, + .key = .{ .declared = .{ + .zir_index = tracked_inst, + .captures = &.{}, + } }, + })) { + .existing => unreachable, // we wouldn't be analysing the file root if this type existed + .wip => |wip| wip, + }; + errdefer wip_ty.cancel(ip); + + if (zcu.comp.debug_incremental) { + try ip.addDependency( + gpa, + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + .{ .src_hash = tracked_inst }, + ); + } + + const decl = zcu.declPtr(decl_index); + decl.val = Value.fromInterned(wip_ty.index); + decl.has_tv = true; + decl.owns_tv = true; + decl.analysis = .complete; + + try zcu.scanNamespace(namespace_index, 
decls, decl); + try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); +} + +/// Re-analyze the root Decl of a file on an incremental update. +/// If `type_outdated`, the struct type itself is considered outdated and is +/// reconstructed at a new InternPool index. Otherwise, the namespace is just +/// re-analyzed. Returns whether the decl's tyval was invalidated. +fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const file = zcu.fileByIndex(file_index); + const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?); + + log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ + file.mod.fully_qualified_name, + file.sub_file_path, + type_outdated, + }); + + if (file.status != .success_zir) { + if (decl.analysis == .file_failure) { + return false; + } else { + decl.analysis = .file_failure; + return true; + } + } + + if (decl.analysis == .file_failure) { + // No struct type currently exists. Create one! + const root_decl = zcu.fileRootDecl(file_index); + _ = try pt.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index); + return true; + } + + assert(decl.has_tv); + assert(decl.owns_tv); + + if (type_outdated) { + // Invalidate the existing type, reusing the decl and namespace. + const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?; + ip.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ + .decl = file_root_decl, + })); + ip.remove(decl.val.toIntern()); + decl.val = undefined; + _ = try pt.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); + return true; + } + + // Only the struct's namespace is outdated. + // Preserve the type - just scan the namespace again. + + const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + const decls = file.zir.bodySlice(extra_index, decls_len); + + if (!type_outdated) { + try zcu.scanNamespace(decl.src_namespace, decls, decl); + } + + return false; +} + +/// Regardless of the file status, will create a `Decl` if none exists so that we can track +/// dependencies and re-analyze when the file becomes outdated. +fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const gpa = zcu.gpa; + const file = zcu.fileByIndex(file_index); + assert(zcu.fileRootDecl(file_index) == .none); + log.debug("semaFile zcu={s} sub_file_path={s}", .{ + file.mod.fully_qualified_name, file.sub_file_path, + }); + + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
+ const new_namespace_index = try zcu.createNamespace(.{ + .parent = .none, + .decl_index = undefined, + .file_scope = file_index, + }); + errdefer zcu.destroyNamespace(new_namespace_index); + + const new_decl_index = try zcu.allocateNewDecl(new_namespace_index); + const new_decl = zcu.declPtr(new_decl_index); + errdefer @panic("TODO error handling"); + + zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); + zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; + + new_decl.name = try file.fullyQualifiedName(zcu); + new_decl.name_fully_qualified = true; + new_decl.is_pub = true; + new_decl.is_exported = false; + new_decl.alignment = .none; + new_decl.@"linksection" = .none; + new_decl.analysis = .in_progress; + + if (file.status != .success_zir) { + new_decl.analysis = .file_failure; + return; + } + assert(file.zir_loaded); + + const struct_ty = try pt.getFileRootStruct(new_decl_index, new_namespace_index, file_index); + errdefer zcu.intern_pool.remove(struct_ty); + + switch (zcu.comp.cache_use) { + .whole => |whole| if (whole.cache_manifest) |man| { + const source = file.getSource(gpa) catch |err| { + try Zcu.reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)}); + return error.AnalysisFail; + }; + + const resolved_path = std.fs.path.resolve(gpa, &.{ + file.mod.root.root_dir.path orelse ".", + file.mod.root.sub_path, + file.sub_file_path, + }) catch |err| { + try Zcu.reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)}); + return error.AnalysisFail; + }; + errdefer gpa.free(resolved_path); + + whole.cache_manifest_mutex.lock(); + defer whole.cache_manifest_mutex.unlock(); + try man.addFilePostContents(resolved_path, source.bytes, source.stat); + }, + .incremental => {}, + } +} + +fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const decl = zcu.declPtr(decl_index); + const ip = &zcu.intern_pool; + + if (decl.getFileScope(zcu).status != .success_zir) { + return error.AnalysisFail; + } + + assert(!zcu.declIsRoot(decl_index)); + + if (decl.zir_decl_index == .none and decl.owns_tv) { + // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). + return zcu.semaAnonOwnerDecl(decl_index); + } + + log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); + log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); + defer blk: { + log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); + } + + const old_has_tv = decl.has_tv; + // The following values are ignored if `!old_has_tv` + const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined; + const old_val = decl.val; + const old_align = decl.alignment; + const old_linksection = decl.@"linksection"; + const old_addrspace = decl.@"addrspace"; + const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func| + prev_func.analysis(ip).state == .inline_only + else + false; + + const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip); + + const gpa = zcu.gpa; + const zir = decl.getFileScope(zcu).zir; + + const builtin_type_target_index: InternPool.Index = ip_index: { + const std_mod = zcu.std_mod; + if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none; + // We're in the std module. 
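+        // Determine whether this Decl is one of the `std.builtin` types that
+        // owns a reserved InternPool index (see the parallel name/index
+        // arrays below), so Sema can intern the resulting type at that
+        // fixed index.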
+ const std_file_imported = try zcu.importPkg(std_mod); + const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); + const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); + const std_namespace = std_decl.getInnerNamespace(zcu).?; + const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); + const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; + if (decl.src_namespace != builtin_namespace) break :ip_index .none; + // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index. + for ([_][]const u8{ + "AtomicOrder", + "AtomicRmwOp", + "CallingConvention", + "AddressSpace", + "FloatMode", + "ReduceOp", + "CallModifier", + "PrefetchOptions", + "ExportOptions", + "ExternOptions", + "Type", + }, [_]InternPool.Index{ + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + }) |type_name, type_ip| { + if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip; + } + break :ip_index .none; + }; + + zcu.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + + decl.analysis = .in_progress; + + var analysis_arena = std.heap.ArenaAllocator.init(gpa); + defer analysis_arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .pt = pt, + .gpa = gpa, + .arena = analysis_arena.allocator(), + .code = zir, + .owner_decl = decl, + .owner_decl_index = decl_index, + .func_index = .none, + .func_is_naked = false, + .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, + .owner_func_index = .none, + .comptime_err_ret_trace = &comptime_err_ret_trace, + .builtin_type_target_index = builtin_type_target_index, + }; + defer sema.deinit(); + + // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. + try sema.declareDependency(.{ .src_hash = try ip.trackZir( + gpa, + decl.getFileScopeIndex(zcu), + decl_inst, + ) }); + + var block_scope: Sema.Block = .{ + .parent = null, + .sema = &sema, + .namespace = decl.src_namespace, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + .src_base_inst = decl.zir_decl_index.unwrap().?, + .type_name_ctx = decl.name, + }; + defer block_scope.instructions.deinit(gpa); + + const decl_bodies = decl.zirBodies(zcu); + + const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst); + // We'll do some other bits with the Sema. Clear the type target index just + // in case they analyze any type. 
+ sema.builtin_type_target_index = .none; + const align_src = block_scope.src(.{ .node_offset_var_decl_align = 0 }); + const section_src = block_scope.src(.{ .node_offset_var_decl_section = 0 }); + const address_space_src = block_scope.src(.{ .node_offset_var_decl_addrspace = 0 }); + const ty_src = block_scope.src(.{ .node_offset_var_decl_ty = 0 }); + const init_src = block_scope.src(.{ .node_offset_var_decl_init = 0 }); + const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref); + const decl_ty = decl_val.typeOf(zcu); + + // Note this resolves the type of the Decl, not the value; if this Decl + // is a struct, for example, this resolves `type` (which needs no resolution), + // not the struct itself. + try decl_ty.resolveLayout(pt); + + if (decl.kind == .@"usingnamespace") { + if (!decl_ty.eql(Type.type, zcu)) { + return sema.fail(&block_scope, ty_src, "expected type, found {}", .{decl_ty.fmt(pt)}); + } + const ty = decl_val.toType(); + if (ty.getNamespace(zcu) == null) { + return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(pt)}); + } + + decl.val = ty.toValue(); + decl.alignment = .none; + decl.@"linksection" = .none; + decl.has_tv = true; + decl.owns_tv = false; + decl.analysis = .complete; + + // TODO: usingnamespace cannot currently participate in incremental compilation + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; + } + + var queue_linker_work = true; + var is_func = false; + var is_inline = false; + switch (decl_val.toIntern()) { + .generic_poison => unreachable, + .unreachable_value => unreachable, + else => switch (ip.indexToKey(decl_val.toIntern())) { + .variable => |variable| { + decl.owns_tv = variable.decl == decl_index; + queue_linker_work = decl.owns_tv; + }, + + .extern_func => |extern_func| { + decl.owns_tv = extern_func.decl == decl_index; + queue_linker_work = decl.owns_tv; + is_func = decl.owns_tv; + }, + + .func => |func| { + decl.owns_tv = func.owner_decl == decl_index; + queue_linker_work = false; + is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline; + is_func = decl.owns_tv; + }, + + else => {}, + }, + } + + decl.val = decl_val; + // Function linksection, align, and addrspace were already set by Sema + if (!is_func) { + decl.alignment = blk: { + const align_body = decl_bodies.align_body orelse break :blk .none; + const align_ref = try sema.resolveInlineBody(&block_scope, align_body, decl_inst); + break :blk try sema.analyzeAsAlign(&block_scope, align_src, align_ref); + }; + decl.@"linksection" = blk: { + const linksection_body = decl_bodies.linksection_body orelse break :blk .none; + const linksection_ref = try sema.resolveInlineBody(&block_scope, linksection_body, decl_inst); + const bytes = try sema.toConstString(&block_scope, section_src, linksection_ref, .{ + .needed_comptime_reason = "linksection must be comptime-known", + }); + if (std.mem.indexOfScalar(u8, bytes, 0) != null) { + return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); + } else if (bytes.len == 0) { + return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); + } + break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls); + }; + decl.@"addrspace" = blk: { + const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) { + .variable => .variable, + .extern_func, .func => .function, + else => .constant, + }; + + const target = zcu.getTarget(); + + const addrspace_body = 
decl_bodies.addrspace_body orelse break :blk switch (addrspace_ctx) { + .function => target_util.defaultAddressSpace(target, .function), + .variable => target_util.defaultAddressSpace(target, .global_mutable), + .constant => target_util.defaultAddressSpace(target, .global_constant), + else => unreachable, + }; + const addrspace_ref = try sema.resolveInlineBody(&block_scope, addrspace_body, decl_inst); + break :blk try sema.analyzeAsAddressSpace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx); + }; + } + decl.has_tv = true; + decl.analysis = .complete; + + const result: Zcu.SemaDeclResult = if (old_has_tv) .{ + .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or + !decl.val.eql(old_val, decl_ty, zcu) or + is_inline != old_is_inline, + .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or + decl.alignment != old_align or + decl.@"linksection" != old_linksection or + decl.@"addrspace" != old_addrspace or + is_inline != old_is_inline, + } else .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; + + const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty)); + if (has_runtime_bits) { + // Needed for codegen_decl which will call updateDecl and then the + // codegen backend wants full access to the Decl Type. + try decl_ty.resolveFully(pt); + + try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + + if (result.invalidate_decl_ref and zcu.emit_h != null) { + try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); + } + } + + if (decl.is_exported) { + const export_src = block_scope.src(.{ .token_offset = @intFromBool(decl.is_pub) }); + if (is_inline) return sema.fail(&block_scope, export_src, "export of inline function", .{}); + // The scope needs to have the decl in it. + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); + } + + try sema.flushExports(); + + return result; +} + +pub fn embedFile( + pt: Zcu.PerThread, + cur_file: *Zcu.File, + import_string: []const u8, + src_loc: Zcu.LazySrcLoc, +) !InternPool.Index { + const mod = pt.zcu; + const gpa = mod.gpa; + + if (cur_file.mod.deps.get(import_string)) |pkg| { + const resolved_path = try std.fs.path.resolve(gpa, &.{ + pkg.root.root_dir.path orelse ".", + pkg.root.sub_path, + pkg.root_src_path, + }); + var keep_resolved_path = false; + defer if (!keep_resolved_path) gpa.free(resolved_path); + + const gop = try mod.embed_table.getOrPut(gpa, resolved_path); + errdefer { + assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path)); + keep_resolved_path = false; + } + if (gop.found_existing) return gop.value_ptr.*.val; + keep_resolved_path = true; + + const sub_file_path = try gpa.dupe(u8, pkg.root_src_path); + errdefer gpa.free(sub_file_path); + + return pt.newEmbedFile(pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc); + } + + // The resolved path is used as the key in the table, to detect if a file + // refers to the same as another, despite different relative paths. 
+ const resolved_path = try std.fs.path.resolve(gpa, &.{ + cur_file.mod.root.root_dir.path orelse ".", + cur_file.mod.root.sub_path, + cur_file.sub_file_path, + "..", + import_string, + }); + + var keep_resolved_path = false; + defer if (!keep_resolved_path) gpa.free(resolved_path); + + const gop = try mod.embed_table.getOrPut(gpa, resolved_path); + errdefer { + assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path)); + keep_resolved_path = false; + } + if (gop.found_existing) return gop.value_ptr.*.val; + keep_resolved_path = true; + + const resolved_root_path = try std.fs.path.resolve(gpa, &.{ + cur_file.mod.root.root_dir.path orelse ".", + cur_file.mod.root.sub_path, + }); + defer gpa.free(resolved_root_path); + + const sub_file_path = p: { + const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); + errdefer gpa.free(relative); + + if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { + break :p relative; + } + return error.ImportOutsideModulePath; + }; + defer gpa.free(sub_file_path); + + return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); +} + +/// Finalize the creation of an anon decl. +pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { + if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { + try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + +/// https://github.com/ziglang/zig/issues/14307 +fn newEmbedFile( + pt: Zcu.PerThread, + pkg: *Module, + sub_file_path: []const u8, + resolved_path: []const u8, + result: **Zcu.EmbedFile, + src_loc: Zcu.LazySrcLoc, +) !InternPool.Index { + const mod = pt.zcu; + const gpa = mod.gpa; + const ip = &mod.intern_pool; + + const new_file = try gpa.create(Zcu.EmbedFile); + errdefer gpa.destroy(new_file); + + var file = try pkg.root.openFile(sub_file_path, .{}); + defer file.close(); + + const actual_stat = try file.stat(); + const stat: Cache.File.Stat = .{ + .size = actual_stat.size, + .inode = actual_stat.inode, + .mtime = actual_stat.mtime, + }; + const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow; + + const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1)); + const actual_read = try file.readAll(bytes[0..size]); + if (actual_read != size) return error.UnexpectedEndOfFile; + bytes[size] = 0; + + const comp = mod.comp; + switch (comp.cache_use) { + .whole => |whole| if (whole.cache_manifest) |man| { + const copied_resolved_path = try gpa.dupe(u8, resolved_path); + errdefer gpa.free(copied_resolved_path); + whole.cache_manifest_mutex.lock(); + defer whole.cache_manifest_mutex.unlock(); + try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat); + }, + .incremental => {}, + } + + const array_ty = try pt.intern(.{ .array_type = .{ + .len = size, + .sentinel = .zero_u8, + .child = .u8_type, + } }); + const array_val = try pt.intern(.{ .aggregate = .{ + .ty = array_ty, + .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) }, + } }); + + const ptr_ty = (try pt.ptrType(.{ + .child = array_ty, + .flags = .{ + .alignment = .none, + .is_const = true, + .address_space = .generic, + }, + })).toIntern(); + const ptr_val = try pt.intern(.{ .ptr = .{ + .ty = ptr_ty, + .base_addr = .{ .anon_decl = .{ + .val = array_val, + .orig_ty = ptr_ty, + } }, + .byte_offset = 0, + } }); + + result.* = new_file; + new_file.* = .{ + .sub_file_path = try ip.getOrPutString(gpa, 
sub_file_path, .no_embedded_nulls), + .owner = pkg, + .stat = stat, + .val = ptr_val, + .src_loc = src_loc, + }; + return ptr_val; +} + +pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air { + const tracy = trace(@src()); + defer tracy.end(); + + const mod = pt.zcu; + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); + const decl_index = func.owner_decl; + const decl = mod.declPtr(decl_index); + + log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)}); + defer blk: { + log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); + } + + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + defer decl_prog_node.end(); + + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + // In the case of a generic function instance, this is the type of the + // instance, which has comptime parameters elided. In other words, it is + // the runtime-known parameters only, not to be confused with the + // generic_owner function type, which potentially has more parameters, + // including comptime parameters. + const fn_ty = decl.typeOf(mod); + const fn_ty_info = mod.typeToFunc(fn_ty).?; + + var sema: Sema = .{ + .pt = pt, + .gpa = gpa, + .arena = arena, + .code = decl.getFileScope(mod).zir, + .owner_decl = decl, + .owner_decl_index = decl_index, + .func_index = func_index, + .func_is_naked = fn_ty_info.cc == .Naked, + .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type), + .fn_ret_ty_ies = null, + .owner_func_index = func_index, + .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), + .comptime_err_ret_trace = &comptime_err_ret_trace, + }; + defer sema.deinit(); + + // Every runtime function has a dependency on the source of the Decl it originates from. + // It also depends on the value of its owner Decl. + try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? }); + try sema.declareDependency(.{ .decl_val = decl_index }); + + if (func.analysis(ip).inferred_error_set) { + const ies = try arena.create(Sema.InferredErrorSet); + ies.* = .{ .func = func_index }; + sema.fn_ret_ty_ies = ies; + } + + // reset in case calls to errorable functions are removed. + func.analysis(ip).calls_or_awaits_errorable_fn = false; + + // First few indexes of extra are reserved and set at the end. + const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; + try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); + sema.air_extra.items.len += reserved_count; + + var inner_block: Sema.Block = .{ + .parent = null, + .sema = &sema, + .namespace = decl.src_namespace, + .instructions = .{}, + .inlining = null, + .is_comptime = false, + .src_base_inst = inst: { + const owner_info = if (func.generic_owner == .none) + func + else + mod.funcInfo(func.generic_owner); + const orig_decl = mod.declPtr(owner_info.owner_decl); + break :inst orig_decl.zir_decl_index.unwrap().?; + }, + .type_name_ctx = decl.name, + }; + defer inner_block.instructions.deinit(gpa); + + const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip)); + + // Here we are performing "runtime semantic analysis" for a function body, which means + // we must map the parameter ZIR instructions to `arg` AIR instructions. 
+ // AIR requires the `arg` parameters to be the first N instructions. + // This could be a generic function instantiation, however, in which case we need to + // map the comptime parameters to constant values and only emit arg AIR instructions + // for the runtime ones. + const runtime_params_len = fn_ty_info.param_types.len; + try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); + try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len); + try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); + + // In the case of a generic function instance, pre-populate all the comptime args. + if (func.comptime_args.len != 0) { + for ( + fn_info.param_body[0..func.comptime_args.len], + func.comptime_args.get(ip), + ) |inst, comptime_arg| { + if (comptime_arg == .none) continue; + sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg)); + } + } + + const src_params_len = if (func.comptime_args.len != 0) + func.comptime_args.len + else + runtime_params_len; + + var runtime_param_index: usize = 0; + for (fn_info.param_body[0..src_params_len], 0..) |inst, src_param_index| { + const gop = sema.inst_map.getOrPutAssumeCapacity(inst); + if (gop.found_existing) continue; // provided above by comptime arg + + const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index]; + runtime_param_index += 1; + + const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) { + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + else => |e| return e, + }; + if (opt_opv) |opv| { + gop.value_ptr.* = Air.internedToRef(opv.toIntern()); + continue; + } + const arg_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); + gop.value_ptr.* = arg_index.toRef(); + inner_block.instructions.appendAssumeCapacity(arg_index); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .arg, + .data = .{ .arg = .{ + .ty = Air.internedToRef(param_ty), + .src_index = @intCast(src_param_index), + } }, + }); + } + + func.analysis(ip).state = .in_progress; + + const last_arg_index = inner_block.instructions.items.len; + + // Save the error trace as our first action in the function. + // If this is unnecessary after all, Liveness will clean it up for us. + const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block); + sema.error_return_trace_index_on_fn_entry = error_return_trace_index; + inner_block.error_return_trace_index = error_return_trace_index; + + sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) { + // TODO make these unreachable instead of @panic + error.GenericPoison => @panic("zig compiler bug: GenericPoison"), + error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), + else => |e| return e, + }; + + for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| { + // The lack of a resolve_inferred_alloc means that this instruction + // is unused so it just has to be a no-op. + sema.air_instructions.set(@intFromEnum(ptr_inst), .{ + .tag = .alloc, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, + }); + } + + // If we don't get an error return trace from a caller, create our own. 
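+    // (That is: this function calls or awaits errorable functions, error tracing
+    // is enabled, and the return type itself carries no error, so no caller has
+    // passed us a trace to append to.)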
+ if (func.analysis(ip).calls_or_awaits_errorable_fn and + mod.comp.config.any_error_tracing and + !sema.fn_ret_ty.isError(mod)) + { + sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { + // TODO make these unreachable instead of @panic + error.GenericPoison => @panic("zig compiler bug: GenericPoison"), + error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), + error.ComptimeBreak => @panic("zig compiler bug: ComptimeBreak"), + else => |e| return e, + }; + } + + // Copy the block into place and mark that as the main block. + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + inner_block.instructions.items.len); + const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(inner_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(@ptrCast(inner_block.instructions.items)); + sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; + + // Resolving inferred error sets is done *before* setting the function + // state to success, so that "unable to resolve inferred error set" errors + // can be emitted here. + if (sema.fn_ret_ty_ies) |ies| { + sema.resolveInferredErrorSetPtr(&inner_block, .{ + .base_node_inst = inner_block.src_base_inst, + .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0), + }, ies) catch |err| switch (err) { + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + error.AnalysisFail => { + // In this case our function depends on a type that had a compile error. + // We should not try to lower this function. + decl.analysis = .dependency_failure; + return error.AnalysisFail; + }, + else => |e| return e, + }; + assert(ies.resolved != .none); + ip.funcIesResolved(func_index).* = ies.resolved; + } + + func.analysis(ip).state = .success; + + // Finally we must resolve the return type and parameter types so that backends + // have full access to type information. + // Crucially, this happens *after* we set the function state to success above, + // so that dependencies on the function body will now be satisfied rather than + // result in circular dependency errors. + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + error.AnalysisFail => { + // In this case our function depends on a type that had a compile error. + // We should not try to lower this function. + decl.analysis = .dependency_failure; + return error.AnalysisFail; + }, + else => |e| return e, + }; + + try sema.flushExports(); + + return .{ + .instructions = sema.air_instructions.toOwnedSlice(), + .extra = try sema.air_extra.toOwnedSlice(gpa), + }; +} + +/// Called from `Compilation.update`, after everything is done, just before +/// reporting compile errors. In this function we emit exported symbol collision +/// errors and communicate exported symbols to the linker backend. +pub fn processExports(pt: Zcu.PerThread) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + + // First, construct a mapping of every exported value and Decl to the indices of all its different exports. 
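+    // (For example, one Decl exported under two different names contributes a
+    // single key here whose list holds two export indices.)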
+ var decl_exports: std.AutoArrayHashMapUnmanaged(Zcu.Decl.Index, std.ArrayListUnmanaged(u32)) = .{}; + var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{}; + defer { + for (decl_exports.values()) |*exports| { + exports.deinit(gpa); + } + decl_exports.deinit(gpa); + for (value_exports.values()) |*exports| { + exports.deinit(gpa); + } + value_exports.deinit(gpa); + } + + // We note as a heuristic: + // * It is rare to export a value. + // * It is rare for one Decl to be exported multiple times. + // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization. + try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); + + for (zcu.single_exports.values()) |export_idx| { + const exp = zcu.all_exports.items[export_idx]; + const value_ptr, const found_existing = switch (exp.exported) { + .decl_index => |i| gop: { + const gop = try decl_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + .value => |i| gop: { + const gop = try value_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + }; + if (!found_existing) value_ptr.* = .{}; + try value_ptr.append(gpa, export_idx); + } + + for (zcu.multi_exports.values()) |info| { + for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| { + const value_ptr, const found_existing = switch (exp.exported) { + .decl_index => |i| gop: { + const gop = try decl_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + .value => |i| gop: { + const gop = try value_exports.getOrPut(gpa, i); + break :gop .{ gop.value_ptr, gop.found_existing }; + }, + }; + if (!found_existing) value_ptr.* = .{}; + try value_ptr.append(gpa, @intCast(export_idx)); + } + } + + // Map symbol names to `Export` for name collision detection. 
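+    // (The first export of a given name wins; any later export of the same name
+    // is reported below as an "exported symbol collision" error.)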
+ var symbol_exports: SymbolExports = .{}; + defer symbol_exports.deinit(gpa); + + for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| { + const exported: Zcu.Exported = .{ .decl_index = exported_decl }; + try pt.processExportsInner(&symbol_exports, exported, exports_list.items); + } + + for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| { + const exported: Zcu.Exported = .{ .value = exported_value }; + try pt.processExportsInner(&symbol_exports, exported, exports_list.items); + } +} + +const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32); + +fn processExportsInner( + pt: Zcu.PerThread, + symbol_exports: *SymbolExports, + exported: Zcu.Exported, + export_indices: []const u32, +) error{OutOfMemory}!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + + for (export_indices) |export_idx| { + const new_export = &zcu.all_exports.items[export_idx]; + const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); + if (gop.found_existing) { + new_export.status = .failed_retryable; + try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); + const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{ + new_export.opts.name.fmt(&zcu.intern_pool), + }); + errdefer msg.destroy(gpa); + const other_export = zcu.all_exports.items[gop.value_ptr.*]; + try zcu.errNote(other_export.src, msg, "other symbol here", .{}); + zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); + new_export.status = .failed; + } else { + gop.value_ptr.* = export_idx; + } + } + if (zcu.comp.bin_file) |lf| { + try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices)); + } else if (zcu.llvm_object) |llvm_object| { + if (build_options.only_c) unreachable; + try zcu.handleUpdateExports(export_indices, llvm_object.updateExports(pt, exported, export_indices)); + } +} + +pub fn populateTestFunctions( + pt: Zcu.PerThread, + main_progress_node: std.Progress.Node, +) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const builtin_mod = zcu.root_mod.getBuiltinDependency(); + const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index; + const root_decl_index = zcu.fileRootDecl(builtin_file_index); + const root_decl = zcu.declPtr(root_decl_index.unwrap().?); + const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); + const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); + const decl_index = builtin_namespace.decls.getKeyAdapted( + test_functions_str, + Zcu.DeclAdapter{ .zcu = zcu }, + ).?; + { + // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` + // was not referenced by start code. + zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); + defer { + zcu.sema_prog_node.end(); + zcu.sema_prog_node = undefined; + } + try pt.ensureDeclAnalyzed(decl_index); + } + + const decl = zcu.declPtr(decl_index); + const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu); + + const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: { + // Add zcu.test_functions to an array decl then make the test_functions + // decl reference it as a slice. 
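+        // (Each array element packs the test's fully qualified name together with
+        // a pointer to its function; see the `test_fn_fields` tuple below.)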
+ const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count()); + defer gpa.free(test_fn_vals); + + for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { + const test_decl = zcu.declPtr(test_decl_index); + const test_decl_name = try test_decl.fullyQualifiedName(zcu); + const test_decl_name_len = test_decl_name.length(ip); + const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { + const test_name_ty = try pt.arrayType(.{ + .len = test_decl_name_len, + .child = .u8_type, + }); + const test_name_val = try pt.intern(.{ .aggregate = .{ + .ty = test_name_ty.toIntern(), + .storage = .{ .bytes = test_decl_name.toString() }, + } }); + break :n .{ + .orig_ty = (try pt.singleConstPtrType(test_name_ty)).toIntern(), + .val = test_name_val, + }; + }; + + const test_fn_fields = .{ + // name + try pt.intern(.{ .slice = .{ + .ty = .slice_const_u8_type, + .ptr = try pt.intern(.{ .ptr = .{ + .ty = .manyptr_const_u8_type, + .base_addr = .{ .anon_decl = test_name_anon_decl }, + .byte_offset = 0, + } }), + .len = try pt.intern(.{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = test_decl_name_len }, + } }), + } }), + // func + try pt.intern(.{ .ptr = .{ + .ty = try pt.intern(.{ .ptr_type = .{ + .child = test_decl.typeOf(zcu).toIntern(), + .flags = .{ + .is_const = true, + }, + } }), + .base_addr = .{ .decl = test_decl_index }, + .byte_offset = 0, + } }), + }; + test_fn_val.* = try pt.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), + .storage = .{ .elems = &test_fn_fields }, + } }); + } + + const array_ty = try pt.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_val = try pt.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } }); + break :array .{ + .orig_ty = (try pt.singleConstPtrType(array_ty)).toIntern(), + .val = array_val, + }; + }; + + { + const new_ty = try pt.ptrType(.{ + .child = test_fn_ty.toIntern(), + .flags = .{ + .is_const = true, + .size = .Slice, + }, + }); + const new_val = decl.val; + const new_init = try pt.intern(.{ .slice = .{ + .ty = new_ty.toIntern(), + .ptr = try pt.intern(.{ .ptr = .{ + .ty = new_ty.slicePtrFieldType(zcu).toIntern(), + .base_addr = .{ .anon_decl = array_anon_decl }, + .byte_offset = 0, + } }), + .len = (try pt.intValue(Type.usize, zcu.test_functions.count())).toIntern(), + } }); + ip.mutateVarInit(decl.val.toIntern(), new_init); + + // Since we are replacing the Decl's value we must perform cleanup on the + // previous value. 
+ decl.val = new_val; + decl.has_tv = true; + } + { + zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); + defer { + zcu.codegen_prog_node.end(); + zcu.codegen_prog_node = undefined; + } + + try pt.linkerUpdateDecl(decl_index); + } +} + +pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { + const zcu = pt.zcu; + const comp = zcu.comp; + + const decl = zcu.declPtr(decl_index); + + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + defer codegen_prog_node.end(); + + if (comp.bin_file) |lf| { + lf.updateDecl(pt, decl_index) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + }, + else => { + const gpa = zcu.gpa; + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( + gpa, + decl.navSrcLoc(zcu), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure; + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + }, + }; + } else if (zcu.llvm_object) |llvm_object| { + if (build_options.only_c) unreachable; + llvm_object.updateDecl(pt, decl_index) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + }; + } +} + +/// Shortcut for calling `intern_pool.get`. +pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index { + return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key); +} + +/// Shortcut for calling `intern_pool.getCoerced`. +pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!Value { + return Value.fromInterned(try pt.zcu.intern_pool.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern())); +} + +pub fn intType(pt: Zcu.PerThread, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .int_type = .{ + .signedness = signedness, + .bits = bits, + } })); +} + +pub fn errorIntType(pt: Zcu.PerThread) std.mem.Allocator.Error!Type { + return pt.intType(.unsigned, pt.zcu.errorSetBits()); +} + +pub fn arrayType(pt: Zcu.PerThread, info: InternPool.Key.ArrayType) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .array_type = info })); +} + +pub fn vectorType(pt: Zcu.PerThread, info: InternPool.Key.VectorType) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .vector_type = info })); +} + +pub fn optionalType(pt: Zcu.PerThread, child_type: InternPool.Index) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .opt_type = child_type })); +} + +pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!Type { + var canon_info = info; + + if (info.flags.size == .C) canon_info.flags.is_allowzero = true; + + // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee + // type, we change it to 0 here. If this causes an assertion trip because the + // pointee type needs to be resolved more, that needs to be done before calling + // this ptr() function. + if (info.flags.alignment != .none and + info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt)) + { + canon_info.flags.alignment = .none; + } + + switch (info.flags.vector_index) { + // Canonicalize host_size. If it matches the bit size of the pointee type, + // we change it to 0 here. 
If this causes an assertion trip, the pointee type + // needs to be resolved before calling this ptr() function. + .none => if (info.packed_offset.host_size != 0) { + const elem_bit_size = Type.fromInterned(info.child).bitSize(pt); + assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8); + if (info.packed_offset.host_size * 8 == elem_bit_size) { + canon_info.packed_offset.host_size = 0; + } + }, + .runtime => {}, + _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size), + } + + return Type.fromInterned(try pt.intern(.{ .ptr_type = canon_info })); +} + +/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer +/// child type's alignment is resolved so that an invalid alignment is not used. +/// In general, prefer this function during semantic analysis. +pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type { + if (info.flags.alignment != .none) { + _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(pt, .sema); + } + return pt.ptrType(info); +} + +pub fn singleMutPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type { + return pt.ptrType(.{ .child = child_type.toIntern() }); +} + +pub fn singleConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type { + return pt.ptrType(.{ + .child = child_type.toIntern(), + .flags = .{ + .is_const = true, + }, + }); +} + +pub fn manyConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type { + return pt.ptrType(.{ + .child = child_type.toIntern(), + .flags = .{ + .size = .Many, + .is_const = true, + }, + }); +} + +pub fn adjustPtrTypeChild(pt: Zcu.PerThread, ptr_ty: Type, new_child: Type) Allocator.Error!Type { + var info = ptr_ty.ptrInfo(pt.zcu); + info.child = new_child.toIntern(); + return pt.ptrType(info); +} + +pub fn funcType(pt: Zcu.PerThread, key: InternPool.GetFuncTypeKey) Allocator.Error!Type { + return Type.fromInterned(try pt.zcu.intern_pool.getFuncType(pt.zcu.gpa, pt.tid, key)); +} + +/// Use this for `anyframe->T` only. +/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly. +pub fn anyframeType(pt: Zcu.PerThread, payload_ty: Type) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .anyframe_type = payload_ty.toIntern() })); +} + +pub fn errorUnionType(pt: Zcu.PerThread, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { + return Type.fromInterned(try pt.intern(.{ .error_union_type = .{ + .error_set_type = error_set_ty.toIntern(), + .payload_type = payload_ty.toIntern(), + } })); +} + +pub fn singleErrorSetType(pt: Zcu.PerThread, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const names: *const [1]InternPool.NullTerminatedString = &name; + return Type.fromInterned(try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names)); +} + +/// Sorts `names` in place. +pub fn errorSetFromUnsortedNames( + pt: Zcu.PerThread, + names: []InternPool.NullTerminatedString, +) Allocator.Error!Type { + std.mem.sort( + InternPool.NullTerminatedString, + names, + {}, + InternPool.NullTerminatedString.indexLessThan, + ); + const new_ty = try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names); + return Type.fromInterned(new_ty); +} + +/// Supports only pointers, not pointer-like optionals. 
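+/// Illustrative sketch (hypothetical type and address):
+///   const p = try pt.ptrIntValue(some_many_ptr_ty, 0x1000);
+/// interns a pointer of that type whose address is the integer 0x1000.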
+pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value { + const mod = pt.zcu; + assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod)); + assert(x != 0 or ty.isAllowzeroPtr(mod)); + return Value.fromInterned(try pt.intern(.{ .ptr = .{ + .ty = ty.toIntern(), + .base_addr = .int, + .byte_offset = x, + } })); +} + +/// Creates an enum tag value based on the integer tag value. +pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { + if (std.debug.runtime_safety) { + const tag = ty.zigTypeTag(pt.zcu); + assert(tag == .Enum); + } + return Value.fromInterned(try pt.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = tag_int, + } })); +} + +/// Creates an enum tag value based on the field index according to source code +/// declaration order. +pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Allocator.Error!Value { + const ip = &pt.zcu.intern_pool; + const enum_type = ip.loadEnumType(ty.toIntern()); + + if (enum_type.values.len == 0) { + // Auto-numbered fields. + return Value.fromInterned(try pt.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try pt.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = field_index }, + } }), + } })); + } + + return Value.fromInterned(try pt.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = enum_type.values.get(ip)[field_index], + } })); +} + +pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value { + return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() })); +} + +pub fn undefRef(pt: Zcu.PerThread, ty: Type) Allocator.Error!Air.Inst.Ref { + return Air.internedToRef((try pt.undefValue(ty)).toIntern()); +} + +pub fn intValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value { + if (std.math.cast(u64, x)) |casted| return pt.intValue_u64(ty, casted); + if (std.math.cast(i64, x)) |casted| return pt.intValue_i64(ty, casted); + var limbs_buffer: [4]usize = undefined; + var big_int = BigIntMutable.init(&limbs_buffer, x); + return pt.intValue_big(ty, big_int.toConst()); +} + +pub fn intRef(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Air.Inst.Ref { + return Air.internedToRef((try pt.intValue(ty, x)).toIntern()); +} + +pub fn intValue_big(pt: Zcu.PerThread, ty: Type, x: BigIntConst) Allocator.Error!Value { + return Value.fromInterned(try pt.intern(.{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .big_int = x }, + } })); +} + +pub fn intValue_u64(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value { + return Value.fromInterned(try pt.intern(.{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .u64 = x }, + } })); +} + +pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value { + return Value.fromInterned(try pt.intern(.{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .i64 = x }, + } })); +} + +pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { + return Value.fromInterned(try pt.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), + } })); +} + +/// This function casts the float representation down to the representation of the type, potentially +/// losing data if the representation wasn't correct. 
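+/// For example (illustrative), with `ty` an `f32` type, `pt.floatValue(ty, 1.1)`
+/// stores `@as(f32, @floatCast(1.1))`, discarding any extra precision of `x`.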
+pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value { + const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(pt.zcu.getTarget())) { + 16 => .{ .f16 = @as(f16, @floatCast(x)) }, + 32 => .{ .f32 = @as(f32, @floatCast(x)) }, + 64 => .{ .f64 = @as(f64, @floatCast(x)) }, + 80 => .{ .f80 = @as(f80, @floatCast(x)) }, + 128 => .{ .f128 = @as(f128, @floatCast(x)) }, + else => unreachable, + }; + return Value.fromInterned(try pt.intern(.{ .float = .{ + .ty = ty.toIntern(), + .storage = storage, + } })); +} + +pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value { + assert(pt.zcu.intern_pool.isOptionalType(opt_ty.toIntern())); + return Value.fromInterned(try pt.intern(.{ .opt = .{ + .ty = opt_ty.toIntern(), + .val = .none, + } })); +} + +pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type { + return pt.intType(.unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type { + const mod = pt.zcu; + assert(!min.isUndef(mod)); + assert(!max.isUndef(mod)); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, pt).compare(.lte)); + } + + const sign = min.orderAgainstZero(pt) == .lt; + + const min_val_bits = pt.intBitsForValue(min, sign); + const max_val_bits = pt.intBitsForValue(max, sign); + + return pt.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 { + const mod = pt.zcu; + assert(!val.isUndef(mod)); + + const key = mod.intern_pool.indexToKey(val.toIntern()); + switch (key.int.storage) { + .i64 => |x| { + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign); + assert(sign); + // Protect against overflow in the following negation. 
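+            // (Worked example: x == -1 gives -(x + 1) == 0, which needs 0 bits,
+            // plus 1 sign bit, so the result is 1 (an `i1` can hold -1). The
+            // guard below covers x == minInt(i64), returning 64 directly.)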
+ if (x == std.math.minInt(i64)) return 64; + return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1; + }, + .u64 => |x| { + return Type.smallestUnsignedBits(x) + @intFromBool(sign); + }, + .big_int => |big| { + if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign))); + + // Zero is still a possibility, in which case unsigned is fine + if (big.eqlZero()) return 0; + + return @as(u16, @intCast(big.bitCountTwosComp())); + }, + .lazy_align => |lazy_ty| { + return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt).toByteUnits() orelse 0) + @intFromBool(sign); + }, + .lazy_size => |lazy_ty| { + return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt)) + @intFromBool(sign); + }, + } +} + +pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) Zcu.UnionLayout { + const mod = pt.zcu; + const ip = &mod.intern_pool; + assert(loaded_union.haveLayout(ip)); + var most_aligned_field: u32 = undefined; + var most_aligned_field_size: u64 = undefined; + var biggest_field: u32 = undefined; + var payload_size: u64 = 0; + var payload_align: InternPool.Alignment = .@"1"; + for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| { + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; + + const explicit_align = loaded_union.fieldAlign(ip, field_index); + const field_align = if (explicit_align != .none) + explicit_align + else + Type.fromInterned(field_ty).abiAlignment(pt); + const field_size = Type.fromInterned(field_ty).abiSize(pt); + if (field_size > payload_size) { + payload_size = field_size; + biggest_field = @intCast(field_index); + } + if (field_align.compare(.gte, payload_align)) { + payload_align = field_align; + most_aligned_field = @intCast(field_index); + most_aligned_field_size = field_size; + } + } + const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); + if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) { + return .{ + .abi_size = payload_align.forward(payload_size), + .abi_align = payload_align, + .most_aligned_field = most_aligned_field, + .most_aligned_field_size = most_aligned_field_size, + .biggest_field = biggest_field, + .payload_size = payload_size, + .payload_align = payload_align, + .tag_align = .none, + .tag_size = 0, + .padding = 0, + }; + } + + const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt); + const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1"); + return .{ + .abi_size = loaded_union.size(ip).*, + .abi_align = tag_align.max(payload_align), + .most_aligned_field = most_aligned_field, + .most_aligned_field_size = most_aligned_field_size, + .biggest_field = biggest_field, + .payload_size = payload_size, + .payload_align = payload_align, + .tag_align = tag_align, + .tag_size = tag_size, + .padding = loaded_union.padding(ip).*, + }; +} + +pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 { + return mod.getUnionLayout(loaded_union).abi_size; +} + +/// Returns 0 if the union is represented with 0 bits at runtime. +pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment { + const mod = pt.zcu; + const ip = &mod.intern_pool; + const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); + var max_align: InternPool.Alignment = .none; + if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt); + for (loaded_union.field_types.get(ip), 0..) 
|field_ty, field_index| { + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; + + const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index)); + max_align = max_align.max(field_align); + } + return max_align; +} + +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +pub fn unionFieldNormalAlignment( + pt: Zcu.PerThread, + loaded_union: InternPool.LoadedUnionType, + field_index: u32, +) InternPool.Alignment { + return pt.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. +pub fn unionFieldNormalAlignmentAdvanced( + pt: Zcu.PerThread, + loaded_union: InternPool.LoadedUnionType, + field_index: u32, + strat: Type.ResolveStrat, +) Zcu.SemaError!InternPool.Alignment { + const ip = &pt.zcu.intern_pool; + assert(loaded_union.flagsPtr(ip).layout != .@"packed"); + const field_align = loaded_union.fieldAlign(ip, field_index); + if (field_align != .none) return field_align; + const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); + if (field_ty.isNoReturn(pt.zcu)) return .none; + return (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar; +} + +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. +pub fn structFieldAlignment( + pt: Zcu.PerThread, + explicit_alignment: InternPool.Alignment, + field_ty: Type, + layout: std.builtin.Type.ContainerLayout, +) InternPool.Alignment { + return pt.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. +pub fn structFieldAlignmentAdvanced( + pt: Zcu.PerThread, + explicit_alignment: InternPool.Alignment, + field_ty: Type, + layout: std.builtin.Type.ContainerLayout, + strat: Type.ResolveStrat, +) Zcu.SemaError!InternPool.Alignment { + assert(layout != .@"packed"); + if (explicit_alignment != .none) return explicit_alignment; + const ty_abi_align = (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar; + switch (layout) { + .@"packed" => unreachable, + .auto => if (pt.zcu.getTarget().ofmt != .c) return ty_abi_align, + .@"extern" => {}, + } + // extern + if (field_ty.isAbiInt(pt.zcu) and field_ty.intInfo(pt.zcu).bits >= 128) { + return ty_abi_align.maxStrict(.@"16"); + } + return ty_abi_align; +} + +/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets +/// into the packed struct InternPool data rather than computing this on the +/// fly, however it was found to perform worse when measured on real world +/// projects. 
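+/// For example (illustrative): in `packed struct { a: u3, b: u7, c: u2 }`, the
+/// bit offset of `c` is 3 + 7 = 10.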
+pub fn structPackedFieldBitOffset( + pt: Zcu.PerThread, + struct_type: InternPool.LoadedStructType, + field_index: u32, +) u16 { + const mod = pt.zcu; + const ip = &mod.intern_pool; + assert(struct_type.layout == .@"packed"); + assert(struct_type.haveLayout(ip)); + var bit_sum: u64 = 0; + for (0..struct_type.field_types.len) |i| { + if (i == field_index) { + return @intCast(bit_sum); + } + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + bit_sum += field_ty.bitSize(pt); + } + unreachable; // index out of bounds +} + +pub fn getBuiltin(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Air.Inst.Ref { + const decl_index = try pt.getBuiltinDecl(name); + pt.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt"); + return Air.internedToRef(pt.zcu.declPtr(decl_index).val.toIntern()); +} + +pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!InternPool.DeclIndex { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); + const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; + const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; + const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); + pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); + const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); + const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); +} + +pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type { + const ty_inst = try pt.getBuiltin(name); + const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt")); + ty.resolveFully(pt) catch @panic("std.builtin is corrupt"); + return ty; +} + +const Air = @import("../Air.zig"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; +const build_options = @import("build_options"); +const builtin = @import("builtin"); +const Cache = std.Build.Cache; +const InternPool = @import("../InternPool.zig"); +const isUpDir = @import("../introspect.zig").isUpDir; +const Liveness = @import("../Liveness.zig"); +const log = std.log.scoped(.zcu); +const Module = @import("../Package.zig").Module; +const Sema = @import("../Sema.zig"); +const std = @import("std"); +const target_util = @import("../target.zig"); +const trace = @import("../tracy.zig").trace; +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); +const Zir = std.zig.Zir; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 023c86dfb8f0..7a2c0178cd07 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -12,11 +12,9 @@ const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const Target = std.Target; const Allocator = mem.Allocator; const trace = @import("../../tracy.zig").trace; @@ -47,6 +45,7 @@ const gp = abi.RegisterClass.gp; const InnerError = CodeGenError || error{OutOfRegisters}; gpa: Allocator, +pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, @@ -59,7 +58,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -331,15 +330,16 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const gpa = lf.comp.gpa; - const zcu = lf.comp.module.?; + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); const fn_owner_decl = zcu.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); @@ -355,8 +355,9 @@ pub fn generate( } try branch_stack.append(.{}); - var function = Self{ + var function: Self = .{ .gpa = gpa, + .pt = pt, .air = air, .liveness = liveness, .debug_output = debug_output, @@ -476,7 +477,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // stp fp, lr, [sp, #-16]! @@ -526,8 +528,8 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); - const abi_align = ty.abiAlignment(mod); + const abi_size = @as(u32, @intCast(ty.abiSize(pt))); + const abi_align = ty.abiAlignment(pt); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -656,7 +658,8 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -1022,31 +1025,32 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits(mod)) { + if (!elem_ty.hasRuntimeBits(pt)) { // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized // allocations will always have an offset > 0. 
return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { - const mod = self.bin_file.comp.module.?; - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const pt = self.pt; + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. @@ -1133,14 +1137,15 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into const ret_ty = self.fn_type.fnReturnType(mod); - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -1170,7 +1175,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const operand = ty_op.operand; const operand_mcv = try self.resolveInst(operand); const operand_ty = self.typeOf(operand); @@ -1251,7 +1257,8 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const info_a = operand_ty.intInfo(mod); const info_b = dest_ty.intInfo(mod); @@ -1314,7 +1321,8 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -1409,7 +1417,8 @@ fn minMax( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), @@ -1899,7 +1908,8 @@ fn addSub( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations 
on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), @@ -1960,7 +1970,8 @@ fn mul( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { @@ -1992,7 +2003,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div_float", .{}), .Vector => return self.fail("TODO div_float on vectors", .{}), @@ -2008,7 +2020,8 @@ fn divTrunc( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), @@ -2042,7 +2055,8 @@ fn divFloor( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), @@ -2075,7 +2089,8 @@ fn divExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), @@ -2111,7 +2126,8 @@ fn rem( ) InnerError!MCValue { _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO rem/mod on floats", .{}), .Vector => return self.fail("TODO rem/mod on vectors", .{}), @@ -2182,7 +2198,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO mod on floats", .{}), .Vector => return self.fail("TODO mod on vectors", .{}), @@ -2200,7 +2217,8 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { @@ -2235,7 +2253,8 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { @@ -2270,7 +2289,8 @@ fn shiftExact( ) InnerError!MCValue { _ = rhs_ty; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { @@ -2320,7 +2340,8 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { @@ -2360,7 +2381,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: 
?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema @@ -2387,7 +2409,8 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { assert(rhs_ty.eql(Type.usize, mod)); @@ -2397,7 +2420,7 @@ fn ptrArithmetic( .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -2510,7 +2533,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -2518,9 +2542,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt))); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -2638,7 +2662,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -2646,9 +2671,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt))); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -2862,7 +2887,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, 
ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -2870,9 +2896,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod))); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod))); + const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt))); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt))); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -3010,9 +3036,10 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { } fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const payload_ty = optional_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none; + if (!payload_ty.hasRuntimeBits(pt)) return MCValue.none; if (optional_ty.isPtrLikeOptional(mod)) { // TODO should we reuse the operand here? const raw_reg = try self.register_manager.allocReg(inst, gp); @@ -3054,17 +3081,18 @@ fn errUnionErr( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return try error_union_bind.resolveToMcv(self); } - const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))); + const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3086,7 +3114,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8; + const err_bit_size = @as(u32, @intCast(err_ty.abiSize(pt))) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -3134,17 +3162,18 @@ fn errUnionPayload( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return MCValue.none; } - const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3166,7 +3195,7 @@ fn errUnionPayload( ); const 
payload_bit_offset = payload_offset * 8; - const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8; + const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(pt))) * 8; _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, @@ -3246,7 +3275,8 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; if (self.liveness.isUnused(inst)) { @@ -3255,7 +3285,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBits(mod)) { + if (!payload_ty.hasRuntimeBits(pt)) { break :result MCValue{ .immediate = 1 }; } @@ -3275,9 +3305,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .register = reg }; } - const optional_abi_size: u32 = @intCast(optional_ty.abiSize(mod)); - const optional_abi_align = optional_ty.abiAlignment(mod); - const offset: u32 = @intCast(payload_ty.abiSize(mod)); + const optional_abi_size: u32 = @intCast(optional_ty.abiSize(pt)); + const optional_abi_align = optional_ty.abiAlignment(pt); + const offset: u32 = @intCast(payload_ty.abiSize(pt)); const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst); try self.genSetStack(payload_ty, stack_offset, operand); @@ -3291,20 +3321,21 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = ty_op.ty.toType(); const error_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand; - const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); - const abi_align = error_union_ty.abiAlignment(mod); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt))); + const abi_align = error_union_ty.abiAlignment(pt); const stack_offset = try self.allocMem(abi_size, abi_align, inst); - const payload_off = errUnionPayloadOffset(payload_ty, mod); - const err_off = errUnionErrorOffset(payload_ty, mod); + const payload_off = errUnionPayloadOffset(payload_ty, pt); + const err_off = errUnionErrorOffset(payload_ty, pt); try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); @@ -3317,18 +3348,19 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const error_union_ty = ty_op.ty.toType(); const error_ty = 
error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand; - const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); - const abi_align = error_union_ty.abiAlignment(mod); + const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt))); + const abi_align = error_union_ty.abiAlignment(pt); const stack_offset = try self.allocMem(abi_size, abi_align, inst); - const payload_off = errUnionPayloadOffset(payload_ty, mod); - const err_off = errUnionErrorOffset(payload_ty, mod); + const payload_off = errUnionPayloadOffset(payload_ty, pt); + const err_off = errUnionErrorOffset(payload_ty, pt); try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); @@ -3420,7 +3452,8 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { @@ -3444,9 +3477,10 @@ fn ptrElemVal( ptr_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = ptr_ty.childType(mod); - const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt))); // TODO optimize for elem_sizes of 1, 2, 4, 8 switch (elem_size) { @@ -3486,7 +3520,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { @@ -3609,9 +3644,10 @@ fn reuseOperand( } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = ptr_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); switch (ptr) { .none => unreachable, @@ -3857,12 +3893,13 @@ fn genInlineMemsetCode( } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits(mod)) + if (!elem_ty.hasRuntimeBits(pt)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -3888,8 +3925,9 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const 
pt = self.pt; + const mod = pt.zcu; + const abi_size = ty.abiSize(pt); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate, @@ -3911,8 +3949,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type } fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const abi_size = ty.abiSize(pt); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb_immediate, @@ -3933,9 +3971,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; log.debug("store: storing {} to {}", .{ value, ptr }); - const abi_size = value_ty.abiSize(mod); + const abi_size = value_ty.abiSize(pt); switch (ptr) { .none => unreachable, @@ -4087,11 +4125,12 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4112,11 +4151,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index, mod); - const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); switch (mcv) { .dead, .unreach => unreachable, @@ -4162,13 +4202,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = ty_pl.ty.toType().childType(mod); - const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod))); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, pt))); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -4190,7 +4231,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; 
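Note the mechanical rule these hunks apply: queries whose answer depends on type layout (abiSize, abiAlignment, bitSize, structFieldOffset, hasRuntimeBits, hasRuntimeBitsIgnoreComptime) now go through the per-thread handle `pt`, while purely semantic queries (zigTypeTag, childType, errorUnionSet, isSignedInt) still take the bare Zcu. A minimal sketch of the split, assuming the surrounding file's imports (std, Type, Zcu); the helper name is illustrative, not part of the patch:

    // Illustrative only: the tag check is layout-independent and needs just
    // pt.zcu, while the field offset depends on layout and goes through pt.
    fn structFieldByteOffset(pt: Zcu.PerThread, ty: Type, index: u32) u64 {
        const zcu = pt.zcu; // semantic queries keep using the Zcu
        std.debug.assert(ty.zigTypeTag(zcu) == .Struct);
        return ty.structFieldOffset(index, pt); // layout query takes pt
    }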
self.arg_index = arg_index + 1; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; @@ -4245,7 +4287,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const ty = self.typeOf(callee); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -4269,13 +4312,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod)); - const ret_abi_align = ret_ty.abiAlignment(mod); + const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt)); + const ret_abi_align = ret_ty.abiAlignment(pt); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ret_ptr_reg = self.registerAlias(.x0, Type.usize); - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); try self.register_manager.getReg(ret_ptr_reg, null); try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset }); @@ -4308,7 +4351,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (try self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, pt)) |func_value| { if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); @@ -4421,7 +4464,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(mod); @@ -4440,7 +4484,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, @@ -4453,7 +4497,8 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); @@ -4477,8 +4522,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. 
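Resolving a comptime-known callee follows the same rule: self.air.value now takes pt, since materializing an interned value can require layout and interning work. A hedged sketch of the check airCall performs; the wrapper name is an assumption:

    // Hypothetical helper mirroring `try self.air.value(callee, pt)` above:
    // returns true when the callee resolves to a directly known function.
    fn calleeIsComptimeKnown(self: *Self, callee: Air.Inst.Ref) !bool {
        const val = (try self.air.value(callee, self.pt)) orelse return false;
        return val.getFunction(self.pt.zcu) != null;
    }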
const op_inst = un_op.toIndex().?; if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) { - const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod))); - const abi_align = ret_ty.abiAlignment(mod); + const abi_size = @as(u32, @intCast(ret_ty.abiSize(pt))); + const abi_align = ret_ty.abiAlignment(pt); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4513,11 +4558,12 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { const payload_ty = lhs_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; @@ -4620,7 +4666,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const func = mod.funcInfo(extra.data.func); @@ -4825,13 +4872,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: { const payload_ty = operand_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :blk .{ .ty = operand_ty, .bind = operand_bind }; - const offset = @as(u32, @intCast(payload_ty.abiSize(mod))); + const offset = @as(u32, @intCast(payload_ty.abiSize(pt))); const operand_mcv = try operand_bind.resolveToMcv(self); const new_mcv: MCValue = switch (operand_mcv) { .register => |source_reg| new: { @@ -4881,7 +4929,8 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { @@ -4923,7 +4972,8 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -4950,7 +5000,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -4977,7 +5028,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -5004,7 +5056,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -5225,10 +5278,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const block_data = self.blocks.getPtr(block).?; - if (self.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(pt)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5402,8 +5455,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const pt = self.pt; + const mod = pt.zcu; + const abi_size = @as(u32, @intCast(ty.abiSize(pt))); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5462,7 +5516,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt))); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); @@ -5495,7 +5549,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - const ptr_ty = try mod.singleMutPtrType(ty); + const ptr_ty = try pt.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5573,7 +5627,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
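Both stack-copy fallbacks above build a `*T` pointer type on the fly, and type creation is now a PerThread operation, presumably because it interns into the InternPool. A minimal sketch under the signature these hunks show; the wrapper itself is illustrative:

    // Illustrative: materialize `*ty` for the memcpy-style block copy.
    fn copyPtrType(self: *Self, ty: Type) InnerError!Type {
        return self.pt.singleMutPtrType(ty);
    }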
@@ -5685,7 +5740,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void try self.genLdrRegister(reg, reg.toX(), ty); }, .stack_offset => |off| { - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(pt); switch (abi_size) { 1, 2, 4, 8 => { @@ -5709,7 +5764,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(pt); switch (abi_size) { 1, 2, 4, 8 => { @@ -5736,8 +5791,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const pt = self.pt; + const abi_size = @as(u32, @intCast(ty.abiSize(pt))); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5745,7 +5800,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(mod)) { + switch (ty.abiSize(pt)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -5815,7 +5870,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - const ptr_ty = try mod.singleMutPtrType(ty); + const ptr_ty = try pt.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5936,7 +5991,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); @@ -6056,7 +6112,8 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6100,15 +6157,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn airTry(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.typeOf(pl_op.operand); - const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod))); - const error_union_align = error_union_ty.abiAlignment(mod); + const error_union_size = @as(u32, 
@intCast(error_union_ty.abiSize(pt))); + const error_union_align = error_union_ty.abiAlignment(pt); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6137,14 +6194,15 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; // If the type has no codegen bits, no need to store it. const inst_ty = self.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?); + const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?); return self.getResolvedInstValue(inst_index); } @@ -6164,6 +6222,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, + self.pt, self.src_loc, val, self.owner_decl, @@ -6199,7 +6258,8 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; @@ -6229,10 +6289,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt)); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6244,7 +6304,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))); + const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))); if (param_size == 0) { result_arg.* = .{ .none = {} }; continue; @@ -6252,7 +6312,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned // values to spread across odd-numbered registers. - if (Type.fromInterned(ty).abiAlignment(mod) == .@"16" and !self.target.isDarwin()) { + if (Type.fromInterned(ty).abiAlignment(pt) == .@"16" and !self.target.isDarwin()) { // Round up NCRN to the next even number ncrn += ncrn % 2; } @@ -6270,7 +6330,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided // that the entire stack space consumed by the arguments is 8-byte aligned. 
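The C calling-convention path tracks NCRN (next core register number) and NSAA (next stacked argument address), as the comments above put it. A simplified sketch of the two rounding rules this hunk adjusts; the booleans stand in for the `abiAlignment(pt) == .@"16"` and `.@"8"` checks, and the function is illustrative:

    // Simplified AAPCS64 rounding, mirroring the surrounding hunk.
    fn roundForArg(ncrn: *usize, nsaa: *u32, align16: bool, align8: bool, is_darwin: bool) void {
        // 16-byte aligned arguments start in an even-numbered register,
        // except on Apple targets, which allow odd registers.
        if (align16 and !is_darwin) ncrn.* += ncrn.* % 2;
        // Once arguments spill to the stack, pad 8-byte aligned ones so
        // the next stacked argument address is a multiple of 8.
        if (align8 and nsaa.* % 8 != 0) nsaa.* += 8 - (nsaa.* % 8);
    }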
- if (Type.fromInterned(ty).abiAlignment(mod) == .@"8") { + if (Type.fromInterned(ty).abiAlignment(pt) == .@"8") { if (nsaa % 8 != 0) { nsaa += 8 - (nsaa % 8); } @@ -6287,10 +6347,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { .Unspecified => { if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(pt))); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6309,9 +6369,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - if (Type.fromInterned(ty).abiSize(mod) > 0) { - const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod)); - const param_alignment = Type.fromInterned(ty).abiAlignment(mod); + if (Type.fromInterned(ty).abiSize(pt) > 0) { + const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt)); + const param_alignment = Type.fromInterned(ty).abiAlignment(pt); stack_offset = @intCast(param_alignment.forward(stack_offset)); result_arg.* = .{ .stack_argument_offset = stack_offset }; @@ -6330,7 +6390,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return result; } -/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. +/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`. fn wantSafety(self: *Self) bool { return switch (self.bin_file.comp.root_mod.optimize_mode) { .Debug => true, @@ -6362,8 +6422,7 @@ fn parseRegName(name: []const u8) ?Register { } fn registerAlias(self: *Self, reg: Register, ty: Type) Register { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(self.pt); switch (reg.class()) { .general_purpose => { @@ -6391,11 +6450,9 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { } fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &mod.intern_pool); + return self.air.typeOf(inst, &self.pt.zcu.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &mod.intern_pool); + return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool); } diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 2588db6adce2..7010267e179c 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -8,9 +8,7 @@ const Mir = @import("Mir.zig"); const bits = @import("bits.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const assert = std.debug.assert; const Instruction = bits.Instruction; const Register = bits.Register; @@ -22,7 +20,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 5eeeee0fa2bf..05a77c54b56b 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -5,8 +5,6 @@ const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; pub const Class = union(enum) { memory, @@ -17,44 +15,44 @@ pub const Class = union(enum) { }; /// For `float_array` the second element will be the amount of floats. -pub fn classifyType(ty: Type, mod: *Module) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); +pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class { + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt)); var maybe_float_bits: ?u16 = null; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Struct => { - if (ty.containerLayout(mod) == .@"packed") return .byval; - const float_count = countFloats(ty, mod, &maybe_float_bits); + if (ty.containerLayout(pt.zcu) == .@"packed") return .byval; + const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); if (bit_size > 128) return .memory; if (bit_size > 64) return .double_integer; return .integer; }, .Union => { - if (ty.containerLayout(mod) == .@"packed") return .byval; - const float_count = countFloats(ty, mod, &maybe_float_bits); + if (ty.containerLayout(pt.zcu) == .@"packed") return .byval; + const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); if (bit_size > 128) return .memory; if (bit_size > 64) return .double_integer; return .integer; }, .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval, .Vector => { - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); // TODO is this controlled by a cpu feature? 
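In abi.zig the split shows up in the signature itself: classifyType needs Zcu.PerThread because bitSize is a layout query, while countFloats and getFloatArrayType below only walk field types and keep a bare *Zcu. A hypothetical call site; the function name and import path are assumptions:

    const abi = @import("arch/aarch64/abi.zig");

    // Illustrative: decide whether an aggregate is passed in memory.
    fn passedInMemory(pt: Zcu.PerThread, ty: Type) bool {
        return switch (abi.classifyType(ty, pt)) {
            .memory => true,
            .byval, .integer, .double_integer, .float_array => false,
        };
    }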
if (bit_size > 128) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional(mod)); + std.debug.assert(ty.isPtrLikeOptional(pt.zcu)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice(mod)); + std.debug.assert(!ty.isSlice(pt.zcu)); return .byval; }, .ErrorUnion, @@ -76,16 +74,16 @@ pub fn classifyType(ty: Type, mod: *Module) Class { } const sret_float_count = 4; -fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { - const ip = &mod.intern_pool; - const target = mod.getTarget(); +fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 { + const ip = &zcu.intern_pool; + const target = zcu.getTarget(); const invalid = std.math.maxInt(u8); - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .Union => { - const union_obj = mod.typeToUnion(ty).?; + const union_obj = zcu.typeToUnion(ty).?; var max_count: u8 = 0; for (union_obj.field_types.get(ip)) |field_ty| { - const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits); + const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > sret_float_count) return invalid; @@ -93,12 +91,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(mod); + const fields_len = ty.structFieldCount(zcu); var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, mod); - const field_count = countFloats(field_ty, mod, maybe_float_bits); + const field_ty = ty.structFieldType(i, zcu); + const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > sret_float_count) return invalid; @@ -118,22 +116,22 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { } } -pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { +pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type { + const ip = &zcu.intern_pool; + switch (ty.zigTypeTag(zcu)) { .Union => { - const union_obj = mod.typeToUnion(ty).?; + const union_obj = zcu.typeToUnion(ty).?; for (union_obj.field_types.get(ip)) |field_ty| { - if (getFloatArrayType(Type.fromInterned(field_ty), mod)) |some| return some; + if (getFloatArrayType(Type.fromInterned(field_ty), zcu)) |some| return some; } return null; }, .Struct => { - const fields_len = ty.structFieldCount(mod); + const fields_len = ty.structFieldCount(zcu); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, mod); - if (getFloatArrayType(field_ty, mod)) |some| return some; + const field_ty = ty.structFieldType(i, zcu); + if (getFloatArrayType(field_ty, zcu)) |some| return some; } return null; }, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3f10513bb2fe..0dd513d4fe44 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -12,11 +12,9 @@ const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const Target = std.Target; const Allocator = mem.Allocator; const trace = @import("../../tracy.zig").trace; @@ -48,6 +46,7 @@ const gp = abi.RegisterClass.gp; const InnerError = CodeGenError || error{OutOfRegisters}; gpa: Allocator, +pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, @@ -59,7 +58,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -261,7 +260,6 @@ const DbgInfoReloc = struct { } fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void { - const mod = function.bin_file.comp.module.?; switch (function.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { @@ -282,7 +280,7 @@ const DbgInfoReloc = struct { else => unreachable, // not a possible argument }; - try dw.genArgDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), loc); + try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), loc); }, .plan9 => {}, .none => {}, @@ -290,7 +288,6 @@ const DbgInfoReloc = struct { } fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void { - const mod = function.bin_file.comp.module.?; const is_ptr = switch (reloc.tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -326,7 +323,7 @@ const DbgInfoReloc = struct { break :blk .nop; }, }; - try dw.genVarDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), is_ptr, loc); + try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -338,15 +335,16 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const gpa = lf.comp.gpa; - const zcu = lf.comp.module.?; + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); const fn_owner_decl = zcu.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); @@ -364,6 +362,7 @@ pub fn generate( var function: Self = .{ .gpa = gpa, + .pt = pt, .air = air, .liveness = liveness, .target = target, @@ -482,7 +481,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // push {fp, lr} @@ -526,8 +526,8 @@ fn gen(self: *Self) !void { const ty = self.typeOfIndex(inst); - const abi_size: u32 = @intCast(ty.abiSize(mod)); - const abi_align = ty.abiAlignment(mod); + const abi_size: u32 = @intCast(ty.abiSize(pt)); + const abi_align = ty.abiAlignment(pt); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -642,7 +642,8 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -1004,10 +1005,11 @@ fn allocMem( 
/// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits(mod)) { + if (!elem_ty.hasRuntimeBits(pt)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -1015,21 +1017,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return 0; } - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { - const mod = self.bin_file.comp.module.?; - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const pt = self.pt; + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. @@ -1112,14 +1114,15 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into const ret_ty = self.fn_type.fnReturnType(mod); - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -1145,7 +1148,8 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); @@ -1154,8 +1158,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const operand_ty = self.typeOf(ty_op.operand); const dest_ty = self.typeOfIndex(inst); - const operand_abi_size = operand_ty.abiSize(mod); - const dest_abi_size = dest_ty.abiSize(mod); + const operand_abi_size = operand_ty.abiSize(pt); + const dest_abi_size = dest_ty.abiSize(pt); const info_a = operand_ty.intInfo(mod); const info_b = dest_ty.intInfo(mod); @@ -1211,7 +1215,8 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const info_a = operand_ty.intInfo(mod); const info_b = dest_ty.intInfo(mod); @@ -1275,7 +1280,8 @@ fn 
airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const operand_ty = self.typeOf(ty_op.operand); @@ -1371,7 +1377,8 @@ fn minMax( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), @@ -1580,7 +1587,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -1588,9 +1596,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod)); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod)); + const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt)); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt)); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -1693,7 +1701,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -1701,9 +1710,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod)); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod)); + const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt)); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt)); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -1857,15 +1866,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ 
extra.lhs, extra.rhs, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = result: { const lhs_ty = self.typeOf(extra.lhs); const rhs_ty = self.typeOf(extra.rhs); const tuple_ty = self.typeOfIndex(inst); - const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod)); - const tuple_align = tuple_ty.abiAlignment(mod); - const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod)); + const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt)); + const tuple_align = tuple_ty.abiAlignment(pt); + const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt)); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), @@ -2013,11 +2023,11 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.typeOfIndex(inst); - const abi_size: u32 = @intCast(optional_ty.abiSize(mod)); + const abi_size: u32 = @intCast(optional_ty.abiSize(pt)); // Optional with a zero-bit payload type is just a boolean true if (abi_size == 1) { @@ -2036,17 +2046,18 @@ fn errUnionErr( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return try error_union_bind.resolveToMcv(self); } - const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, mod)); + const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2068,7 +2079,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size: u32 = @intCast(err_ty.abiSize(mod) * 8); + const err_bit_size: u32 = @intCast(err_ty.abiSize(pt) * 8); _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -2113,17 +2124,18 @@ fn errUnionPayload( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return MCValue.none; } - const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, mod)); + const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, pt)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2145,7 +2157,7 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size: u32 = @intCast(payload_ty.abiSize(mod) * 8); + const payload_bit_size: u32 = @intCast(payload_ty.abiSize(pt) * 8); _ = try self.addInst(.{ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else 
.ubfx, @@ -2223,20 +2235,21 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = ty_op.ty.toType(); const error_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand; - const abi_size: u32 = @intCast(error_union_ty.abiSize(mod)); - const abi_align = error_union_ty.abiAlignment(mod); + const abi_size: u32 = @intCast(error_union_ty.abiSize(pt)); + const abi_align = error_union_ty.abiAlignment(pt); const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, mod); - const err_off = errUnionErrorOffset(payload_ty, mod); + const payload_off = errUnionPayloadOffset(payload_ty, pt); + const err_off = errUnionErrorOffset(payload_ty, pt); try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand); try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 }); @@ -2247,20 +2260,21 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = ty_op.ty.toType(); const error_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand; - const abi_size: u32 = @intCast(error_union_ty.abiSize(mod)); - const abi_align = error_union_ty.abiAlignment(mod); + const abi_size: u32 = @intCast(error_union_ty.abiSize(pt)); + const abi_align = error_union_ty.abiAlignment(pt); const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, mod); - const err_off = errUnionErrorOffset(payload_ty, mod); + const payload_off = errUnionPayloadOffset(payload_ty, pt); + const err_off = errUnionErrorOffset(payload_ty, pt); try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand); try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef); @@ -2364,9 +2378,10 @@ fn ptrElemVal( ptr_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = ptr_ty.childType(mod); - const elem_size: u32 = @intCast(elem_ty.abiSize(mod)); + const elem_size: u32 = @intCast(elem_ty.abiSize(pt)); switch (elem_size) { 1, 4 => { @@ -2423,7 +2438,8 @@ fn ptrElemVal( } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod 
= pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { @@ -2466,7 +2482,8 @@ fn arrayElemVal( array_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = array_ty.childType(mod); const mcv = try array_bind.resolveToMcv(self); @@ -2501,7 +2518,7 @@ fn arrayElemVal( const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv }; - const ptr_ty = try mod.singleMutPtrType(elem_ty); + const ptr_ty = try pt.singleMutPtrType(elem_ty); return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst); }, @@ -2522,7 +2539,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { @@ -2656,9 +2674,10 @@ fn reuseOperand( } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = ptr_ty.childType(mod); - const elem_size: u32 = @intCast(elem_ty.abiSize(mod)); + const elem_size: u32 = @intCast(elem_ty.abiSize(pt)); switch (ptr) { .none => unreachable, @@ -2733,11 +2752,12 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits(mod)) + if (!elem_ty.hasRuntimeBits(pt)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2746,7 +2766,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.dead; const dest_mcv: MCValue = blk: { - const ptr_fits_dest = elem_ty.abiSize(mod) <= 4; + const ptr_fits_dest = elem_ty.abiSize(pt) <= 4; if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
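The reuse check above is ARM-specific: the pointer's register can double as the destination only when the loaded value also fits in one 4-byte general-purpose register. A sketch of the predicate under that assumption:

    // Sketch: on 32-bit ARM, reuse is only safe for values that fit a GPR.
    fn ptrFitsDest(self: *Self, elem_ty: Type) bool {
        return elem_ty.abiSize(self.pt) <= 4;
    }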
break :blk ptr; @@ -2762,8 +2782,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; - const elem_size: u32 = @intCast(value_ty.abiSize(mod)); + const pt = self.pt; + const elem_size: u32 = @intCast(value_ty.abiSize(pt)); switch (ptr) { .none => unreachable, @@ -2882,11 +2902,12 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, mod)); + const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2906,11 +2927,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, mod)); + const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt)); const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { @@ -2974,7 +2996,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(mod) * 8); + const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(pt) * 8); _ = try self.addInst(.{ .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, @@ -2996,7 +3018,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -3007,7 +3030,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, mod)); + const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, pt)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3390,7 +3413,8 @@ fn addSub( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return 
self.fail("TODO ARM binary operations on vectors", .{}), @@ -3446,7 +3470,8 @@ fn mul( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3479,7 +3504,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3495,7 +3521,8 @@ fn divTrunc( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3538,7 +3565,8 @@ fn divFloor( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3586,7 +3614,8 @@ fn divExact( _ = rhs_ty; _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3603,7 +3632,8 @@ fn rem( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3672,7 +3702,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), @@ -3690,7 +3721,8 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { @@ -3728,7 +3760,8 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { @@ -3773,7 +3806,8 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { @@ -3812,7 +3846,8 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue 
{ - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { @@ -3855,7 +3890,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Bool => { const lhs_immediate = try lhs_bind.resolveToImmediate(self); @@ -3889,7 +3925,8 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { assert(rhs_ty.eql(Type.usize, mod)); @@ -3899,7 +3936,7 @@ fn ptrArithmetic( .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type else => ptr_ty.childType(mod), }; - const elem_size: u32 = @intCast(elem_ty.abiSize(mod)); + const elem_size: u32 = @intCast(elem_ty.abiSize(pt)); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -3926,8 +3963,9 @@ fn ptrArithmetic( } fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const mod = pt.zcu; + const abi_size = ty.abiSize(pt); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, @@ -3961,8 +3999,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const abi_size = ty.abiSize(pt); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -4168,7 +4206,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; @@ -4223,7 +4262,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.typeOf(callee); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -4253,11 +4293,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(mod); - const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod)); - const ret_abi_align = ret_ty.abiAlignment(mod); + const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt)); + const ret_abi_align = ret_ty.abiAlignment(pt); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); try self.register_manager.getReg(.r0, null); try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset 
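`genLdrRegister` above selects the load instruction purely from the type's ABI size and signedness. A standalone sketch of that selection, assuming the 2-byte and 4-byte cases follow the same pattern as the visible 1-byte case (enum names mirror the `Mir.Inst.Tag` values in the hunk):

    const LoadTag = enum { ldrsb, ldrb, ldrsh, ldrh, ldr };

    fn loadTagFor(abi_size: u64, is_signed: bool) ?LoadTag {
        return switch (abi_size) {
            1 => if (is_signed) .ldrsb else .ldrb,
            2 => if (is_signed) .ldrsh else .ldrh,
            3, 4 => .ldr,
            else => null, // wider values never go through genLdrRegister
        };
    }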
}); @@ -4293,7 +4333,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (try self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, pt)) |func_value| { if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl); @@ -4374,7 +4414,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(mod); @@ -4393,7 +4434,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, // invalid return result @@ -4406,7 +4447,8 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); @@ -4430,8 +4472,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = un_op.toIndex().?; if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) { - const abi_size: u32 = @intCast(ret_ty.abiSize(mod)); - const abi_align = ret_ty.abiAlignment(mod); + const abi_size: u32 = @intCast(ret_ty.abiSize(pt)); + const abi_align = ret_ty.abiAlignment(pt); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4467,11 +4509,12 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { const payload_ty = lhs_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; @@ -4573,7 +4616,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const func = mod.funcInfo(extra.data.func); @@ -4785,9 +4829,10 @@ fn isNull( operand_bind: ReadArg.Bind, operand_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; if (operand_ty.isPtrLikeOptional(mod)) { - assert(operand_ty.abiSize(mod) == 4); + assert(operand_ty.abiSize(pt) == 4); const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; return self.cmp(operand_bind, imm_bind, Type.usize, .eq); @@ -4819,7 +4864,8 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, 
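The `r0_lock` branch in `airCall` and the `stack_offset` arm of `airRet` are two halves of return-by-reference: the caller allocates the result slot and passes its address in r0, and the callee stores through `self.ret_mcv` instead of filling a return register. The same shape in plain Zig:

    const Big = struct { words: [8]u64 };

    // Callee: writes its result through the caller-provided pointer.
    fn calleeStore(ret_ptr: *Big) void {
        ret_ptr.* = .{ .words = [_]u64{0xAA} ** 8 };
    }

    // Caller: owns the stack slot the result lands in.
    fn caller() Big {
        var slot: Big = undefined; // the allocMem'd frame slot
        calleeStore(&slot); // the address travels where r0 would
        return slot;
    }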
inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -4846,7 +4892,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -4866,7 +4913,8 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { @@ -4908,7 +4956,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -4935,7 +4984,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); @@ -5154,10 +5204,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const block_data = self.blocks.getPtr(block).?; - if (self.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(pt)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5325,8 +5375,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const mod = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
@@ -5407,7 +5458,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, mod)); + const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, pt)); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5445,7 +5496,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - const ptr_ty = try mod.singleMutPtrType(ty); + const ptr_ty = try pt.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5487,7 +5538,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5662,7 +5714,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, @@ -5713,7 +5765,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(pt); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, @@ -5734,8 +5786,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const abi_size: u32 = @intCast(ty.abiSize(pt)); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5802,7 +5854,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - const ptr_ty = try mod.singleMutPtrType(ty); + const ptr_ty = try pt.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5890,7 +5942,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); @@ -6009,7 +6062,8 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const 
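The overflow path in `genSetStack` writes the wrapped result first, then materializes the carry/overflow flag (via `movcs`) into the byte at `structFieldOffset(1, pt)`. A rough analogue of that tuple layout; the real type is an AIR tuple, not an extern struct:

    const std = @import("std");

    const OverflowU32 = extern struct {
        value: u32, // field 0: the wrapped result
        bit: u8, // field 1: the overflow flag
    };

    comptime {
        std.debug.assert(@offsetOf(OverflowU32, "value") == 0);
        std.debug.assert(@offsetOf(OverflowU32, "bit") == @sizeOf(u32));
    }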
vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6054,15 +6108,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn airTry(self: *Self, inst: Air.Inst.Index) !void { + const pt = self.pt; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.typeOf(pl_op.operand); - const mod = self.bin_file.comp.module.?; - const error_union_size: u32 = @intCast(error_union_ty.abiSize(mod)); - const error_union_align = error_union_ty.abiAlignment(mod); + const error_union_size: u32 = @intCast(error_union_ty.abiSize(pt)); + const error_union_align = error_union_ty.abiAlignment(pt); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6091,14 +6145,15 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; // If the type has no codegen bits, no need to store it. const inst_ty = self.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?); + const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?); return self.getResolvedInstValue(inst_index); } @@ -6116,12 +6171,13 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, + pt, self.src_loc, val, - mod.funcOwnerDeclIndex(self.func_index), + pt.zcu.funcOwnerDeclIndex(self.func_index), )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -6152,7 +6208,8 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. 
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; @@ -6182,10 +6239,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size: u32 = @intCast(ret_ty.abiSize(mod)); + const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt)); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6200,10 +6257,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - if (Type.fromInterned(ty).abiAlignment(mod) == .@"8") + if (Type.fromInterned(ty).abiAlignment(pt) == .@"8") ncrn = std.mem.alignForward(usize, ncrn, 2); - const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod)); + const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6215,7 +6272,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (Type.fromInterned(ty).abiAlignment(mod) == .@"8") + if (Type.fromInterned(ty).abiAlignment(pt) == .@"8") nsaa = std.mem.alignForward(u32, nsaa, 8); result_arg.* = .{ .stack_argument_offset = nsaa }; @@ -6229,10 +6286,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { .Unspecified => { if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size: u32 = @intCast(ret_ty.abiSize(mod)); + const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt)); if (ret_ty_size == 0) { assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; @@ -6250,9 +6307,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - if (Type.fromInterned(ty).abiSize(mod) > 0) { - const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod)); - const param_alignment = Type.fromInterned(ty).abiAlignment(mod); + if (Type.fromInterned(ty).abiSize(pt) > 0) { + const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt)); + const param_alignment = Type.fromInterned(ty).abiAlignment(pt); stack_offset = @intCast(param_alignment.forward(stack_offset)); result_arg.* = .{ .stack_argument_offset = stack_offset }; @@ -6271,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return result; } -/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. +/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`. 
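The C calling-convention branch above is a compact AAPCS core-register walk: `ncrn` tracks the next candidate of r0-r3, 8-byte-aligned parameters round it up to an even register, and once the registers are exhausted `nsaa` hands out stack offsets. A simplified, hypothetical model of just that bookkeeping (the real code also counts multi-register values and fails with a TODO on register/stack splits):

    const std = @import("std");

    const ArgLoc = union(enum) { register: u32, stack_offset: u32 };

    fn classifyArgs(sizes: []const u32, aligns8: []const bool, out: []ArgLoc) void {
        var ncrn: u32 = 0; // next core register number (r0..r3)
        var nsaa: u32 = 0; // next stacked argument address
        for (sizes, aligns8, out) |size, is_align8, *loc| {
            if (is_align8) ncrn = std.mem.alignForward(u32, ncrn, 2);
            if (size <= 4 and ncrn < 4) {
                loc.* = .{ .register = ncrn };
                ncrn += 1;
            } else {
                ncrn = 4; // registers are spent from here on
                if (is_align8) nsaa = std.mem.alignForward(u32, nsaa, 8);
                loc.* = .{ .stack_offset = nsaa };
                nsaa += size;
            }
        }
    }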
fn wantSafety(self: *Self) bool { return switch (self.bin_file.comp.root_mod.optimize_mode) { .Debug => true, @@ -6305,11 +6362,9 @@ fn parseRegName(name: []const u8) ?Register { } fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &mod.intern_pool); + return self.air.typeOf(inst, &self.pt.zcu.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &mod.intern_pool); + return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool); } diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index da19760d8bc8..dd68f2397da5 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -9,10 +9,8 @@ const Mir = @import("Mir.zig"); const bits = @import("bits.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const Type = @import("../../Type.zig"); -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const Target = std.Target; const assert = std.debug.assert; const Instruction = bits.Instruction; @@ -26,7 +24,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index f88218bc57dc..da474c126137 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -5,8 +5,6 @@ const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; pub const Class = union(enum) { memory, @@ -26,29 +24,29 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { - assert(ty.hasRuntimeBitsIgnoreComptime(mod)); +pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class { + assert(ty.hasRuntimeBitsIgnoreComptime(pt)); var maybe_float_bits: ?u16 = null; const max_byval_size = 512; - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { + const ip = &pt.zcu.intern_pool; + switch (ty.zigTypeTag(pt.zcu)) { .Struct => { - const bit_size = ty.bitSize(mod); - if (ty.containerLayout(mod) == .@"packed") { + const bit_size = ty.bitSize(pt); + if (ty.containerLayout(pt.zcu) == .@"packed") { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, mod, &maybe_float_bits); + const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - const fields = ty.structFieldCount(mod); + const fields = ty.structFieldCount(pt.zcu); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i, mod); - const field_alignment = ty.structFieldAlign(i, mod); - const field_size = field_ty.bitSize(mod); + const field_ty = ty.structFieldType(i, pt.zcu); + const field_alignment = ty.structFieldAlign(i, pt); + const field_size = field_ty.bitSize(pt); if (field_size > 32 or field_alignment.compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); } @@ -56,19 +54,19 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { return Class.arrSize(bit_size, 32); }, .Union => { - const bit_size = ty.bitSize(mod); - const union_obj = mod.typeToUnion(ty).?; 
+ const bit_size = ty.bitSize(pt); + const union_obj = pt.zcu.typeToUnion(ty).?; if (union_obj.getLayout(ip) == .@"packed") { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, mod, &maybe_float_bits); + const float_count = countFloats(ty, pt.zcu, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| { - if (Type.fromInterned(field_ty).bitSize(mod) > 32 or - mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32")) + if (Type.fromInterned(field_ty).bitSize(pt) > 32 or + pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); } @@ -79,28 +77,28 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { .Int => { // TODO this is incorrect for _BitInt(128) but implementing // this correctly makes implementing compiler-rt impossible. - // const bit_size = ty.bitSize(mod); + // const bit_size = ty.bitSize(pt); // if (bit_size > 64) return .memory; return .byval; }, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); if (bit_size > 64) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); // TODO is this controlled by a cpu feature? if (ctx == .ret and bit_size > 128) return .memory; if (bit_size > 512) return .memory; return .byval; }, .Optional => { - assert(ty.isPtrLikeOptional(mod)); + assert(ty.isPtrLikeOptional(pt.zcu)); return .byval; }, .Pointer => { - assert(!ty.isSlice(mod)); + assert(!ty.isSlice(pt.zcu)); return .byval; }, .ErrorUnion, @@ -122,16 +120,16 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { - const ip = &mod.intern_pool; - const target = mod.getTarget(); +fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 { + const ip = &zcu.intern_pool; + const target = zcu.getTarget(); const invalid = std.math.maxInt(u32); - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .Union => { - const union_obj = mod.typeToUnion(ty).?; + const union_obj = zcu.typeToUnion(ty).?; var max_count: u32 = 0; for (union_obj.field_types.get(ip)) |field_ty| { - const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits); + const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > byval_float_count) return invalid; @@ -139,12 +137,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(mod); + const fields_len = ty.structFieldCount(zcu); var count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, mod); - const field_count = countFloats(field_ty, mod, maybe_float_bits); + const field_ty = ty.structFieldType(i, zcu); + const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > byval_float_count) return invalid; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2faddc22e8dd..f4f0ff09724b 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -46,6 
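`classifyType` gates aggregates on two size thresholds before any per-field analysis: packed layouts stay `byval` only up to 64 bits, everything else up to `max_byval_size` (512) bits. Condensed into a sketch that deliberately ignores the float-count and field-alignment refinements:

    const Class = enum { byval, memory };

    fn sizeGate(bit_size: u64, is_packed: bool) Class {
        const max_byval_size: u64 = 512;
        if (is_packed) return if (bit_size > 64) .memory else .byval;
        return if (bit_size > max_byval_size) .memory else .byval;
    }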
+46,7 @@ const RegisterLock = RegisterManager.RegisterLock; const InnerError = CodeGenError || error{OutOfRegisters}; gpa: Allocator, +pt: Zcu.PerThread, air: Air, mod: *Package.Module, liveness: Liveness, @@ -541,14 +542,14 @@ const FrameAlloc = struct { .ref_count = 0, }; } - fn initType(ty: Type, zcu: *Zcu) FrameAlloc { + fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc { return init(.{ - .size = ty.abiSize(zcu), - .alignment = ty.abiAlignment(zcu), + .size = ty.abiSize(pt), + .alignment = ty.abiAlignment(pt), }); } - fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc { - const abi_size = ty.abiSize(zcu); + fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc { + const abi_size = ty.abiSize(pt); const spill_size = if (abi_size < 8) math.ceilPowerOfTwoAssert(u64, abi_size) else @@ -556,7 +557,7 @@ const FrameAlloc = struct { return init(.{ .size = spill_size, .pad = @intCast(spill_size - abi_size), - .alignment = ty.abiAlignment(zcu).maxStrict( + .alignment = ty.abiAlignment(pt).maxStrict( Alignment.fromNonzeroByteUnits(@min(spill_size, 8)), ), }); @@ -696,6 +697,7 @@ const CallView = enum(u1) { pub fn generate( bin_file: *link.File, + pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, @@ -703,9 +705,9 @@ pub fn generate( code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const comp = bin_file.comp; - const gpa = comp.gpa; - const zcu = comp.module.?; + const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); const fn_owner_decl = zcu.declPtr(func.owner_decl); @@ -726,6 +728,7 @@ pub fn generate( var function = Func{ .gpa = gpa, .air = air, + .pt = pt, .mod = mod, .liveness = liveness, .target = target, @@ -787,11 +790,11 @@ pub fn generate( function.args = call_info.args; function.ret_mcv = call_info.return_value; function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu), - .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align), + .size = Type.usize.abiSize(pt), + .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align), })); function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(zcu), + .size = Type.usize.abiSize(pt), .alignment = Alignment.min( call_info.stack_align, Alignment.fromNonzeroByteUnits(function.target.stackAlignment()), @@ -803,7 +806,7 @@ pub fn generate( })); function.frame_allocs.set(@intFromEnum(FrameIndex.spill_frame), FrameAlloc.init(.{ .size = 0, - .alignment = Type.usize.abiAlignment(zcu), + .alignment = Type.usize.abiAlignment(pt), })); function.gen() catch |err| switch (err) { @@ -821,9 +824,10 @@ pub fn generate( }; defer mir.deinit(gpa); - var emit = Emit{ + var emit: Emit = .{ + .bin_file = bin_file, .lower = .{ - .bin_file = bin_file, + .pt = pt, .allocator = gpa, .mir = mir, .cc = fn_info.cc, @@ -875,10 +879,10 @@ fn formatWipMir( _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - const comp = data.func.bin_file.comp; - const mod = comp.root_mod; - var lower = Lower{ - .bin_file = data.func.bin_file, + const pt = data.func.pt; + const comp = pt.zcu.comp; + var lower: Lower = .{ + .pt = pt, .allocator = data.func.gpa, .mir = .{ .instructions = data.func.mir_instructions.slice(), @@ -889,7 +893,7 @@ fn formatWipMir( .src_loc = data.func.src_loc, .output_mode = comp.config.output_mode, .link_mode = comp.config.link_mode, - .pic = mod.pic, + .pic = comp.root_mod.pic, 
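`FrameAlloc.initSpill` pads small spills up to a power of two so a single naturally aligned load/store can move them. A sketch of just the size computation, assuming larger sizes pass through unchanged (the `else` branch is not shown above) and that `abi_size` is nonzero, as the assert variant requires:

    const std = @import("std");

    fn spillSize(abi_size: u64) u64 {
        return if (abi_size < 8)
            std.math.ceilPowerOfTwoAssert(u64, abi_size)
        else
            abi_size;
    }

    test "sub-word spills round up to a power of two" {
        try std.testing.expectEqual(@as(u64, 4), spillSize(3));
        try std.testing.expectEqual(@as(u64, 16), spillSize(16));
    }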
}; var first = true; for ((lower.lowerMir(data.inst) catch |err| switch (err) { @@ -933,7 +937,7 @@ fn formatDecl( } fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { return .{ .data = .{ - .mod = func.bin_file.comp.module.?, + .mod = func.pt.zcu, .decl_index = decl_index, } }; } @@ -950,7 +954,7 @@ fn formatAir( ) @TypeOf(writer).Error!void { @import("../../print_air.zig").dumpInst( data.inst, - data.func.bin_file.comp.module.?, + data.func.pt, data.func.air, data.func.liveness, ); @@ -1044,8 +1048,9 @@ const required_features = [_]Target.riscv.Feature{ }; fn gen(func: *Func) !void { - const mod = func.bin_file.comp.module.?; - const fn_info = mod.typeToFunc(func.fn_type).?; + const pt = func.pt; + const zcu = pt.zcu; + const fn_info = zcu.typeToFunc(func.fn_type).?; inline for (required_features) |feature| { if (!func.hasFeature(feature)) { @@ -1071,7 +1076,7 @@ fn gen(func: *Func) !void { // The address where to store the return value for the caller is in a // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt)); try func.genSetMem( .{ .frame = frame_index }, 0, @@ -1205,7 +1210,8 @@ fn gen(func: *Func) !void { } fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const air_tags = func.air.instructions.items(.tag); @@ -1672,44 +1678,46 @@ fn ensureProcessDeathCapacity(func: *Func, additional_count: usize) !void { } fn memSize(func: *Func, ty: Type) Memory.Size { - const mod = func.bin_file.comp.module.?; - return switch (ty.zigTypeTag(mod)) { + const pt = func.pt; + const zcu = pt.zcu; + return switch (ty.zigTypeTag(zcu)) { .Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)), - else => Memory.Size.fromByteSize(ty.abiSize(mod)), + else => Memory.Size.fromByteSize(ty.abiSize(pt)), }; } fn splitType(func: *Func, ty: Type) ![2]Type { - const zcu = func.bin_file.comp.module.?; - const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); + const pt = func.pt; + const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none); var parts: [2]Type = undefined; if (classes.len == 2) for (&parts, classes, 0..) 
|*part, class, part_i| { part.* = switch (class) { .integer => switch (part_i) { 0 => Type.u64, 1 => part: { - const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?; - const elem_ty = try zcu.intType(.unsigned, @intCast(elem_size * 8)); - break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) { + const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?; + const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8)); + break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) { 1 => elem_ty, - else => |len| try zcu.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), + else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), }; }, else => unreachable, }, else => return func.fail("TODO: splitType class {}", .{class}), }; - } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts; - return func.fail("TODO implement splitType for {}", .{ty.fmt(zcu)}); + } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts; + return func.fail("TODO implement splitType for {}", .{ty.fmt(pt)}); } /// Truncates the value in the register in place. /// Clobbers any remaining bits. fn truncateRegister(func: *Func, ty: Type, reg: Register) !void { - const mod = func.bin_file.comp.module.?; - const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ + const pt = func.pt; + const zcu = pt.zcu; + const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(ty.bitSize(mod)), + .bits = @intCast(ty.bitSize(pt)), }; const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return; switch (int_info.signedness) { @@ -1780,7 +1788,8 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void { } fn symbolIndex(func: *Func) !u32 { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const decl_index = zcu.funcOwnerDeclIndex(func.func_index); return switch (func.bin_file.tag) { .elf => blk: { @@ -1817,19 +1826,21 @@ fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. 
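The shift in `truncateRegister` is the classic clear-high-bits trick: for an N-bit integer held in a 64-bit register, shifting left and then back right by `64 - N % 64` zeroes the dead bits (logical shift) or sign-extends them (arithmetic shift). The unsigned case as a worked, self-contained function:

    const std = @import("std");

    fn truncUnsigned(x: u64, bits: u7) u64 {
        // the real code early-returns when no shift is needed
        std.debug.assert(bits >= 1 and bits <= 63);
        const shift: u6 = @intCast(64 - bits);
        return (x << shift) >> shift;
    }

    test "high bits are cleared" {
        try std.testing.expectEqual(@as(u64, 0xFF), truncUnsigned(0xABCD_12FF, 8));
    }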
fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ptr_ty = func.typeOfIndex(inst); const val_ty = ptr_ty.childType(zcu); return func.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(zcu)) orelse { - return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)}); + .size = math.cast(u32, val_ty.abiSize(pt)) orelse { + return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)}); }, - .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"), + .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"), })); } fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { .Float => .float, .Vector => @panic("TODO: typeRegClass for Vectors"), @@ -1838,7 +1849,8 @@ fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass { } fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { .Float => abi.Registers.Float.general_purpose, .Vector => @panic("TODO: regGeneralClassForType for Vectors"), @@ -1847,7 +1859,8 @@ fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet } fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { .Float => abi.Registers.Float.temporary, .Vector => @panic("TODO: regTempClassForType for Vectors"), @@ -1856,13 +1869,13 @@ fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet { } fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; - const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse { - return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)}); + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; - const min_size: u32 = switch (elem_ty.zigTypeTag(zcu)) { + const min_size: u32 = switch (elem_ty.zigTypeTag(pt.zcu)) { .Float => 4, .Vector => @panic("allocRegOrMem Vector"), else => 8, @@ -1874,7 +1887,7 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool } } - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, pt)); return .{ .load_frame = .{ .index = frame_index } }; } @@ -1955,7 +1968,7 @@ pub fn spillInstruction(func: *Func, reg: Register, inst: Air.Inst.Index) !void /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(func: *Func, ty: Type, mcv: MCValue) !Register { - log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.bin_file.comp.module.?)}); + log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.pt)}); const reg = try func.register_manager.allocReg(null, func.regTempClassForType(ty)); try func.genSetReg(ty, reg, mcv); return reg; @@ -2004,7 +2017,8 @@ fn airFpext(func: *Func, inst: Air.Inst.Index) !void { } fn airIntCast(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = func.typeOf(ty_op.operand); const dst_ty = func.typeOfIndex(inst); @@ -2040,7 +2054,7 @@ fn airIntCast(func: *Func, inst: Air.Inst.Index) !void { break :result dst_mcv; } orelse return func.fail("TODO: implement airIntCast from {} to {}", .{ - src_ty.fmt(zcu), dst_ty.fmt(zcu), + src_ty.fmt(pt), dst_ty.fmt(pt), }); return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2067,7 +2081,8 @@ fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void { fn airNot(func: *Func, inst: Air.Inst.Index) !void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const operand = try func.resolveInst(ty_op.operand); const ty = func.typeOf(ty_op.operand); @@ -2106,12 +2121,12 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void { } fn airSlice(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = func.typeOfIndex(inst); - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt)); const ptr_ty = func.typeOf(bin_op.lhs); try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }); @@ -2119,7 +2134,7 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void { const len_ty = func.typeOf(bin_op.rhs); try func.genSetMem( .{ .frame = frame_index }, - @intCast(ptr_ty.abiSize(zcu)), + @intCast(ptr_ty.abiSize(pt)), len_ty, .{ .air_ref = bin_op.rhs }, ); @@ -2129,14 +2144,15 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void { } fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs); const dst_ty = func.typeOfIndex(inst); if (dst_ty.isAbiInt(zcu)) { - const abi_size: u32 = @intCast(dst_ty.abiSize(zcu)); - const bit_size: u32 = @intCast(dst_ty.bitSize(zcu)); + const abi_size: u32 = @intCast(dst_ty.abiSize(pt)); + const bit_size: u32 = @intCast(dst_ty.bitSize(pt)); if (abi_size * 8 > bit_size) { const dst_lock = switch (dst_mcv) { .register => |dst_reg| func.register_manager.lockRegAssumeUnused(dst_reg), @@ -2150,7 +2166,7 @@ fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const tmp_reg, const tmp_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(tmp_lock); - const hi_ty = try zcu.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 
64 + 1)); + const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1)); const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref(); try func.genSetReg(hi_ty, tmp_reg, hi_mcv); try func.truncateRegister(dst_ty, tmp_reg); @@ -2170,7 +2186,7 @@ fn binOp( rhs_air: Air.Inst.Ref, ) !MCValue { _ = maybe_inst; - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; const lhs_ty = func.typeOf(lhs_air); const rhs_ty = func.typeOf(rhs_air); @@ -2189,7 +2205,7 @@ fn binOp( return func.fail("binOp libcall runtime-float ops", .{}); } - if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp >= 64 bits", .{}); + if (lhs_ty.bitSize(pt) > 64) return func.fail("TODO: binOp >= 64 bits", .{}); const lhs_mcv = try func.resolveInst(lhs_air); const rhs_mcv = try func.resolveInst(rhs_air); @@ -2237,8 +2253,9 @@ fn genBinOp( rhs_ty: Type, dst_reg: Register, ) !void { - const zcu = func.bin_file.comp.module.?; - const bit_size = lhs_ty.bitSize(zcu); + const pt = func.pt; + const zcu = pt.zcu; + const bit_size = lhs_ty.bitSize(pt); assert(bit_size <= 64); const is_unsigned = lhs_ty.isUnsignedInt(zcu); @@ -2349,7 +2366,7 @@ fn genBinOp( defer func.register_manager.unlockReg(tmp_lock); // RISC-V has no immediate mul, so we copy the size to a temporary register - const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu); + const elem_size = lhs_ty.elemType2(zcu).abiSize(pt); const elem_size_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = elem_size }); try func.genBinOp( @@ -2613,7 +2630,8 @@ fn airPtrArithmetic(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2632,7 +2650,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg); defer func.register_manager.unlockReg(add_result_reg_lock); - const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits); + const shift_amount: u6 = @intCast(Type.usize.bitSize(pt) - int_info.bits); const shift_reg, const shift_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(shift_lock); @@ -2663,7 +2681,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { try func.genSetMem( .{ .frame = offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))), lhs_ty, add_result, ); @@ -2682,7 +2700,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { try func.genSetMem( .{ .frame = offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))), Type.u1, .{ .register = overflow_reg }, ); @@ -2697,7 +2715,8 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2727,7 +2746,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { try func.genSetMem( .{ .frame = 
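`airAddWithOverflow` checks for overflow by performing the add at full register width, shifting the result up and back down by `usize` bits minus the integer's bit count, and comparing with the unshifted sum; any difference means the true value no longer fits. The same test as a self-contained function for unsigned operands already in range:

    fn addOverflows(a: u64, b: u64, bits: u7) bool {
        const sum = a +% b;
        const shift: u6 = @intCast(64 - bits); // bits in 1..63
        const truncated = (sum << shift) >> shift;
        return truncated != sum;
    }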
offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))), + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))), lhs_ty, .{ .register = dest_reg }, ); @@ -2757,7 +2776,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { try func.genSetMem( .{ .frame = offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))), Type.u1, .{ .register = overflow_reg }, ); @@ -2808,7 +2827,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { try func.genSetMem( .{ .frame = offset.index }, - offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))), + offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))), Type.u1, .{ .register = overflow_reg }, ); @@ -2825,7 +2844,8 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2840,8 +2860,8 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { // genSetReg needs to support register_offset src_mcv for this to be true. const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false); - const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu)); - const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); + const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt)); + const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, pt)); const dest_reg, const dest_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(dest_lock); @@ -2957,11 +2977,11 @@ fn airShlSat(func: *Func, inst: Air.Inst.Index) !void { } fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { const pl_ty = func.typeOfIndex(inst); - if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const opt_mcv = try func.resolveInst(ty_op.operand); if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) { @@ -2993,7 +3013,8 @@ fn airOptionalPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void { fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const err_union_ty = func.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); @@ -3004,11 +3025,11 @@ fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void { break :result .{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :result operand; } - const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu)); + const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, pt)); switch (operand) { .register => |reg| { @@ -3052,13 +3073,14 @@ fn genUnwrapErrUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; 
const payload_ty = err_union_ty.errorUnionPayload(zcu); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; - const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu)); + const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt)); switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, @@ -3127,11 +3149,12 @@ fn airSaveErrReturnTraceIndex(func: *Func, inst: Air.Inst.Index) !void { } fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { const pl_ty = func.typeOf(ty_op.operand); - if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 }; + if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 }; const opt_ty = func.typeOfIndex(inst); const pl_mcv = try func.resolveInst(ty_op.operand); @@ -3148,7 +3171,7 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { try func.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu)); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt)); switch (opt_mcv) { .load_frame => |frame_addr| try func.genSetMem( .{ .frame = frame_addr.index }, @@ -3167,7 +3190,8 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); @@ -3176,11 +3200,11 @@ fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void { const operand = try func.resolveInst(ty_op.operand); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 }; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 }; - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); - const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3191,7 +3215,8 @@ fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); @@ -3199,11 +3224,11 @@ fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void { const err_ty = eu_ty.errorUnionSet(zcu); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try func.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try 
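These wrappers all store into a spilled frame slot at offsets derived from the payload type: `airWrapOptional` puts the non-null flag directly after the payload, and the error-union paths place the payload and error code at `errUnionPayloadOffset`/`errUnionErrorOffset`, with a stored error code of zero meaning success. Two loose simplifications of those rules (the real offsets also depend on the error set's alignment):

    // Non-pointer-like optional: payload first, one-byte flag after it.
    fn optionalFlagOffset(payload_abi_size: u64) u64 {
        return payload_abi_size;
    }

    // Error unions encode "no error" as the zero error code, which is
    // exactly what airWrapErrUnionPayload stores at err_off.
    fn isSuccess(err_code: u16) bool {
        return err_code == 0;
    }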
func.resolveInst(ty_op.operand); - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu)); - const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu)); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try func.resolveInst(ty_op.operand); try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3327,15 +3352,16 @@ fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void { } fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void { - const mod = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = result: { const elem_ty = func.typeOfIndex(inst); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const slice_ty = func.typeOf(bin_op.lhs); - const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu); const elem_ptr = try func.genSliceElemPtr(bin_op.lhs, bin_op.rhs); const dst_mcv = try func.allocRegOrMem(elem_ty, inst, false); try func.load(dst_mcv, elem_ptr, slice_ptr_field_type); @@ -3352,7 +3378,8 @@ fn airSliceElemPtr(func: *Func, inst: Air.Inst.Index) !void { } fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const slice_ty = func.typeOf(lhs); const slice_mcv = try func.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { @@ -3362,7 +3389,7 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer if (slice_mcv_lock) |lock| func.register_manager.unlockReg(lock); const elem_ty = slice_ty.childType(zcu); - const elem_size = elem_ty.abiSize(zcu); + const elem_size = elem_ty.abiSize(pt); const index_ty = func.typeOf(rhs); const index_mcv = try func.resolveInst(rhs); @@ -3394,7 +3421,8 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { } fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const result_ty = func.typeOfIndex(inst); @@ -3406,14 +3434,14 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { const index_ty = func.typeOf(bin_op.rhs); const elem_ty = array_ty.childType(zcu); - const elem_abi_size = elem_ty.abiSize(zcu); + const elem_abi_size = elem_ty.abiSize(pt); const addr_reg, const addr_reg_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(addr_reg_lock); switch (array_mcv) { .register => { - const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, pt)); try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv); try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } }); }, @@ -3451,7 +3479,8 @@ fn 
airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void { } fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3474,7 +3503,7 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void { } const elem_ty = base_ptr_ty.elemType2(zcu); - const elem_abi_size = elem_ty.abiSize(zcu); + const elem_abi_size = elem_ty.abiSize(pt); const index_ty = func.typeOf(extra.rhs); const index_mcv = try func.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index_mcv) { @@ -3536,7 +3565,8 @@ fn airPopcount(func: *Func, inst: Air.Inst.Index) !void { } fn airAbs(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const ty = func.typeOf(ty_op.operand); @@ -3545,7 +3575,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void { switch (scalar_ty.zigTypeTag(zcu)) { .Int => if (ty.zigTypeTag(zcu) == .Vector) { - return func.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)}); + return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)}); } else { const return_mcv = try func.copyToNewRegister(inst, operand); const operand_reg = return_mcv.register; @@ -3615,7 +3645,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void { break :result return_mcv; }, - else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}), + else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(pt)}), } break :result .unreach; @@ -3626,7 +3656,8 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void { fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty = func.typeOf(ty_op.operand); const operand = try func.resolveInst(ty_op.operand); @@ -3746,12 +3777,13 @@ fn reuseOperandAdvanced( } fn airLoad(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = func.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits(zcu)) + if (!elem_ty.hasRuntimeBits(pt)) break :result .none; const ptr = try func.resolveInst(ty_op.operand); @@ -3759,7 +3791,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void { if (func.liveness.isUnused(inst) and !is_volatile) break :result .unreach; - const elem_size = elem_ty.abiSize(zcu); + const elem_size = elem_ty.abiSize(pt); const dst_mcv: MCValue = blk: { // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it. 
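The comment above states the reuse rule in `airLoad`: a pointer occupies a single 8-byte register, so that register can be recycled for the loaded value only when the element also fits in 8 bytes. As a predicate:

    fn canReusePtrRegister(elem_abi_size: u64) bool {
        return elem_abi_size <= 8; // bigger values need their own frame slot
    }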
@@ -3778,10 +3810,11 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void { } fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const dst_ty = ptr_ty.childType(zcu); - log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(zcu), dst_mcv }); + log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(pt), dst_mcv }); switch (ptr_mcv) { .none, @@ -3833,9 +3866,7 @@ fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void { /// Loads `value` into the "payload" of `pointer`. fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void { - const zcu = func.bin_file.comp.module.?; - - log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(zcu), ptr_mcv, ptr_ty.fmt(zcu) }); + log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(func.pt), ptr_mcv, ptr_ty.fmt(func.pt) }); switch (ptr_mcv) { .none => unreachable, @@ -3881,7 +3912,8 @@ fn airStructFieldPtrIndex(func: *Func, inst: Air.Inst.Index, index: u8) !void { } fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ptr_field_ty = func.typeOfIndex(inst); const ptr_container_ty = func.typeOf(operand); const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu); @@ -3889,12 +3921,12 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const field_offset: i32 = if (zcu.typeToPackedStruct(container_ty)) |struct_obj| if (ptr_field_ty.ptrInfo(zcu).packed_offset.host_size == 0) - @divExact(zcu.structPackedFieldBitOffset(struct_obj, index) + + @divExact(pt.structPackedFieldBitOffset(struct_obj, index) + ptr_container_ty_info.packed_offset.bit_offset, 8) else 0 else - @intCast(container_ty.structFieldOffset(index, zcu)); + @intCast(container_ty.structFieldOffset(index, pt)); const src_mcv = try func.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { @@ -3906,7 +3938,8 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde } fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { - const mod = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload).data; @@ -3914,16 +3947,15 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { const index = extra.field_index; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { - const zcu = func.bin_file.comp.module.?; const src_mcv = try func.resolveInst(operand); const struct_ty = func.typeOf(operand); const field_ty = struct_ty.structFieldType(index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const field_off: u32 = switch (struct_ty.containerLayout(zcu)) { - .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8), + .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, pt) * 8), .@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type| - zcu.structPackedFieldBitOffset(struct_type, index) + pt.structPackedFieldBitOffset(struct_type, index) else 0, }; @@ -3958,15 +3990,15 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { break :result if (field_off == 0) dst_mcv else try 
func.copyToNewRegister(inst, dst_mcv); }, .load_frame => { - const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); + const field_abi_size: u32 = @intCast(field_ty.abiSize(pt)); if (field_off % 8 == 0) { const field_byte_off = @divExact(field_off, 8); const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref(); - const field_bit_size = field_ty.bitSize(mod); + const field_bit_size = field_ty.bitSize(pt); if (field_abi_size <= 8) { - const int_ty = try mod.intType( - if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned, + const int_ty = try pt.intType( + if (field_ty.isAbiInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned, @intCast(field_bit_size), ); @@ -3978,7 +4010,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { break :result try func.copyToNewRegister(inst, dst_mcv); } - const container_abi_size: u32 = @intCast(struct_ty.abiSize(mod)); + const container_abi_size: u32 = @intCast(struct_ty.abiSize(pt)); const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and func.reuseOperand(inst, operand, 0, src_mcv)) off_mcv @@ -4014,7 +4046,8 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); const owner_decl = zcu.funcOwnerDeclIndex(func.func_index); @@ -4139,7 +4172,8 @@ fn genCall( arg_tys: []const Type, args: []const MCValue, ) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const fn_ty = switch (info) { .air => |callee| fn_info: { @@ -4150,7 +4184,7 @@ fn genCall( else => unreachable, }; }, - .lib => |lib| try zcu.funcType(.{ + .lib => |lib| try pt.funcType(.{ .param_types = lib.param_types, .return_type = lib.return_type, .cc = .C, @@ -4208,7 +4242,7 @@ fn genCall( try reg_locks.appendSlice(&func.register_manager.lockRegs(2, regs)); }, .indirect => |reg_off| { - frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu)); + frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, pt)); try func.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg); try func.register_manager.getReg(reg_off.reg, null); try reg_locks.append(func.register_manager.lockReg(reg_off.reg)); @@ -4221,7 +4255,7 @@ fn genCall( .none, .unreach => {}, .indirect => |reg_off| { const ret_ty = Type.fromInterned(fn_info.return_type); - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt)); try func.genSetReg(Type.usize, reg_off.reg, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }); @@ -4251,7 +4285,7 @@ fn genCall( // on linking. 
switch (info) { .air => |callee| { - if (try func.air.value(callee, zcu)) |func_value| { + if (try func.air.value(callee, pt)) |func_value| { const func_key = zcu.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, @@ -4324,7 +4358,8 @@ fn genCall( } fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; if (safety) { @@ -4394,7 +4429,8 @@ fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void { fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const lhs_ty = func.typeOf(bin_op.lhs); @@ -4415,7 +4451,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { .ErrorSet => Type.anyerror, .Optional => blk: { const payload_ty = lhs_ty.optionalChild(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(zcu)) { break :blk Type.usize; @@ -4503,7 +4539,8 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -4595,13 +4632,14 @@ fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index { } fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const pl_ty = opt_ty.optionalChild(zcu); const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu)) .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty } else - .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool }; + .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool }; const return_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true); assert(return_mcv == .register); // should not be larger 8 bytes @@ -4642,7 +4680,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC return return_mcv; } assert(some_info.ty.ip_index == .bool_type); - const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu)); + const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt)); _ = opt_abi_size; return func.fail("TODO: isNull some_info.off != 0 register", .{}); }, @@ -4742,7 +4780,8 @@ fn airIsErr(func: *Func, inst: Air.Inst.Index) !void { } fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try func.resolveInst(un_op); @@ -4768,10 +4807,11 @@ fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void { /// Result is in the return register. 
fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { _ = maybe_inst; - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const err_ty = eu_ty.errorUnionSet(zcu); if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false - const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu)); + const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), pt)); const return_reg, const return_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(return_lock); @@ -4858,7 +4898,8 @@ fn isNonErr(func: *Func, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC } fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const operand_ptr = try func.resolveInst(un_op); @@ -5063,12 +5104,12 @@ fn performReloc(func: *Func, inst: Mir.Inst.Index) void { } fn airBr(func: *Func, inst: Air.Inst.Index) !void { - const mod = func.bin_file.comp.module.?; + const pt = func.pt; const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br; const block_ty = func.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime(mod) or func.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(pt) or func.liveness.isUnused(br.block_inst); const block_tracking = func.inst_tracking.getPtr(br.block_inst).?; const block_data = func.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -5288,8 +5329,6 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void { /// Sets the value of `dst_mcv` to the value of `src_mcv`. fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { - const zcu = func.bin_file.comp.module.?; - // There isn't anything to store if (dst_mcv == .none) return; @@ -5362,7 +5401,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { } }, else => unreachable, }); - part_disp += @intCast(dst_ty.abiSize(zcu)); + part_disp += @intCast(dst_ty.abiSize(func.pt)); } }, else => return func.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }), @@ -5555,8 +5594,9 @@ fn genInlineMemset( /// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it. 
fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void { - const zcu = func.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(zcu)); + const pt = func.pt; + const zcu = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size}); @@ -5784,8 +5824,8 @@ fn genSetMem( ty: Type, src_mcv: MCValue, ) InnerError!void { - const mod = func.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = func.pt; + const abi_size: u32 = @intCast(ty.abiSize(pt)); const dst_ptr_mcv: MCValue = switch (base) { .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, @@ -5883,7 +5923,7 @@ fn genSetMem( var part_disp: i32 = disp; for (try func.splitType(ty), src_regs) |src_ty, src_reg| { try func.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }); - part_disp += @intCast(src_ty.abiSize(mod)); + part_disp += @intCast(src_ty.abiSize(pt)); } }, .immediate => { @@ -5914,7 +5954,8 @@ fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void { } fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = if (func.liveness.isUnused(inst)) .unreach else result: { @@ -5926,10 +5967,10 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null; defer if (src_lock) |lock| func.register_manager.unlockReg(lock); - const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and + const dst_mcv = if (dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true); - try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) { + try func.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) { .lt => dst_ty, .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, .gt => src_ty, @@ -5940,17 +5981,18 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void { if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv; - const abi_size = dst_ty.abiSize(zcu); - const bit_size = dst_ty.bitSize(zcu); + const abi_size = dst_ty.abiSize(pt); + const bit_size = dst_ty.bitSize(pt); if (abi_size * 8 <= bit_size) break :result dst_mcv; - return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) }); + return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(pt), dst_ty.fmt(pt) }); }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ty = func.typeOfIndex(inst); @@ -5959,11 +6001,11 @@ fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(zcu); const array_len = array_ty.arrayLen(zcu); - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu)); + const frame_index = try 
func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt)); try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try func.genSetMem( .{ .frame = frame_index }, - @intCast(ptr_ty.abiSize(zcu)), + @intCast(ptr_ty.abiSize(pt)), Type.usize, .{ .immediate = array_len }, ); @@ -6015,7 +6057,8 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; result: { @@ -6037,7 +6080,7 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| func.register_manager.unlockReg(lock); - const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu)); + const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) { @@ -6068,7 +6111,7 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void { switch (dst_ptr_ty.ptrSize(zcu)) { .Slice => return func.fail("TODO: airMemset Slices", .{}), .One => { - const elem_ptr_ty = try zcu.singleMutPtrType(elem_ty); + const elem_ptr_ty = try pt.singleMutPtrType(elem_ty); const len = dst_ptr_ty.childType(zcu).arrayLen(zcu); @@ -6110,7 +6153,8 @@ fn airTagName(func: *Func, inst: Air.Inst.Index) !void { } fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const err_ty = func.typeOf(un_op); @@ -6126,7 +6170,7 @@ fn airErrorName(func: *Func, inst: Air.Inst.Index) !void { // this is now the base address of the error name table const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu); if (func.bin_file.cast(link.File.Elf)) |elf_file| { - const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err| + const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| return func.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym = elf_file.symbol(sym_index); try func.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } }); @@ -6239,7 +6283,8 @@ fn airReduce(func: *Func, inst: Air.Inst.Index) !void { } fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const result_ty = func.typeOfIndex(inst); const len: usize = @intCast(result_ty.arrayLen(zcu)); const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6248,21 +6293,21 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { const result: MCValue = result: { switch (result_ty.zigTypeTag(zcu)) { .Struct => { - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt)); if (result_ty.containerLayout(zcu) == .@"packed") { const struct_obj = zcu.typeToStruct(result_ty).?; try func.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(zcu) }, + .{ .immediate = result_ty.abiSize(pt) }, ); for (elements, 0..) 
|elem, elem_i_usize| { const elem_i: u32 = @intCast(elem_i_usize); - if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, zcu); - const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); + const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt)); if (elem_bit_size > 64) { return func.fail( "TODO airAggregateInit implement packed structs with large fields", @@ -6270,9 +6315,9 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { ); } - const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu)); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i); + const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i); const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try func.resolveInst(elem); @@ -6293,10 +6338,10 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { return func.fail("TODO: airAggregateInit packed structs", .{}); } } else for (elements, 0..) |elem, elem_i| { - if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, zcu); - const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); + const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt)); const elem_mcv = try func.resolveInst(elem); try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); } @@ -6304,8 +6349,8 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { }, .Array => { const elem_ty = result_ty.childType(zcu); - const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu)); - const elem_size: u32 = @intCast(elem_ty.abiSize(zcu)); + const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt)); + const elem_size: u32 = @intCast(elem_ty.abiSize(pt)); for (elements, 0..) |elem, elem_i| { const elem_mcv = try func.resolveInst(elem); @@ -6325,7 +6370,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { ); break :result .{ .load_frame = .{ .index = frame_index } }; }, - else => return func.fail("TODO: airAggregate {}", .{result_ty.fmt(zcu)}), + else => return func.fail("TODO: airAggregate {}", .{result_ty.fmt(pt)}), } }; @@ -6364,11 +6409,11 @@ fn airMulAdd(func: *Func, inst: Air.Inst.Index) !void { } fn resolveInst(func: *Func, ref: Air.Inst.Ref) InnerError!MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; // If the type has no codegen bits, no need to store it. 
const inst_ty = func.typeOf(ref); - if (!inst_ty.hasRuntimeBits(zcu)) + if (!inst_ty.hasRuntimeBits(pt)) return .none; const mcv = if (ref.toIndex()) |inst| mcv: { @@ -6394,9 +6439,11 @@ fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking { } fn genTypedValue(func: *Func, val: Value) InnerError!MCValue { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const result = try codegen.genTypedValue( func.bin_file, + pt, func.src_loc, val, zcu.funcOwnerDeclIndex(func.func_index), @@ -6438,7 +6485,8 @@ fn resolveCallingConventionValues( fn_info: InternPool.Key.FuncType, var_args: []const Type, ) !CallMCValues { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const param_types = try func.gpa.alloc(Type, fn_info.param_types.len + var_args.len); @@ -6481,14 +6529,14 @@ fn resolveCallingConventionValues( // Return values if (ret_ty.zigTypeTag(zcu) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { result.return_value = InstTracking.init(.none); } else { var ret_tracking: [2]InstTracking = undefined; var ret_tracking_i: usize = 0; var ret_float_reg_i: usize = 0; - const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none); + const classes = mem.sliceTo(&abi.classifySystem(ret_ty, pt), .none); for (classes) |class| switch (class) { .integer => { @@ -6521,7 +6569,7 @@ fn resolveCallingConventionValues( }; result.return_value = switch (ret_tracking_i) { - else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }), + else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(pt), ret_tracking_i }), 1 => ret_tracking[0], 2 => InstTracking.init(.{ .register_pair = .{ ret_tracking[0].short.register, ret_tracking[1].short.register, @@ -6532,7 +6580,7 @@ fn resolveCallingConventionValues( var param_float_reg_i: usize = 0; for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { assert(cc == .Unspecified); arg.* = .none; continue; @@ -6541,7 +6589,7 @@ fn resolveCallingConventionValues( var arg_mcv: [2]MCValue = undefined; var arg_mcv_i: usize = 0; - const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none); + const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none); for (classes) |class| switch (class) { .integer => { @@ -6576,7 +6624,7 @@ fn resolveCallingConventionValues( else => return func.fail("TODO: C calling convention arg class {}", .{class}), } else { arg.* = switch (arg_mcv_i) { - else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }), + else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(pt), arg_mcv_i }), 1 => arg_mcv[0], 2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } }, }; @@ -6621,12 +6669,14 @@ fn parseRegName(name: []const u8) ?Register { } fn typeOf(func: *Func, inst: Air.Inst.Ref) Type { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; return func.air.typeOf(inst, &zcu.intern_pool); } fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type { - const zcu = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; return func.air.typeOfIndex(inst, &zcu.intern_pool); } @@ -6634,40 +6684,41 @@ fn hasFeature(func: *Func, feature: 
Target.riscv.Feature) bool { return Target.riscv.featureSetHas(func.target.cpu.features, feature); } -pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0; - const payload_align = payload_ty.abiAlignment(zcu); - const error_align = Type.anyerror.abiAlignment(zcu); - if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { +pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; + const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return 0; } else { - return payload_align.forward(Type.anyerror.abiSize(zcu)); + return payload_align.forward(Type.anyerror.abiSize(pt)); } } -pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0; - const payload_align = payload_ty.abiAlignment(zcu); - const error_align = Type.anyerror.abiAlignment(zcu); - if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return error_align.forward(payload_ty.abiSize(zcu)); +pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; + const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + return error_align.forward(payload_ty.abiSize(pt)); } else { return 0; } } fn promoteInt(func: *Func, ty: Type) Type { - const mod = func.bin_file.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const int_info: InternPool.Key.IntType = switch (ty.toIntern()) { .bool_type => .{ .signedness = .unsigned, .bits = 1 }, - else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty, + else => if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else return ty, }; for ([_]Type{ Type.c_int, Type.c_uint, Type.c_long, Type.c_ulong, Type.c_longlong, Type.c_ulonglong, }) |promote_ty| { - const promote_info = promote_ty.intInfo(mod); + const promote_info = promote_ty.intInfo(zcu); if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue; if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and promote_info.signedness == .signed) <= promote_info.bits) return promote_ty; diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 8107c6350f5b..64798fd25511 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -1,5 +1,6 @@ //! 
This file contains the functionality for emitting RISC-V MIR as machine code +bin_file: *link.File, lower: Lower, debug_output: DebugInfoOutput, code: *std.ArrayList(u8), @@ -48,7 +49,7 @@ pub fn emitMir(emit: *Emit) Error!void { .Lib => emit.lower.link_mode == .static, }; - if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + if (emit.bin_file.cast(link.File.Elf)) |elf_file| { const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?; const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index); const sym = elf_file.symbol(sym_index); @@ -77,7 +78,7 @@ pub fn emitMir(emit: *Emit) Error!void { } else return emit.fail("TODO: load_symbol_reloc non-ELF", .{}); }, .call_extern_fn_reloc => |symbol| { - if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + if (emit.bin_file.cast(link.File.Elf)) |elf_file| { const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?; const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT); diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index 3d3dc8513fe1..f71311ff9781 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -1,6 +1,6 @@ //! This file contains the functionality for lowering RISC-V MIR to Instructions -bin_file: *link.File, +pt: Zcu.PerThread, output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, pic: bool, @@ -44,7 +44,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { insts: []const Instruction, relocs: []const Reloc, } { - const zcu = lower.bin_file.comp.module.?; + const pt = lower.pt; lower.result_insts = undefined; lower.result_relocs = undefined; @@ -243,11 +243,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const class = rs1.class(); const ty = compare.ty; - const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch { - return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)}); + const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch { + return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)}); }; - const is_unsigned = ty.isUnsignedInt(zcu); + const is_unsigned = ty.isUnsignedInt(pt.zcu); const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt; @@ -502,7 +502,7 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { } fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool { - const target = lower.bin_file.comp.module.?.getTarget(); + const target = lower.pt.zcu.getTarget(); const features = target.cpu.features; return std.Target.riscv.featureSetHas(features, feature); } diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 042af564f6ab..0a41b61d795f 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -9,15 +9,15 @@ const assert = std.debug.assert; pub const Class = enum { memory, byval, integer, double_integer, fields }; -pub fn classifyType(ty: Type, mod: *Zcu) Class { - const target = mod.getTarget(); - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); +pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class { + const target = pt.zcu.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Struct => { - const bit_size = ty.bitSize(mod); - if (ty.containerLayout(mod) == .@"packed") { + const bit_size = ty.bitSize(pt); + if (ty.containerLayout(pt.zcu) == .@"packed") { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -25,12 
+25,12 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class { if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: { var any_fp = false; var field_count: usize = 0; - for (0..ty.structFieldCount(mod)) |field_index| { - const field_ty = ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + for (0..ty.structFieldCount(pt.zcu)) |field_index| { + const field_ty = ty.structFieldType(field_index, pt.zcu); + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (field_ty.isRuntimeFloat()) any_fp = true - else if (!field_ty.isAbiInt(mod)) + else if (!field_ty.isAbiInt(pt.zcu)) break :fields; field_count += 1; if (field_count > 2) break :fields; @@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(mod); - if (ty.containerLayout(mod) == .@"packed") { + const bit_size = ty.bitSize(pt); + if (ty.containerLayout(pt.zcu) == .@"packed") { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(mod); + const bit_size = ty.bitSize(pt); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional(mod)); + std.debug.assert(ty.isPtrLikeOptional(pt.zcu)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice(mod)); + std.debug.assert(!ty.isSlice(pt.zcu)); return .byval; }, .ErrorUnion, @@ -97,18 +97,19 @@ pub const SystemClass = enum { integer, float, memory, none }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
-pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass { +pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass { + const zcu = pt.zcu; var result = [1]SystemClass{.none} ** 8; const memory_class = [_]SystemClass{ .memory, .none, .none, .none, .none, .none, .none, .none, }; - switch (ty.zigTypeTag(zcu)) { + switch (ty.zigTypeTag(pt.zcu)) { .Bool, .Void, .NoReturn => { result[0] = .integer; return result; }, - .Pointer => switch (ty.ptrSize(zcu)) { + .Pointer => switch (ty.ptrSize(pt.zcu)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -120,17 +121,17 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass { }, }, .Optional => { - if (ty.isPtrLikeOptional(zcu)) { + if (ty.isPtrLikeOptional(pt.zcu)) { result[0] = .integer; return result; } result[0] = .integer; - if (ty.optionalChild(zcu).abiSize(zcu) == 0) return result; + if (ty.optionalChild(zcu).abiSize(pt) == 0) return result; result[1] = .integer; return result; }, .Int, .Enum, .ErrorSet => { - const int_bits = ty.intInfo(zcu).bits; + const int_bits = ty.intInfo(pt.zcu).bits; if (int_bits <= 64) { result[0] = .integer; return result; @@ -155,8 +156,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass { unreachable; // support split float args }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(zcu); - const payload_bits = payload_ty.bitSize(zcu); + const payload_ty = ty.errorUnionPayload(pt.zcu); + const payload_bits = payload_ty.bitSize(pt); // the error union itself result[0] = .integer; @@ -167,8 +168,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass { return memory_class; }, .Struct => { - const layout = ty.containerLayout(zcu); - const ty_size = ty.abiSize(zcu); + const layout = ty.containerLayout(pt.zcu); + const ty_size = ty.abiSize(pt); if (layout == .@"packed") { assert(ty_size <= 16); @@ -180,7 +181,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass { return memory_class; }, .Array => { - const ty_size = ty.abiSize(zcu); + const ty_size = ty.abiSize(pt); if (ty_size <= 8) { result[0] = .integer; return result; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b837eb9ade4a..6dea4977538a 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -11,11 +11,9 @@ const Allocator = mem.Allocator; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const Value = @import("../../Value.zig"); -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const codegen = @import("../../codegen.zig"); const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); @@ -52,6 +50,7 @@ const RegisterView = enum(u1) { }; gpa: Allocator, +pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, @@ -64,7 +63,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: usize, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, stack_align: Alignment, /// MIR Instructions @@ -263,15 +262,16 @@ const BigTomb = struct { pub fn generate( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const gpa = lf.comp.gpa; - const zcu = lf.comp.module.?; + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); const fn_owner_decl = zcu.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); @@ -289,11 +289,12 @@ pub fn generate( var function = Self{ .gpa = gpa, + .pt = pt, .air = air, .liveness = liveness, .target = target, - .func_index = func_index, .bin_file = lf, + .func_index = func_index, .code = code, .debug_output = debug_output, .err_msg = null, @@ -365,7 +366,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -493,7 +495,8 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -757,7 +760,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); @@ -835,7 +839,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -869,7 +874,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); @@ -1006,7 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = 
self.pt; const arg_index = self.arg_index; self.arg_index += 1; @@ -1016,8 +1022,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const mcv = blk: { switch (arg) { .stack_offset => |off| { - const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); + const abi_size = math.cast(u32, ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)}); }; const offset = off + abi_size; break :blk MCValue{ .stack_offset = offset }; @@ -1205,7 +1211,8 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you. @@ -1228,7 +1235,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(mod); + const abi_align = operand_ty.abiAlignment(pt); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.big => ASI.asi_primary_little, Endian.little => ASI.asi_primary, @@ -1297,7 +1304,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len])); const ty = self.typeOf(callee); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(mod), @@ -1341,7 +1349,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (try self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, pt)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { switch (mod.intern_pool.indexToKey(func_value.ip_index)) { .func => |func| { @@ -1429,7 +1437,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -1444,7 +1453,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .ErrorSet => Type.u16, .Optional => blk: { const payload_ty = lhs_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; @@ -1655,7 +1664,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const func = mod.funcInfo(extra.data.func); @@ -1753,7 +1763,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(mod); @@ -1814,12 +1825,13 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits(mod)) + if (!elem_ty.hasRuntimeBits(pt)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -1898,7 +1910,7 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); const rhs_ty = self.typeOf(bin_op.rhs); - assert(lhs_ty.eql(rhs_ty, self.bin_file.comp.module.?)); + assert(lhs_ty.eql(rhs_ty, self.pt.zcu)); if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -2040,7 +2052,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); @@ -2104,7 +2117,8 @@ fn airMulWithOverflow(self: 
*Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -2336,7 +2350,8 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); @@ -2441,7 +2456,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -2452,7 +2468,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOf(bin_op.lhs); const elem_ty = slice_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); @@ -2566,10 +2582,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); switch (mcv) { .dead, .unreach => unreachable, @@ -2699,13 +2715,14 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; + if (!payload_ty.hasRuntimeBits(pt)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2713,12 +2730,13 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBits(mod)) break :result 
MCValue.none; + if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2727,13 +2745,14 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = ty_op.ty.toType(); const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; + if (!payload_ty.hasRuntimeBits(pt)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2748,13 +2767,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(mod) == 1) + if (optional_ty.abiSize(pt) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2788,10 +2807,11 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits(mod)) { + if (!elem_ty.hasRuntimeBits(pt)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. 
Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2799,21 +2819,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const elem_ty = self.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)}); }; - const abi_align = elem_ty.abiAlignment(mod); + const abi_align = elem_ty.abiAlignment(pt); self.stack_align = self.stack_align.max(abi_align); if (reg_ok) { @@ -2855,7 +2875,8 @@ fn binOp( rhs_ty: Type, metadata: ?BinOpMetadata, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (tag) { .add, .sub, @@ -2996,7 +3017,7 @@ fn binOp( .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3396,8 +3417,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - const mod = self.bin_file.comp.module.?; - if (self.typeOf(operand).hasRuntimeBits(mod)) { + const pt = self.pt; + if (self.typeOf(operand).hasRuntimeBits(pt)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3516,17 +3537,18 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = error_union_ty.errorUnionSet(mod); const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return MCValue.none; } - const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); + const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3587,7 +3609,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live } fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = 
arg.ty.toType(); const owner_decl = mod.funcOwnerDeclIndex(self.func_index); @@ -3736,7 +3759,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3935,20 +3958,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const mod = pt.zcu; + const abi_size = ty.abiSize(pt); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3956,7 +3980,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(mod)) { + switch (ty.abiSize(pt)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3986,7 +4010,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1, mod); - const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod))); + const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt))); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4032,7 +4056,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - const ptr_ty = try mod.singleMutPtrType(ty); + const ptr_ty = try pt.singleMutPtrType(ty); const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp); const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs); @@ -4121,12 +4145,13 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re } fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, + pt, self.src_loc, val, - mod.funcOwnerDeclIndex(self.func_index), + pt.zcu.funcOwnerDeclIndex(self.func_index), )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -4157,14 +4182,15 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: 
Type, operand: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const error_type = ty.errorUnionSet(mod); const payload_type = ty.errorUnionPayload(mod); - if (!error_type.hasRuntimeBits(mod)) { + if (!error_type.hasRuntimeBits(pt)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits(mod)) { - if (error_type.abiSize(mod) <= 8) { + } else if (!payload_type.hasRuntimeBits(pt)) { + if (error_type.abiSize(pt) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ -4255,9 +4281,10 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const elem_ty = ptr_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); switch (ptr) { .none => unreachable, @@ -4326,7 +4353,8 @@ fn minMax( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; assert(lhs_ty.eql(rhs_ty, mod)); switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), @@ -4446,7 +4474,8 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; @@ -4487,7 +4516,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { - const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))); + const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))); if (param_size <= 8) { if (next_register < argument_registers.len) { result_arg.* = .{ .register = argument_registers[next_register] }; @@ -4516,10 +4545,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits(mod)) { + } else if (!ret_ty.hasRuntimeBits(pt)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod))); + const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt)); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4538,21 +4567,22 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. 
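As context for the `hasRuntimeBitsIgnoreComptime` check that follows: zero-bit types never need a register or stack slot, which is why `resolveInst` can hand back `.none` for them. A minimal standalone sketch of that property, using only the standard library (illustrative only, not part of this patch):

    const std = @import("std");

    test "zero-bit types occupy no storage" {
        // None of these types carry runtime bits, so codegen never has to
        // materialize a value for them.
        try std.testing.expect(@sizeOf(void) == 0);
        try std.testing.expect(@sizeOf(u0) == 0);
        try std.testing.expect(@sizeOf(struct {}) == 0);
    }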
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; if (ref.toIndex()) |inst| { return self.getResolvedInstValue(inst); } - return self.genTypedValue((try self.air.value(ref, mod)).?); + return self.genTypedValue((try self.air.value(ref, pt)).?); } fn ret(self: *Self, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); @@ -4654,8 +4684,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size = value_ty.abiSize(mod); + const pt = self.pt; + const abi_size = value_ty.abiSize(pt); switch (ptr) { .none => unreachable, @@ -4696,11 +4726,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(mod); - const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod))); + const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt))); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4738,7 +4769,8 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const info_a = operand_ty.intInfo(mod); const info_b = dest_ty.intInfo(mod); @@ -4848,7 +4880,7 @@ fn truncRegister( } } -/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. +/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`. fn wantSafety(self: *Self) bool { return switch (self.bin_file.comp.root_mod.optimize_mode) { .Debug => true, @@ -4859,11 +4891,9 @@ fn wantSafety(self: *Self) bool { } fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOf(inst, &mod.intern_pool); + return self.air.typeOf(inst, &self.pt.zcu.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const mod = self.bin_file.comp.module.?; - return self.air.typeOfIndex(inst, &mod.intern_pool); + return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool); } diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index b509bb7c7966..165ee1c45e78 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -6,9 +6,7 @@ const Endian = std.builtin.Endian; const assert = std.debug.assert; const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const Liveness = @import("../../Liveness.zig"); const log = std.log.scoped(.sparcv9_emit); const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; @@ -24,7 +22,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2f2e35a75b29..a8ac674e07d3 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -684,6 +684,7 @@ simd_immediates: std.ArrayListUnmanaged([16]u8) = .{}, target: std.Target, /// Represents the wasm binary file that is being linked. bin_file: *link.File.Wasm, +pt: Zcu.PerThread, /// List of MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// Contains extra data for MIR @@ -764,8 +765,7 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { - const mod = func.bin_file.base.comp.module.?; - const src_loc = func.decl.navSrcLoc(mod); + const src_loc = func.decl.navSrcLoc(func.pt.zcu); func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -788,10 +788,11 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const mod = func.bin_file.base.comp.module.?; - const val = (try func.air.value(ref, mod)).?; + const pt = func.pt; + const mod = pt.zcu; + const val = (try func.air.value(ref, pt)).?; const ty = func.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -802,8 +803,8 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). 
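The split described in the comment above is: values that fit a single wasm value type become a local, while everything else is spilled to linear memory and addressed through an i32 pointer. A rough sketch of the width rule follows; `localKind` is an invented name for illustration, not an API in this patch:

    const std = @import("std");

    // Invented helper illustrating the rule: scalars up to 64 bits map onto
    // a single wasm local; anything wider is kept in linear memory and
    // referenced through an i32 pointer.
    fn localKind(bits: u16) std.wasm.Valtype {
        if (bits <= 32) return .i32;
        if (bits <= 64) return .i64;
        return .i32; // pointer into linear memory
    }

    test "scalar widths map onto wasm value types" {
        try std.testing.expectEqual(std.wasm.Valtype.i32, localKind(8));
        try std.testing.expectEqual(std.wasm.Valtype.i64, localKind(33));
        try std.testing.expectEqual(std.wasm.Valtype.i32, localKind(128));
    }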
- const result = if (isByRef(ty, mod)) blk: { - const sym_index = try func.bin_file.lowerUnnamedConst(val, func.decl_index); + const result = if (isByRef(ty, pt)) blk: { + const sym_index = try func.bin_file.lowerUnnamedConst(pt, val, func.decl_index); break :blk WValue{ .memory = sym_index }; } else try func.lowerConstant(val, ty); @@ -990,7 +991,8 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding type -fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype { +fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype { + const mod = pt.zcu; const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (ty.zigTypeTag(mod)) { @@ -1002,26 +1004,26 @@ fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype { else => unreachable, }, .Int, .Enum => blk: { - const info = ty.intInfo(mod); + const info = ty.intInfo(pt.zcu); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack }, .Struct => { - if (mod.typeToPackedStruct(ty)) |packed_struct| { - return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), mod); + if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| { + return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), pt); } else { return wasm.Valtype.i32; } }, - .Vector => switch (determineSimdStoreStrategy(ty, mod)) { + .Vector => switch (determineSimdStoreStrategy(ty, pt)) { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, - .Union => switch (ty.containerLayout(mod)) { + .Union => switch (ty.containerLayout(pt.zcu)) { .@"packed" => { - const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory"); - return typeToValtype(int_ty, mod); + const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory"); + return typeToValtype(int_ty, pt); }, else => wasm.Valtype.i32, }, @@ -1030,17 +1032,17 @@ fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, mod: *Zcu) u8 { - return wasm.valtype(typeToValtype(ty, mod)); +fn genValtype(ty: Type, pt: Zcu.PerThread) u8 { + return wasm.valtype(typeToValtype(ty, pt)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, mod: *Zcu) u8 { +fn genBlockType(ty: Type, pt: Zcu.PerThread) u8 { return switch (ty.ip_index) { .void_type, .noreturn_type => wasm.block_empty, - else => genValtype(ty, mod), + else => genValtype(ty, pt), }; } @@ -1101,8 +1103,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Creates one locals for a given `Type`. /// Returns a corresponding `Wvalue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const valtype = typeToValtype(ty, mod); + const pt = func.pt; + const valtype = typeToValtype(ty, pt); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { log.debug("reusing local ({d}) of type {}", .{ index, valtype }); @@ -1133,8 +1135,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Ensures a new local will be created. This is useful when it's useful /// to use a zero-initialized local. 
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - try func.locals.append(func.gpa, genValtype(ty, mod)); + const pt = func.pt; + try func.locals.append(func.gpa, genValtype(ty, pt)); const initial_index = func.local_index; func.local_index += 1; return WValue{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1147,23 +1149,24 @@ fn genFunctype( cc: std.builtin.CallingConvention, params: []const InternPool.Index, return_type: Type, - mod: *Zcu, + pt: Zcu.PerThread, ) !wasm.Type { + const mod = pt.zcu; var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, mod)) { + if (firstParamSRet(cc, return_type, pt)) { try temp_params.append(.i32); // memory address is always a 32-bit handle - } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) { + } else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) { if (cc == .C) { - const res_classes = abi.classifyType(return_type, mod); + const res_classes = abi.classifyType(return_type, pt); assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, mod); - try returns.append(typeToValtype(scalar_type, mod)); + const scalar_type = abi.scalarType(return_type, pt); + try returns.append(typeToValtype(scalar_type, pt)); } else { - try returns.append(typeToValtype(return_type, mod)); + try returns.append(typeToValtype(return_type, pt)); } } else if (return_type.isError(mod)) { try returns.append(.i32); @@ -1172,25 +1175,25 @@ fn genFunctype( // param types for (params) |param_type_ip| { const param_type = Type.fromInterned(param_type_ip); - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(pt)) continue; switch (cc) { .C => { - const param_classes = abi.classifyType(param_type, mod); + const param_classes = abi.classifyType(param_type, pt); for (param_classes) |class| { if (class == .none) continue; if (class == .direct) { - const scalar_type = abi.scalarType(param_type, mod); - try temp_params.append(typeToValtype(scalar_type, mod)); + const scalar_type = abi.scalarType(param_type, pt); + try temp_params.append(typeToValtype(scalar_type, pt)); } else { - try temp_params.append(typeToValtype(param_type, mod)); + try temp_params.append(typeToValtype(param_type, pt)); } } }, - else => if (isByRef(param_type, mod)) + else => if (isByRef(param_type, pt)) try temp_params.append(.i32) else - try temp_params.append(typeToValtype(param_type, mod)), + try temp_params.append(typeToValtype(param_type, pt)), } } @@ -1202,6 +1205,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, + pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, @@ -1210,15 +1214,15 @@ pub fn generate( debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; - const comp = bin_file.comp; - const gpa = comp.gpa; - const zcu = comp.module.?; + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); const decl = zcu.declPtr(func.owner_decl); const namespace = zcu.namespacePtr(decl.src_namespace); const target = namespace.fileScope(zcu).mod.resolved_target.result; var code_gen: CodeGen = .{ .gpa = gpa, + .pt = pt, .air = air, .liveness = liveness, .code = code, @@ -1242,10 +1246,11 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const mod = 
func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod); + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1272,7 +1277,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1); const last_inst_ty = func.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(pt) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); } } @@ -1354,7 +1359,8 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; @@ -1369,7 +1375,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) { + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
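The sret convention mentioned in the comment above can be pictured at the source level like this (names invented for illustration): a function returning a by-ref aggregate is rewritten so the caller passes the result location as a leading pointer parameter.

    const std = @import("std");

    const Big = [32]u8; // wider than any single wasm value type

    // Invented stand-in for the rewritten form of `fn f() Big`: the caller
    // allocates the result and passes its address as the first argument.
    fn fRewritten(result: *Big) void {
        result.* = [_]u8{7} ** 32;
    }

    test "caller-provided result location" {
        var storage: Big = undefined;
        fRewritten(&storage);
        try std.testing.expectEqual(@as(u8, 7), storage[0]);
    }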
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1379,7 +1385,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { for (fn_info.param_types.get(ip)) |ty| { - if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(mod)) { + if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(pt)) { continue; } @@ -1389,7 +1395,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV }, .C => { for (fn_info.param_types.get(ip)) |ty| { - const ty_classes = abi.classifyType(Type.fromInterned(ty), mod); + const ty_classes = abi.classifyType(Type.fromInterned(ty), pt); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1403,11 +1409,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Zcu) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, mod), + .Unspecified, .Inline => return isByRef(return_type, pt), .C => { - const ty_classes = abi.classifyType(return_type, mod); + const ty_classes = abi.classifyType(return_type, pt); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1423,8 +1429,9 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const mod = func.bin_file.base.comp.module.?; - const ty_classes = abi.classifyType(ty, mod); + const pt = func.pt; + const mod = pt.zcu; + const ty_classes = abi.classifyType(ty, pt); assert(ty_classes[0] != .none); switch (ty.zigTypeTag(mod)) { .Struct, .Union => { @@ -1432,7 +1439,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, mod); + const scalar_type = abi.scalarType(ty, pt); switch (value) { .memory, .memory_offset, @@ -1447,7 +1454,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(mod) == 16); + assert(ty.abiSize(pt) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. 
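The "2 i64's" lowering asserted above amounts to a little-endian split of a 128-bit scalar; the two loads that follow read the halves at offsets 0 and 8. A standalone sketch of the arithmetic:

    const std = @import("std");

    test "a 128-bit scalar splits into two 64-bit halves" {
        const x: u128 = 0x0123456789abcdef_fedcba9876543210;
        const lo: u64 = @truncate(x); // bytes 0..8
        const hi: u64 = @truncate(x >> 64); // bytes 8..16
        try std.testing.expectEqual(@as(u64, 0xfedcba9876543210), lo);
        try std.testing.expectEqual(@as(u64, 0x0123456789abcdef), hi);
    }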
try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1514,18 +1521,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - const mod = func.bin_file.base.comp.module.?; - assert(ty.hasRuntimeBitsIgnoreComptime(mod)); + const pt = func.pt; + assert(ty.hasRuntimeBitsIgnoreComptime(pt)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { + const abi_size = std.math.cast(u32, ty.abiSize(pt)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(mod), ty.abiSize(mod), + ty.fmt(pt), ty.abiSize(pt), }); }; - const abi_align = ty.abiAlignment(mod); + const abi_align = ty.abiAlignment(pt); func.stack_alignment = func.stack_alignment.max(abi_align); @@ -1540,7 +1547,8 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ptr_ty = func.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(mod); @@ -1548,14 +1556,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(pt)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. } - const abi_alignment = ptr_ty.ptrAlignment(mod); - const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse { + const abi_alignment = ptr_ty.ptrAlignment(pt); + const abi_size = std.math.cast(u32, pointee_ty.abiSize(pt)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(mod), pointee_ty.abiSize(mod), + pointee_ty.fmt(pt), pointee_ty.abiSize(pt), }); }; func.stack_alignment = func.stack_alignment.max(abi_alignment); @@ -1711,7 +1719,8 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, mod: *Zcu) bool { +fn isByRef(ty: Type, pt: Zcu.PerThread) bool { + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); switch (ty.zigTypeTag(mod)) { @@ -1734,28 +1743,28 @@ fn isByRef(ty: Type, mod: *Zcu) bool { .Array, .Frame, - => return ty.hasRuntimeBitsIgnoreComptime(mod), + => return ty.hasRuntimeBitsIgnoreComptime(pt), .Union => { if (mod.typeToUnion(ty)) |union_obj| { if (union_obj.getLayout(ip) == .@"packed") { - return ty.abiSize(mod) > 8; + return ty.abiSize(pt) > 8; } } - return ty.hasRuntimeBitsIgnoreComptime(mod); + return ty.hasRuntimeBitsIgnoreComptime(pt); }, .Struct => { if (mod.typeToPackedStruct(ty)) |packed_struct| { - return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), mod); + return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), pt); } - return ty.hasRuntimeBitsIgnoreComptime(mod); + return ty.hasRuntimeBitsIgnoreComptime(pt); }, - .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled, + .Vector => return determineSimdStoreStrategy(ty, pt) == .unrolled, .Int => return ty.intInfo(mod).bits > 64, .Enum => return ty.intInfo(mod).bits > 64, .Float => 
return ty.floatBits(target) > 64, .ErrorUnion => { const pl_ty = ty.errorUnionPayload(mod); - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } return true; @@ -1764,7 +1773,7 @@ fn isByRef(ty: Type, mod: *Zcu) bool { if (ty.isPtrLikeOptional(mod)) return false; const pl_type = ty.optionalChild(mod); if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; - return pl_type.hasRuntimeBitsIgnoreComptime(mod); + return pl_type.hasRuntimeBitsIgnoreComptime(pt); }, .Pointer => { // Slices act like struct and will be passed by reference @@ -1783,11 +1792,11 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, mod: *Zcu) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag(mod) == .Vector); - if (ty.bitSize(mod) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector); + if (ty.bitSize(pt) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; - const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -2064,7 +2073,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; for (body) |inst| { @@ -2085,7 +2095,8 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; @@ -2095,27 +2106,27 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, mod); + const scalar_type = abi.scalarType(ret_ty, pt); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)), + .width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, mod), + .valtype1 = typeToValtype(scalar_type, pt), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnits().?), + .alignment = @intCast(scalar_type.abiAlignment(pt).toByteUnits().?), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { + if 
(!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2128,16 +2139,17 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const child_type = func.typeOfIndex(inst).childType(mod); const result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { break :result try func.allocStack(Type.usize); // create pointer to void } const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) { + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { break :result func.return_value; } @@ -2148,17 +2160,18 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); const ret_ty = func.typeOf(un_op).childType(mod); const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) { + } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2175,7 +2188,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len])); const ty = func.typeOf(pl_op.operand); - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -2184,10 +2198,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif }; const ret_ty = fn_ty.fnReturnType(mod); const fn_info = mod.typeToFunc(fn_ty).?; - const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod); + const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt); const callee: ?InternPool.DeclIndex = blk: { - const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; + const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; if (func_val.getFunction(mod)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); @@ -2195,7 +2209,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (func_val.getExternFunc(mod)) |extern_func| { const ext_decl = mod.declPtr(extern_func.decl); const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?; - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), mod); + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt); defer func_type.deinit(func.gpa); const atom_index = try 
func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2230,7 +2244,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_val = try func.resolveInst(arg); const arg_ty = func.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } @@ -2245,7 +2259,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, fn_type); @@ -2253,7 +2267,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; } else if (ret_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); @@ -2264,7 +2278,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, mod); + const scalar_type = abi.scalarType(ret_ty, pt); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2287,7 +2301,8 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2306,13 +2321,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. 
- const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8); + const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); - if (isByRef(int_elem_ty, mod)) { + if (isByRef(int_elem_ty, pt)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(mod)))) - 1)); + var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(pt)))) - 1)); mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset)); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.packed_offset.host_size <= 4) @@ -2324,9 +2339,9 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void else WValue{ .imm64 = mask }; const wrap_mask_val = if (ptr_info.packed_offset.host_size <= 4) - WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod))) } + WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt))) } else - WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod)) }; + WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt)) }; try func.emitWValue(lhs); const loaded = try func.load(lhs, int_elem_ty, 0); @@ -2346,12 +2361,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const mod = func.bin_file.base.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = func.pt; + const mod = pt.zcu; + const abi_size = ty.abiSize(pt); switch (ty.zigTypeTag(mod)) { .ErrorUnion => { const pl_ty = ty.errorUnionPayload(mod); - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2363,7 +2379,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.usize, 0); } const pl_ty = ty.optionalChild(mod); - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { return func.store(lhs, rhs, Type.u8, 0); } if (pl_ty.zigTypeTag(mod) == .ErrorSet) { @@ -2373,11 +2389,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, mod)) { + .Struct, .Array, .Union => if (isByRef(ty, pt)) { const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, mod)) { + .Vector => switch (determineSimdStoreStrategy(ty, pt)) { .unrolled => { const len: u32 = @intCast(abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2391,7 +2407,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - @intCast(ty.abiAlignment(mod).toByteUnits() orelse 0), + @intCast(ty.abiAlignment(pt).toByteUnits() orelse 0), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, @@ -2421,11 +2437,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, 
@intCast(ty.abiSize(mod))) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(pt))) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ - ty.fmt(func.bin_file.base.comp.module.?), + ty.fmt(pt), abi_size, }); }, @@ -2435,7 +2451,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, mod); + const valtype = typeToValtype(ty, pt); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @as(u8, @intCast(abi_size * 8)), @@ -2447,23 +2463,24 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = offset + lhs.offset(), - .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?), + .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?), }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = ty_op.ty.toType(); const ptr_ty = func.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo(mod); - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, mod)) { + if (isByRef(ty, pt)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2476,7 +2493,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8); + const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); const shift_val = if (ptr_info.packed_offset.host_size <= 4) WValue{ .imm32 = ptr_info.packed_offset.bit_offset } else if (ptr_info.packed_offset.host_size <= 8) @@ -2496,7 +2513,8 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
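Each load emitted by the `load` helper below carries a wasm "memarg": a static byte offset folded into the instruction plus an alignment hint. A standalone picture of what offset folding buys (illustrative only):

    const std = @import("std");

    test "a static memarg offset is added to the base address" {
        // i32.load with offset=4 reads the second field without any extra
        // address arithmetic at runtime.
        const bytes = [_]u8{ 1, 0, 0, 0, 2, 0, 0, 0 };
        const second = std.mem.readInt(u32, bytes[4..8], .little);
        try std.testing.expectEqual(@as(u32, 2), second);
    }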
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; // load local's value from memory by its stack position try func.emitWValue(operand); @@ -2507,15 +2525,15 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - @intCast(ty.abiAlignment(mod).toByteUnits().?), + @intCast(ty.abiAlignment(pt).toByteUnits().?), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size: u8 = @intCast(ty.abiSize(mod)); + const abi_size: u8 = @intCast(ty.abiSize(pt)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, mod), + .valtype1 = typeToValtype(ty, pt), .width = abi_size * 8, .op = .load, .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, @@ -2525,7 +2543,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = offset + operand.offset(), - .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?), + .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?), }, ); @@ -2533,13 +2551,14 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu } fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const arg_index = func.arg_index; const arg = func.args[arg_index]; const cc = mod.typeToFunc(func.decl.typeOf(mod)).?.cc; const arg_ty = func.typeOfIndex(inst); if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, mod); + const arg_classes = abi.classifyType(arg_ty, pt); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2552,7 +2571,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", - .{arg_ty.fmt(func.bin_file.base.comp.module.?)}, + .{arg_ty.fmt(pt)}, ); } const result = try func.allocStack(arg_ty); @@ -2579,7 +2598,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -2593,10 +2612,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
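In wasm, the shift amount must have the same value type as the shifted operand (i64.shl takes both operands as i64), which is why the `.shr`/`.shl` case below widens the rhs with `intcast` when the wasm bit widths differ. A minimal illustration of the widening:

    const std = @import("std");

    test "shift amount is widened to the operand's value type" {
        const lhs: u64 = 0xff; // lives in an i64 local
        const amt: u8 = 4; // lives in an i32 local
        const widened: u64 = amt; // the implicit step the backend makes explicit
        try std.testing.expectEqual(@as(u64, 0xff0), lhs << @intCast(widened));
    }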
     const result = switch (op) {
         .shr, .shl => res: {
-            const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
+            const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
                 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
-            const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
+            const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
                 const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
                 break :blk try tmp.toLocal(func, lhs_ty);
@@ -2616,7 +2635,8 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 /// Performs a binary operation on the given `WValue`s.
 /// NOTE: This leaves the value on top of the stack.
 fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     assert(!(lhs != .stack and rhs == .stack));
 
     if (ty.isAnyFloat()) {
@@ -2624,20 +2644,20 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
         return func.floatOp(float_op, ty, &.{ lhs, rhs });
     }
 
-    if (isByRef(ty, mod)) {
+    if (isByRef(ty, pt)) {
         if (ty.zigTypeTag(mod) == .Int) {
             return func.binOpBigInt(lhs, rhs, ty, op);
         } else {
             return func.fail(
                 "TODO: Implement binary operation for type: {}",
-                .{ty.fmt(func.bin_file.base.comp.module.?)},
+                .{ty.fmt(pt)},
             );
         }
     }
 
     const opcode: wasm.Opcode = buildOpcode(.{
         .op = op,
-        .valtype1 = typeToValtype(ty, mod),
+        .valtype1 = typeToValtype(ty, pt),
         .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
     });
     try func.emitWValue(lhs);
@@ -2649,7 +2669,8 @@
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const int_info = ty.intInfo(mod); if (int_info.bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); @@ -2785,7 +2806,8 @@ const FloatOp = enum { }; fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.typeOf(ty_op.operand); @@ -2793,7 +2815,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (scalar_ty.zigTypeTag(mod)) { .Int => if (ty.zigTypeTag(mod) == .Vector) { - return func.fail("TODO implement airAbs for {}", .{ty.fmt(mod)}); + return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)}); } else { const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { @@ -2877,7 +2899,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2893,7 +2916,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2983,7 +3006,8 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue { } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try func.resolveInst(bin_op.lhs); @@ -3002,10 +3026,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?; + const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -3034,9 +3058,10 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed. 
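`wrapOperand` below implements truncation by masking: an intermediate result that overflowed the declared bit width is masked back into range. For example:

    const std = @import("std");

    test "masking an over-wide intermediate back to its declared width" {
        // A u3 lives in a 32-bit local, so 7 + 1 leaves 8 in the local;
        // masking with (1 << 3) - 1 restores the wrapped value 0.
        const mask: u32 = (1 << 3) - 1;
        const raw: u32 = 7 + 1;
        try std.testing.expectEqual(@as(u32, 0), raw & mask);
    }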
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - assert(ty.abiSize(mod) <= 16); - const int_bits = @as(u16, @intCast(ty.bitSize(mod))); // TODO use ty.intInfo(mod).bits + const pt = func.pt; + const mod = pt.zcu; + assert(ty.abiSize(pt) <= 16); + const int_bits: u16 = @intCast(ty.bitSize(pt)); // TODO use ty.intInfo(mod).bits const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits}); }; @@ -3098,13 +3123,14 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { } fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue { - const zcu = func.bin_file.base.comp.module.?; + const pt = func.pt; + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { .decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)), .anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)), - .int => return func.lowerConstant(try zcu.intValue(Type.usize, offset), Type.usize), + .int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize), .eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}), .opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset), .field => |field| { @@ -3120,13 +3146,13 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr }; }, .Struct => switch (base_ty.containerLayout(zcu)) { - .auto => base_ty.structFieldOffset(@intCast(field.index), zcu), + .auto => base_ty.structFieldOffset(@intCast(field.index), pt), .@"extern", .@"packed" => unreachable, }, .Union => switch (base_ty.containerLayout(zcu)) { .auto => off: { // Keep in sync with the `un` case of `generateSymbol`. 
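The offset computation below derives the payload's position from the relative alignments of tag and payload: whichever is more aligned is laid out first. A sketch of the rule, with invented numbers:

    const std = @import("std");

    test "payload offset when the tag is laid out first" {
        // If tag_align >= payload_align, the tag comes first and the
        // payload starts at the tag's size rounded up to its alignment.
        const tag_size: u64 = 4;
        const payload_align: u64 = 2;
        const payload_offset = std.mem.alignForward(u64, tag_size, payload_align);
        try std.testing.expectEqual(@as(u64, 4), payload_offset);
    }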
- const layout = base_ty.unionGetLayout(zcu); + const layout = base_ty.unionGetLayout(pt); if (layout.payload_size == 0) break :off 0; if (layout.tag_size == 0) break :off 0; if (layout.tag_align.compare(.gte, layout.payload_align)) { @@ -3152,17 +3178,18 @@ fn lowerAnonDeclRef( anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, offset: u32, ) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const decl_val = anon_decl.val; const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); const is_fn_body = ty.zigTypeTag(mod) == .Fn; - if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) { return WValue{ .imm32 = 0xaaaaaaaa }; } const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod)); + const res = try func.bin_file.lowerAnonDecl(pt, decl_val, decl_align, func.decl.navSrcLoc(mod)); switch (res) { .ok => {}, .fail => |em| { @@ -3180,7 +3207,8 @@ fn lowerAnonDeclRef( } fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const decl = mod.declPtr(decl_index); // check if decl is an alias to a function, in which case we @@ -3195,7 +3223,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u } } const decl_ty = decl.typeOf(mod); - if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(pt)) { return WValue{ .imm32 = 0xaaaaaaaa }; } @@ -3212,8 +3240,9 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u /// Asserts that `isByRef` returns `false` for `ty`. 
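Both decl-ref paths above hand out `0xaaaaaaaa` for pointers to zero-bit values; that is the same 0xaa pattern Zig uses to fill undefined memory in safe builds, so an accidental dereference stands out. For instance:

    const std = @import("std");

    test "0xaa is the conventional undefined byte pattern" {
        var buf: [4]u8 = undefined;
        @memset(&buf, 0xaa);
        try std.testing.expectEqual(
            @as(u32, 0xaaaaaaaa),
            std.mem.readInt(u32, &buf, .little),
        );
    }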
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - assert(!isByRef(ty, mod)); + const pt = func.pt; + const mod = pt.zcu; + assert(!isByRef(ty, pt)); const ip = &mod.intern_pool; if (val.isUndefDeep(mod)) return func.emitUndefined(ty); @@ -3261,13 +3290,13 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(mod)))) }, - 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(mod)) }, + 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(pt)))) }, + 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(pt)) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(mod)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, + 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(pt)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(pt) }, else => unreachable, }, } @@ -3277,22 +3306,22 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { return WValue{ .imm32 = int }; }, .error_union => |error_union| { - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const err_ty, const err_val = switch (error_union.val) { .err_name => |err_name| .{ ty.errorUnionSet(mod), - Value.fromInterned((try mod.intern(.{ .err = .{ + Value.fromInterned(try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, - } }))), + } })), }, .payload => .{ err_int_ty, - try mod.intValue(err_int_ty, 0), + try pt.intValue(err_int_ty, 0), }, }; const payload_type = ty.errorUnionPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { // We use the error type directly as the type. return func.lowerConstant(err_val, err_ty); } @@ -3318,7 +3347,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr, .arr_elem, .comptime_field, .comptime_alloc => unreachable, }; - return .{ .memory = try func.bin_file.lowerUnnamedConst(val, owner_decl) }; + return .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, owner_decl) }; }, .ptr => return func.lowerPtr(val.toIntern(), 0), .opt => if (ty.optionalReprIsPayload(mod)) { @@ -3332,11 +3361,11 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { return WValue{ .imm32 = @intFromBool(!val.isNull(mod)) }; }, .aggregate => switch (ip.indexToKey(ty.ip_index)) { - .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}), .vector_type => { - assert(determineSimdStoreStrategy(ty, mod) == .direct); + assert(determineSimdStoreStrategy(ty, pt) == .direct); var buf: [16]u8 = undefined; - val.writeToMemory(ty, mod, &buf) catch unreachable; + val.writeToMemory(ty, pt, &buf) catch unreachable; return func.storeSimdImmd(buf); }, .struct_type => { @@ -3345,9 +3374,9 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { // are by-ref types. 
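Only packed structs reach this point (all other struct layouts are by-ref), and a packed struct constant is nothing more than its backing integer. A standalone illustration:

    const std = @import("std");

    test "a packed struct constant is its backing integer" {
        const Flags = packed struct(u8) { a: bool, b: bool, rest: u6 };
        const f = Flags{ .a = true, .b = false, .rest = 3 };
        // Bit 0 = a, bit 1 = b, bits 2..7 = rest: the same value the
        // backend obtains via writeToPackedMemory.
        const as_int: u8 = @bitCast(f);
        try std.testing.expectEqual(@as(u8, 0b0000_1101), as_int);
    }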
assert(struct_type.layout == .@"packed"); var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable; + val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable; const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*); - const int_val = try mod.intValue( + const int_val = try pt.intValue( backing_int_ty, mem.readInt(u64, &buf, .little), ); @@ -3358,7 +3387,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .un => |un| { // in this case we have a packed union which will not be passed by reference. const constant_ty = if (un.tag == .none) - try ty.unionBackingType(mod) + try ty.unionBackingType(pt) else field_ty: { const union_obj = mod.typeToUnion(ty).?; const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; @@ -3379,7 +3408,8 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, @@ -3421,15 +3451,16 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; switch (val.ip_index) { .none => {}, .bool_true => return 1, .bool_false => return 0, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), - .int => |int| intStorageAsI32(int.storage, mod), + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, pt), + .int => |int| intStorageAsI32(int.storage, pt), .ptr => |ptr| { assert(ptr.base_addr == .int); return @intCast(ptr.byte_offset); @@ -3445,17 +3476,17 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { }; } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Zcu) i32 { - return intStorageAsI32(ip.indexToKey(int).int.storage, mod); +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, pt); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Zcu) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 { return switch (storage) { .i64 => |x| @as(i32, @intCast(x)), .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), .big_int => unreachable, - .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0)))), - .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))), + .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0)))), + .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))))), }; } @@ -3466,12 +3497,12 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; - const wasm_block_ty = genBlockType(block_ty, mod); + const pt = func.pt; + const 
wasm_block_ty = genBlockType(block_ty, pt);
 
     // if wasm_block_ty is non-empty, we create a register to store the temporary value
     const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
-        const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty;
+        const ty: Type = if (isByRef(block_ty, pt)) Type.u32 else block_ty;
         break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
     } else WValue.none;
 
@@ -3583,10 +3614,11 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
 /// NOTE: This leaves the result on top of the stack, rather than a new local.
 fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     assert(!(lhs != .stack and rhs == .stack));
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
    if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
         const payload_ty = ty.optionalChild(mod);
-        if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
             // When we hit this case, we must check the value of optionals
             // that are not pointers. This means first checking the non-null bit for
             // both lhs and rhs, and then checking that the payloads of lhs and rhs match
@@ -3594,7 +3626,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
         }
     } else if (ty.isAnyFloat()) {
         return func.cmpFloat(ty, lhs, rhs, op);
-    } else if (isByRef(ty, mod)) {
+    } else if (isByRef(ty, pt)) {
         return func.cmpBigInt(lhs, rhs, ty, op);
     }
 
@@ -3612,7 +3644,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
     try func.lowerToStack(rhs);
 
     const opcode: wasm.Opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, mod),
+        .valtype1 = typeToValtype(ty, pt),
         .op = switch (op) {
             .lt => .lt,
             .lte => .le,
@@ -3683,8 +3715,8 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const errors_len = WValue{ .memory = @intFromEnum(sym_index) };
 
     try func.emitWValue(operand);
-    const mod = func.bin_file.base.comp.module.?;
-    const err_int_ty = try mod.errorIntType();
+    const pt = func.pt;
+    const err_int_ty = try pt.errorIntType();
     const errors_len_val = try func.load(errors_len, err_int_ty, 0);
     const result = try func.cmp(.stack, errors_len_val, err_int_ty, .lt);
 
@@ -3692,12 +3724,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
     const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
     const block = func.blocks.get(br.block_inst).?;
 
     // if operand has codegen bits we should break with a value
-    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
+    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(pt)) {
         const operand = try func.resolveInst(br.operand);
         try func.lowerToStack(operand);
 
@@ -3719,7 +3751,8 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const operand_ty = func.typeOf(ty_op.operand);
 
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
 
     const result = result: {
         if (operand_ty.zigTypeTag(mod) == .Bool) {
@@ -3731,7 +3764,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         } else {
             const int_info = operand_ty.intInfo(mod);
             const wasm_bits = 
toWasmBits(int_info.bits) orelse { - return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(mod)}); + return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)}); }; switch (wasm_bits) { @@ -3798,13 +3831,14 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const wanted_ty = func.typeOfIndex(inst); const given_ty = func.typeOf(ty_op.operand); - const bit_size = given_ty.bitSize(mod); + const bit_size = given_ty.bitSize(pt); const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and bit_size != 32 and bit_size != 64 and bit_size != 128; @@ -3814,7 +3848,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try bitcast_result.toLocal(func, wanted_ty); } - if (isByRef(given_ty, mod) and !isByRef(wanted_ty, mod)) { + if (isByRef(given_ty, pt) and !isByRef(wanted_ty, pt)) { const loaded_memory = try func.load(operand, wanted_ty, 0); if (needs_wrapping) { break :result try (try func.wrapOperand(loaded_memory, wanted_ty)).toLocal(func, wanted_ty); @@ -3822,7 +3856,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try loaded_memory.toLocal(func, wanted_ty); } } - if (!isByRef(given_ty, mod) and isByRef(wanted_ty, mod)) { + if (!isByRef(given_ty, pt) and isByRef(wanted_ty, pt)) { const stack_memory = try func.allocStack(wanted_ty); try func.store(stack_memory, operand, given_ty, 0); if (needs_wrapping) { @@ -3842,17 +3876,18 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; - if (wanted_ty.bitSize(mod) > 64) return operand; + if (wanted_ty.bitSize(pt) > 64) return operand; assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, mod), - .valtype2 = typeToValtype(given_ty, mod), + .valtype1 = typeToValtype(wanted_ty, pt), + .valtype2 = typeToValtype(given_ty, pt), }); try func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3860,7 +3895,8 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn } fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload); @@ -3872,7 +3908,8 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); const struct_ptr_ty = func.typeOf(ty_op.operand); @@ -3891,7 +3928,8 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const result_ty = func.typeOfIndex(inst); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); @@ -3902,12 +3940,12 @@ fn structFieldPtr( break :offset @as(u32, 0); } const struct_type = mod.typeToStruct(struct_ty).?; - break :offset @divExact(mod.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); + break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, mod), + else => struct_ty.structFieldOffset(index, pt), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3922,7 +3960,8 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; @@ -3931,13 +3970,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result = switch (struct_ty.containerLayout(mod)) { .@"packed" => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { const packed_struct = mod.typeToPackedStruct(struct_ty).?; - const offset = mod.structPackedFieldBitOffset(packed_struct, field_index); + const offset = pt.structPackedFieldBitOffset(packed_struct, field_index); const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*); const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); @@ -3956,7 +3995,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.binOp(operand, const_wvalue, backing_ty, .shr); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); + const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); @@ -3965,7 +4004,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // we can simply reuse the operand. 
break :result func.reuseOperand(struct_field.struct_operand, operand); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); + const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt)))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3973,8 +4012,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, mod)) { - if (!isByRef(field_ty, mod)) { + if (isByRef(struct_ty, pt)) { + if (!isByRef(field_ty, pt)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3984,14 +4023,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod)))); + const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(pt)))); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); + const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt)))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod)))); + const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt)))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -4001,10 +4040,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)}); }; - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -4021,7 +4060,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -4055,7 +4095,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = (try func.air.value(ref, mod)).?; + const item_val = (try func.air.value(ref, pt)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) 
{ lowest_maybe = int_val; @@ -4078,7 +4118,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the target is an integer size larger than u32, we have no way to use the value // as an index, therefore we also use an if/else-chain for those cases. // TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'. - const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32; + const is_sparse = highest - lowest > 50 or target_ty.bitSize(pt) > 32; const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]); const has_else_body = else_body.len != 0; @@ -4150,7 +4190,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(case.values[0].value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, mod), + .valtype1 = typeToValtype(target_ty, pt), .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition. .signedness = signedness, }); @@ -4164,7 +4204,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, mod), + .valtype1 = typeToValtype(target_ty, pt), .op = .eq, .signedness = signedness, }); @@ -4201,7 +4241,8 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.typeOf(un_op); @@ -4217,10 +4258,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))), - .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?), + .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, pt))), + .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?), }); } @@ -4236,7 +4277,8 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4245,15 +4287,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))); - if (op_is_ptr or isByRef(payload_ty, mod)) { + const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))); + if (op_is_ptr or isByRef(payload_ty, 
pt)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -4264,7 +4306,8 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4277,18 +4320,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)))); + const error_val = try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, pt))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4296,18 +4339,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const pl_ty = func.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2, @@ -4318,7 +4361,8 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4326,17 +4370,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)))); + try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, pt))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new); - const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod))); + const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new); + const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(pt))); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4350,16 +4394,17 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = ty_op.ty.toType(); const operand = try func.resolveInst(ty_op.operand); const operand_ty = func.typeOf(ty_op.operand); - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { + if (ty.abiSize(pt) > 16 or operand_ty.abiSize(pt) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?; - const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; + const op_bits = toWasmBits(@intCast(operand_ty.bitSize(pt))).?; + const wanted_bits = toWasmBits(@intCast(ty.bitSize(pt))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4373,9 +4418,10 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. 
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const given_bitsize = @as(u16, @intCast(given.bitSize(mod))); - const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod))); + const pt = func.pt; + const mod = pt.zcu; + const given_bitsize = @as(u16, @intCast(given.bitSize(pt))); + const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(pt))); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4422,7 +4468,8 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); @@ -4436,15 +4483,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: /// For a given type and operand, checks if it's considered `null`. /// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; try func.emitWValue(operand); const payload_ty = optional_ty.optionalChild(mod); if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } @@ -4464,11 +4512,12 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const opt_ty = func.typeOf(ty_op.operand); const payload_ty = func.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } @@ -4476,7 +4525,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, mod)) { + if (isByRef(payload_ty, pt)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4487,14 +4536,15 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const opt_ty = 
func.typeOf(ty_op.operand).childType(mod); const result = result: { const payload_ty = opt_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4504,12 +4554,13 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.typeOf(ty_op.operand).childType(mod); const payload_ty = opt_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } @@ -4517,8 +4568,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi return func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); + const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)}); }; try func.emitWValue(operand); @@ -4532,10 +4583,11 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = func.typeOf(ty_op.operand); - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const non_null_bit = try func.allocStack(Type.u1); try func.emitWValue(non_null_bit); try func.addImm32(1); @@ -4548,8 +4600,8 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); + const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4589,14 +4641,15 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); // load pointer onto stack _ = try func.load(slice, Type.usize, 0); @@ -4610,7 
+4663,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result_ptr = try func.allocLocal(Type.usize);
     try func.addLabel(.local_set, result_ptr.local.value);
-    const result = if (!isByRef(elem_ty, mod)) result: {
+    const result = if (!isByRef(elem_ty, pt)) result: {
         const elem_val = try func.load(result_ptr, elem_ty, 0);
         break :result try elem_val.toLocal(func, elem_ty);
     } else result_ptr;
@@ -4619,12 +4672,13 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const elem_ty = ty_pl.ty.toType().childType(mod);
-    const elem_size = elem_ty.abiSize(mod);
+    const elem_size = elem_ty.abiSize(pt);
 
     const slice = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
@@ -4672,14 +4726,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 /// Truncates a given operand to a given type, discarding any overflowed bits.
 /// NOTE: Resulting value is left on the stack.
 fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
-    const given_bits = @as(u16, @intCast(given_ty.bitSize(mod)));
+    const pt = func.pt;
+    const given_bits = @as(u16, @intCast(given_ty.bitSize(pt)));
     if (toWasmBits(given_bits) == null) {
         return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
     }
 
     var result = try func.intcast(operand, given_ty, wanted_ty);
-    const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod)));
+    const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(pt)));
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
         result = try func.wrapOperand(result, wanted_ty);
@@ -4696,7 +4750,8 @@ fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -4707,7 +4762,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const slice_local = try func.allocStack(slice_ty);
 
     // store the array ptr in the slice
-    if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+    if (array_ty.hasRuntimeBitsIgnoreComptime(pt)) {
         try func.store(slice_local, operand, Type.usize, 0);
     }
 
@@ -4719,7 +4774,8 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
     const ptr_ty = func.typeOf(un_op);
@@ -4734,14 +4790,15 @@ fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const ptr_ty = 
func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); // load pointer onto the stack if (ptr_ty.isSlice(mod)) { @@ -4759,7 +4816,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_result = val: { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { break :val result; } defer result.free(func); // only free if it's not returned like above @@ -4771,13 +4828,14 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = func.typeOf(bin_op.lhs); const elem_ty = ty_pl.ty.toType().childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4801,7 +4859,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4813,13 +4872,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { else => ptr_ty.childType(mod), }; - const valtype = typeToValtype(Type.usize, mod); + const valtype = typeToValtype(Type.usize, pt); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@intCast(pointee_ty.abiSize(mod))); + try func.addImm32(@intCast(pointee_ty.abiSize(pt))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4829,7 +4888,8 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -4862,8 +4922,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { - const mod = func.bin_file.base.comp.module.?; - const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const pt = func.pt; + const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt))); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. 
@@ -4951,16 +5011,17 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue } fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const array_ty = func.typeOf(bin_op.lhs); const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); - if (isByRef(array_ty, mod)) { + if (isByRef(array_ty, pt)) { try func.lowerToStack(array); try func.emitWValue(index); try func.addImm32(@intCast(elem_size)); @@ -4971,7 +5032,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (index) { inline .imm32, .imm64 => |lane| { - const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) { + const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(pt)) { 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u, 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u, 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane, @@ -5007,7 +5068,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { break :val result; } defer result.free(func); // only free if no longer needed and not returned like above @@ -5020,7 +5081,8 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -5054,8 +5116,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, mod), - .valtype2 = typeToValtype(op_ty, mod), + .valtype1 = typeToValtype(dest_ty, pt), + .valtype2 = typeToValtype(op_ty, pt), .signedness = dest_info.signedness, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -5065,7 +5127,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -5099,8 +5162,8 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, mod), - .valtype2 = typeToValtype(op_ty, mod), + .valtype1 = typeToValtype(dest_ty, pt), + .valtype2 = typeToValtype(op_ty, pt), .signedness = op_info.signedness, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -5111,19 +5174,20 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; 
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.typeOfIndex(inst); const elem_ty = ty.childType(mod); - if (determineSimdStoreStrategy(ty, mod) == .direct) blk: { + if (determineSimdStoreStrategy(ty, pt) == .direct) blk: { switch (operand) { // when the operand lives in the linear memory section, we can directly // load and splat the value at once. Meaning we do not first have to load // the scalar value onto the stack. .stack_offset, .memory, .memory_offset => { - const opcode = switch (elem_ty.bitSize(mod)) { + const opcode = switch (elem_ty.bitSize(pt)) { 8 => std.wasm.simdOpcode(.v128_load8_splat), 16 => std.wasm.simdOpcode(.v128_load16_splat), 32 => std.wasm.simdOpcode(.v128_load32_splat), @@ -5138,14 +5202,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, operand.offset(), - @intCast(elem_ty.abiAlignment(mod).toByteUnits().?), + @intCast(elem_ty.abiAlignment(pt).toByteUnits().?), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); return func.finishAir(inst, result, &.{ty_op.operand}); }, .local => { - const opcode = switch (elem_ty.bitSize(mod)) { + const opcode = switch (elem_ty.bitSize(pt)) { 8 => std.wasm.simdOpcode(.i8x16_splat), 16 => std.wasm.simdOpcode(.i16x8_splat), 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), @@ -5163,14 +5227,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } } - const elem_size = elem_ty.bitSize(mod); + const elem_size = elem_ty.bitSize(pt); const vector_len = @as(usize, @intCast(ty.vectorLen(mod))); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(pt))); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -5190,7 +5254,8 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const inst_ty = func.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -5201,14 +5266,14 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mask_len = extra.mask_len; const child_ty = inst_ty.childType(mod); - const elem_size = child_ty.abiSize(mod); + const elem_size = child_ty.abiSize(pt); // TODO: One of them could be by ref; handle in loop - if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { + if (isByRef(func.typeOf(extra.a), pt) or isByRef(inst_ty, pt)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { - const value = (try mask.elemValue(mod, index)).toSignedInt(mod); + const value = (try mask.elemValue(pt, index)).toSignedInt(pt); try func.emitWValue(result); @@ -5228,7 +5293,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = mem.asBytes(operands[1..]); for (0..@as(usize, @intCast(mask_len))) 
|index| { - const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); + const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt); const base_index = if (mask_elem >= 0) @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem)) else @@ -5259,7 +5324,8 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = func.typeOfIndex(inst); @@ -5271,7 +5337,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Array => { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(mod); - const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt))); const sentinel = if (result_ty.sentinel(mod)) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; @@ -5279,7 +5345,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. const offset = try func.buildPointerOffset(result, 0, .new); @@ -5309,7 +5375,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .Struct => switch (result_ty.containerLayout(mod)) { .@"packed" => { - if (isByRef(result_ty, mod)) { + if (isByRef(result_ty, pt)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } const packed_struct = mod.typeToPackedStruct(result_ty).?; @@ -5318,7 +5384,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // ensure the result is zero'd const result = try func.allocLocal(backing_type); - if (backing_type.bitSize(mod) <= 32) + if (backing_type.bitSize(pt) <= 32) try func.addImm32(0) else try func.addImm64(0); @@ -5327,16 +5393,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var current_bit: u16 = 0; for (elements, 0..) |elem, elem_index| { const field_ty = Type.fromInterned(field_types.get(ip)[elem_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; - const shift_val = if (backing_type.bitSize(mod) <= 32) + const shift_val = if (backing_type.bitSize(pt) <= 32) WValue{ .imm32 = current_bit } else WValue{ .imm64 = current_bit }; const value = try func.resolveInst(elem); - const value_bit_size: u16 = @intCast(field_ty.bitSize(mod)); - const int_ty = try mod.intType(.unsigned, value_bit_size); + const value_bit_size: u16 = @intCast(field_ty.bitSize(pt)); + const int_ty = try pt.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations // using only stack values. Saving the cost of loads and stores. @@ -5359,10 +5425,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset var prev_field_offset: u64 = 0; for (elements, 0..) 
|elem, elem_index| { - if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; + if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue; const elem_ty = result_ty.structFieldType(elem_index, mod); - const field_offset = result_ty.structFieldOffset(elem_index, mod); + const field_offset = result_ty.structFieldOffset(elem_index, pt); _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify); prev_field_offset = field_offset; @@ -5389,14 +5455,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { const union_ty = func.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; @@ -5404,22 +5471,22 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const tag_int = blk: { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?; - const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); + const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index); break :blk try func.lowerConstant(tag_val, tag_ty); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { break :result WValue{ .none = {} }; } - assert(!isByRef(union_ty, mod)); + assert(!isByRef(union_ty, pt)); break :result tag_int; } - if (isByRef(union_ty, mod)) { + if (isByRef(union_ty, pt)) { const result_ptr = try func.allocStack(union_ty); const payload = try func.resolveInst(extra.init); if (layout.tag_align.compare(.gte, layout.payload_align)) { - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, field_ty, 0); } else { @@ -5443,14 +5510,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result result_ptr; } else { const operand = try func.resolveInst(extra.init); - const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod)))); + const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(pt)))); if (field_ty.zigTypeTag(mod) == .Float) { - const int_type = try mod.intType(.unsigned, @intCast(field_ty.bitSize(mod))); + const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt))); const bitcasted = try func.bitcast(field_ty, int_type, operand); const casted = try func.trunc(bitcasted, int_type, union_int_type); break :result try casted.toLocal(func, field_ty); } else if (field_ty.isPtrAtRuntime(mod)) { - const int_type = try mod.intType(.unsigned, @intCast(field_ty.bitSize(mod))); + const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt))); const casted = try func.intcast(operand, int_type, union_int_type); break :result try casted.toLocal(func, field_ty); } @@ -5488,8 +5555,9 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { } fn 
cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod)); + const pt = func.pt; + const mod = pt.zcu; + assert(operand_ty.hasRuntimeBitsIgnoreComptime(pt)); assert(op == .eq or op == .neq); const payload_ty = operand_ty.optionalChild(mod); @@ -5506,7 +5574,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: _ = try func.load(lhs, payload_ty, 0); _ = try func.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) }); + const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); try func.addLabel(.br_if, 0); @@ -5524,11 +5592,12 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: /// NOTE: Leaves the result of the comparison on top of the stack. /// TODO: Lower this to compiler_rt call when bitsize > 128 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - assert(operand_ty.abiSize(mod) >= 16); + const pt = func.pt; + const mod = pt.zcu; + assert(operand_ty.abiSize(pt) >= 16); assert(!(lhs != .stack and rhs == .stack)); - if (operand_ty.bitSize(mod) > 128) { - return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)}); + if (operand_ty.bitSize(pt) > 128) { + return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(pt)}); } var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); @@ -5566,11 +5635,12 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std } fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ty = func.typeOf(bin_op.lhs).childType(mod); const tag_ty = func.typeOf(bin_op.rhs); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); const union_ptr = try func.resolveInst(bin_op.lhs); @@ -5590,12 +5660,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = func.typeOf(ty_op.operand); const tag_ty = func.typeOfIndex(inst); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); const operand = try func.resolveInst(ty_op.operand); @@ -5695,7 +5765,8 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; + const pt = func.pt; + const mod = pt.zcu; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_set_ty = func.typeOf(ty_op.operand).childType(mod); @@ -5707,27 +5778,28 @@ fn 
airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))),
+        @intCast(errUnionErrorOffset(payload_ty, pt)),
     );
 
     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
-        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new);
+        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))), .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const field_ptr = try func.resolveInst(extra.field_ptr);
     const parent_ty = ty_pl.ty.toType().childType(mod);
-    const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
+    const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
 
     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5742,7 +5814,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     if (ptr_ty.isSlice(mod)) {
         return func.slicePtr(ptr);
     } else {
@@ -5751,7 +5824,8 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
 }
 
 fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.typeOf(bin_op.lhs);
@@ -5761,16 +5835,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const len = switch (dst_ty.ptrSize(mod)) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
-            if (ptr_elem_ty.abiSize(mod) != 1) {
+            if (ptr_elem_ty.abiSize(pt) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) });
+                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(pt))) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))),
+            .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(pt))),
         }),
         .C, .Many => unreachable,
     };
@@ -5791,7 +5865,8 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -5812,14 +5887,14 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         32 => {
             try func.emitWValue(operand);
             if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits));
+                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
             try func.addTag(.i32_popcnt);
         },
         64 => {
             try func.emitWValue(operand);
             if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits));
+                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
             try func.addTag(.i64_popcnt);
             try func.addTag(.i32_wrap_i64);
@@ -5830,7 +5905,7 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.addTag(.i64_popcnt);
             _ = try func.load(operand, Type.u64, 8);
             if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits - 64));
+                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
             }
             try func.addTag(.i64_popcnt);
             try func.addTag(.i64_add);
@@ -5845,7 +5920,8 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -5956,10 +6032,10 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     //
     // As the names are global and the slice elements are constant, we do not have
    // to make a copy of the ptr+value but can point towards them directly.
-    const error_table_symbol = try func.bin_file.getErrorTableSymbol();
+    const pt = func.pt;
+    const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
     const name_ty = Type.slice_const_u8_sentinel_0;
-    const mod = func.bin_file.base.comp.module.?;
-    const abi_size = name_ty.abiSize(mod);
+    const abi_size = name_ty.abiSize(pt);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
     try func.emitWValue(error_name_value);
@@ -5998,7 +6074,8 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
     const lhs_ty = func.typeOf(extra.lhs);
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
 
     if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
@@ -6044,14 +6121,15 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
     try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
 
 fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     assert(op == .add or op == .sub);
     const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
@@ -6116,7 +6194,8 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
 }
 
 fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -6159,7 +6238,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
     try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6172,7 +6251,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
     const lhs_ty = func.typeOf(extra.lhs);
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
 
     if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
@@ -6332,7 +6412,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
     try func.store(result_ptr, overflow_bit, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6340,7 +6420,8 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .max or op == .min);
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const target = mod.getTarget();
 
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -6349,7 +6430,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
-    if (ty.abiSize(mod) > 16) {
+    if (ty.abiSize(pt) > 16) {
         return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
@@ -6377,14 +6458,15 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     }
 
     // store result in local
-    const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
+    const result_ty = if (isByRef(ty, pt)) Type.u32 else ty;
     const result = try func.allocLocal(result_ty);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
@@ -6418,7 +6500,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOf(ty_op.operand);
@@ -6471,7 +6554,8 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOf(ty_op.operand);
@@ -6558,7 +6642,8 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void {
     if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
 
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const ty = func.typeOf(pl_op.operand);
     const operand = try func.resolveInst(pl_op.operand);
@@ -6591,7 +6676,8 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
@@ -6609,13 +6695,14 @@ fn lowerTry(
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     if (operand_is_ptr) {
         return func.fail("TODO: lowerTry for pointers", .{});
     }
 
     const pl_ty = err_union_ty.errorUnionPayload(mod);
-    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
+    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(pt);
 
     if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
         // Block we can jump out of when error is not set
@@ -6624,10 +6711,10 @@ fn lowerTry(
         // check if the error tag is set for the error union.
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+            const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
+                .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
             });
         }
         try func.addTag(.i32_eqz);
@@ -6649,8 +6736,8 @@ fn lowerTry(
         return WValue{ .none = {} };
     }
 
-    const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
-    if (isByRef(pl_ty, mod)) {
+    const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+    if (isByRef(pl_ty, pt)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }
     const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6658,7 +6745,8 @@ fn lowerTry(
 }
 
 fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOfIndex(inst);
@@ -6744,7 +6832,8 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6864,7 +6953,8 @@ fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6901,7 +6991,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
 
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6949,11 +7040,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 }
 
 fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits).?;
     const is_wasm_bits = wasm_bits == int_info.bits;
-    const ext_ty = if (!is_wasm_bits) try mod.intType(int_info.signedness, wasm_bits) else ty;
+    const ext_ty = if (!is_wasm_bits) try pt.intType(int_info.signedness, wasm_bits) else ty;
 
     const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1));
     const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1;
@@ -7007,7 +7099,8 @@ fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
 
 fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
@@ -7061,7 +7154,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         64 => WValue{ .imm64 = shift_size },
         else => unreachable,
     };
-    const ext_ty = try mod.intType(int_info.signedness, wasm_bits);
+    const ext_ty = try pt.intType(int_info.signedness, wasm_bits);
 
     var shl_res = try (try func.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(func, ext_ty);
     defer shl_res.free(func);
@@ -7128,13 +7221,14 @@ fn callIntrinsic(
     };
 
     // Always pass over C-ABI
-    const mod = func.bin_file.base.comp.module.?;
-    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
+    const pt = func.pt;
+    const mod = pt.zcu;
+    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt);
     defer func_type.deinit(func.gpa);
     const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
     try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
 
-    const want_sret_param = firstParamSRet(.C, return_type, mod);
+    const want_sret_param = firstParamSRet(.C, return_type, pt);
     // if we want return as first param, we allocate a pointer to stack,
     // and emit it as our first argument
     const sret = if (want_sret_param) blk: {
@@ -7146,14 +7240,14 @@ fn callIntrinsic(
 
     // Lower all arguments to the stack before we call our function
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
-        assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(mod));
+        assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(pt));
         try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
     }
 
     // Actually call our intrinsic
     try func.addLabel(.call, @intFromEnum(symbol_index));
 
-    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
         return WValue.none;
     } else if (return_type.isNoReturn(mod)) {
         try func.addTag(.@"unreachable");
@@ -7181,7 +7275,8 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const ip = &mod.intern_pool;
 
     const enum_decl_index = enum_ty.getOwnerDecl(mod);
@@ -7199,7 +7294,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
 
     const int_tag_ty = enum_ty.intTagType(mod);
 
-    if (int_tag_ty.bitSize(mod) > 64) {
+    if (int_tag_ty.bitSize(pt) > 64) {
         return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
     }
@@ -7225,16 +7320,17 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
         const tag_name_len = tag_name.length(ip);
         // for each tag name, create an unnamed const,
         // and then get a pointer to its value.
-        const name_ty = try mod.arrayType(.{
+        const name_ty = try pt.arrayType(.{
             .len = tag_name_len,
             .child = .u8_type,
             .sentinel = .zero_u8,
         });
-        const name_val = try mod.intern(.{ .aggregate = .{
+        const name_val = try pt.intern(.{ .aggregate = .{
            .ty = name_ty.toIntern(),
            .storage = .{ .bytes = tag_name.toString() },
        } });
        const tag_sym_index = try func.bin_file.lowerUnnamedConst(
+            pt,
            Value.fromInterned(name_val),
            enum_decl_index,
        );
@@ -7247,7 +7343,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
        try writer.writeByte(std.wasm.opcode(.local_get));
        try leb.writeUleb128(writer, @as(u32, 1));
 
-        const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+        const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
        const tag_value = try func.lowerConstant(tag_val, enum_ty);
 
        switch (tag_value) {
@@ -7334,13 +7430,14 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
    try writer.writeByte(std.wasm.opcode(.end));
 
    const slice_ty = Type.slice_const_u8_sentinel_0;
-    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
+    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt);
    const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
    return @intFromEnum(sym_index);
 }
 
 fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
    const ip = &mod.intern_pool;
    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
@@ -7426,7 +7523,8 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
 }
 
 fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
@@ -7445,7 +7543,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        try func.emitWValue(ptr_operand);
        try func.lowerToStack(expected_val);
        try func.lowerToStack(new_val);
-        try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
+        try func.addAtomicMemArg(switch (ty.abiSize(pt)) {
            1 => .i32_atomic_rmw8_cmpxchg_u,
            2 => .i32_atomic_rmw16_cmpxchg_u,
            4 => .i32_atomic_rmw_cmpxchg,
@@ -7453,14 +7551,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
            else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
        }, .{
            .offset = ptr_operand.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
        });
        try func.addLabel(.local_tee, val_local.local.value);
        _ = try func.cmp(.stack, expected_val, ty, .eq);
        try func.addLabel(.local_set, cmp_result.local.value);
        break :val val_local;
    } else val: {
-        if (ty.abiSize(mod) > 8) {
+        if (ty.abiSize(pt) > 8) {
            return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
        }
        const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7476,7 +7574,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        break :val ptr_val;
    };
 
-    const result_ptr = if (isByRef(result_ty, mod)) val: {
+    const result_ptr = if (isByRef(result_ty, pt)) val: {
        try func.emitWValue(cmp_result);
        try func.addImm32(~@as(u32, 0));
        try func.addTag(.i32_xor);
@@ -7484,7 +7582,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        try func.addTag(.i32_and);
        const and_result = try WValue.toLocal(.stack, func, Type.bool);
        const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod))));
+        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(pt))));
        try func.store(result_ptr, ptr_val, ty, 0);
        break :val result_ptr;
    } else val: {
@@ -7499,13 +7597,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
    const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
    const ptr = try func.resolveInst(atomic_load.ptr);
    const ty = func.typeOfIndex(inst);
 
    if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
            1 => .i32_atomic_load8_u,
            2 => .i32_atomic_load16_u,
            4 => .i32_atomic_load,
@@ -7515,7 +7613,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        try func.emitWValue(ptr);
        try func.addAtomicMemArg(tag, .{
            .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
        });
    } else {
        _ = try func.load(ptr, ty, 0);
@@ -7526,7 +7624,8 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
 
@@ -7550,7 +7649,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                try func.emitWValue(ptr);
                try func.emitWValue(value);
                if (op == .Nand) {
-                    const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
+                    const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
 
                    const and_res = try func.binOp(value, operand, ty, .@"and");
                    if (wasm_bits == 32)
@@ -7567,7 +7666,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                    try func.addTag(.select);
                }
                try func.addAtomicMemArg(
-                    switch (ty.abiSize(mod)) {
+                    switch (ty.abiSize(pt)) {
                        1 => .i32_atomic_rmw8_cmpxchg_u,
                        2 => .i32_atomic_rmw16_cmpxchg_u,
                        4 => .i32_atomic_rmw_cmpxchg,
@@ -7576,7 +7675,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                    },
                    .{
                        .offset = ptr.offset(),
-                        .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+                        .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
                    },
                );
                const select_res = try func.allocLocal(ty);
@@ -7595,7 +7694,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
            else => {
                try func.emitWValue(ptr);
                try func.emitWValue(operand);
-                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
                    1 => switch (op) {
                        .Xchg => .i32_atomic_rmw8_xchg_u,
                        .Add => .i32_atomic_rmw8_add_u,
@@ -7636,7 +7735,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                };
                try func.addAtomicMemArg(tag, .{
                    .offset = ptr.offset(),
-                    .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+                    .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
                });
                const result = try WValue.toLocal(.stack, func, ty);
                return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7681,7 +7780,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                    try func.store(.stack, .stack, ty, ptr.offset());
                },
                .Nand => {
-                    const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
+                    const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
 
                    try func.emitWValue(ptr);
 
                    const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7701,7 +7800,8 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const zcu = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const zcu = pt.zcu;
    // Only when the atomic feature is enabled, and we're not building
    // for a single-threaded build, can we emit the `fence` instruction.
    // In all other cases, we emit no instructions for a fence.
@@ -7715,7 +7815,8 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const ptr = try func.resolveInst(bin_op.lhs);
@@ -7724,7 +7825,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = ptr_ty.childType(mod);
 
     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
             1 => .i32_atomic_store8,
             2 => .i32_atomic_store16,
             4 => .i32_atomic_store,
@@ -7735,7 +7836,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
@@ -7754,11 +7855,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     return func.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
-    const mod = func.bin_file.base.comp.module.?;
+    const pt = func.pt;
+    const mod = pt.zcu;
     return func.air.typeOfIndex(inst, &mod.intern_pool);
 }
 
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 23097990ac74..1b61be1e848c 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -22,15 +22,16 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how they must be passed
 /// or returned as value within a wasm function.
 /// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
+    const mod = pt.zcu;
     const ip = &mod.intern_pool;
     const target = mod.getTarget();
-    if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
+    if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            const struct_type = mod.typeToStruct(ty).?;
+            const struct_type = pt.zcu.typeToStruct(ty).?;
             if (struct_type.layout == .@"packed") {
-                if (ty.bitSize(mod) <= 64) return direct;
+                if (ty.bitSize(pt) <= 64) return direct;
                 return .{ .direct, .direct };
             }
             if (struct_type.field_types.len > 1) {
@@ -40,13 +41,13 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
             const explicit_align = struct_type.fieldAlign(ip, 0);
             if (explicit_align != .none) {
-                if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
+                if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt)))
                     return memory;
             }
-            return classifyType(field_ty, mod);
+            return classifyType(field_ty, pt);
         },
         .Int, .Enum, .ErrorSet => {
-            const int_bits = ty.intInfo(mod).bits;
+            const int_bits = ty.intInfo(pt.zcu).bits;
             if (int_bits <= 64) return direct;
             if (int_bits <= 128) return .{ .direct, .direct };
             return memory;
@@ -61,24 +62,24 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
         .Vector => return direct,
         .Array => return memory,
         .Optional => {
-            assert(ty.isPtrLikeOptional(mod));
+            assert(ty.isPtrLikeOptional(pt.zcu));
             return direct;
         },
         .Pointer => {
-            assert(!ty.isSlice(mod));
+            assert(!ty.isSlice(pt.zcu));
             return direct;
         },
         .Union => {
-            const union_obj = mod.typeToUnion(ty).?;
+            const union_obj = pt.zcu.typeToUnion(ty).?;
             if (union_obj.getLayout(ip) == .@"packed") {
-                if (ty.bitSize(mod) <= 64) return direct;
+                if (ty.bitSize(pt) <= 64) return direct;
                 return .{ .direct, .direct };
             }
-            const layout = ty.unionGetLayout(mod);
+            const layout = ty.unionGetLayout(pt);
             assert(layout.tag_size == 0);
             if (union_obj.field_types.len > 1) return memory;
             const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
-            return classifyType(first_field_ty, mod);
+            return classifyType(first_field_ty, pt);
         },
         .ErrorUnion,
         .Frame,
@@ -100,28 +101,29 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
 /// Returns the scalar type a given type can represent.
 /// Asserts given type can be represented as scalar, such as
 /// a struct with a single scalar field.
-pub fn scalarType(ty: Type, mod: *Zcu) Type {
+pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
+    const mod = pt.zcu;
     const ip = &mod.intern_pool;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
+                return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
             } else {
                 assert(ty.structFieldCount(mod) == 1);
-                return scalarType(ty.structFieldType(0, mod), mod);
+                return scalarType(ty.structFieldType(0, mod), pt);
             }
         },
         .Union => {
             const union_obj = mod.typeToUnion(ty).?;
             if (union_obj.getLayout(ip) != .@"packed") {
-                const layout = mod.getUnionLayout(union_obj);
+                const layout = pt.getUnionLayout(union_obj);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
-                    return scalarType(ty.unionTagTypeSafety(mod).?, mod);
+                    return scalarType(ty.unionTagTypeSafety(mod).?, pt);
                 }
                 assert(union_obj.field_types.len == 1);
             }
             const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
-            return scalarType(first_field_ty, mod);
+            return scalarType(first_field_ty, pt);
         },
         else => return ty,
     }
 }
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 538b7400422a..dafeed00b80e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -19,7 +19,7 @@ const CodeGenError = codegen.CodeGenError;
 const Compilation = @import("../../Compilation.zig");
 const DebugInfoOutput = codegen.DebugInfoOutput;
 const DW = std.dwarf;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
 const Result = codegen.Result;
 const Emit = @import("Emit.zig");
 const Liveness = @import("../../Liveness.zig");
@@ -27,8 +27,6 @@ const Lower = @import("Lower.zig");
 const Mir = @import("Mir.zig");
 const Package = @import("../../Package.zig");
 const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const InternPool = @import("../../InternPool.zig");
 const Alignment = InternPool.Alignment;
 const Target = std.Target;
@@ -52,6 +50,7 @@ const FrameIndex = bits.FrameIndex;
 const InnerError = CodeGenError || error{OutOfRegisters};
 
 gpa: Allocator,
+pt: Zcu.PerThread,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -74,7 +73,7 @@ va_info: union {
 ret_mcv: InstTracking,
 fn_type: Type,
 arg_index: u32,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
 
 eflags_inst: ?Air.Inst.Index = null,
 
@@ -120,18 +119,18 @@ const Owner = union(enum) {
     func_index: InternPool.Index,
     lazy_sym: link.File.LazySymbol,
 
-    fn getDecl(owner: Owner, mod: *Module) InternPool.DeclIndex {
+    fn getDecl(owner: Owner, zcu: *Zcu) InternPool.DeclIndex {
         return switch (owner) {
-            .func_index => |func_index| mod.funcOwnerDeclIndex(func_index),
-            .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod),
+            .func_index => |func_index| zcu.funcOwnerDeclIndex(func_index),
+            .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(zcu),
         };
     }
 
     fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 {
+        const pt = ctx.pt;
         switch (owner) {
             .func_index => |func_index| {
-                const mod = ctx.bin_file.comp.module.?;
-                const decl_index = mod.funcOwnerDeclIndex(func_index);
+                const decl_index = ctx.pt.zcu.funcOwnerDeclIndex(func_index);
                 if (ctx.bin_file.cast(link.File.Elf)) |elf_file| {
                     return elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
                 } else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| {
@@ -145,17 +144,17 @@ const Owner = union(enum) {
             },
             .lazy_sym => |lazy_sym| {
                 if (ctx.bin_file.cast(link.File.Elf)) |elf_file| {
-                    return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
+                    return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
                         ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
                 } else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| {
-                    return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, lazy_sym) catch |err|
+                    return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err|
                         ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
                 } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| {
-                    const atom = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+                    const atom = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                         return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
                     return coff_file.getAtom(atom).getSymbolIndex().?;
                 } else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| {
-                    return p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+                    return p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                         return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
                 } else unreachable;
             },
@@ -753,14 +752,14 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, mod: *Module) FrameAlloc {
+    fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
         return init(.{
-            .size = ty.abiSize(mod),
-            .alignment = ty.abiAlignment(mod),
+            .size = ty.abiSize(pt),
+            .alignment = ty.abiAlignment(pt),
         });
     }
-    fn initSpill(ty: Type, mod: *Module) FrameAlloc {
-        const abi_size = ty.abiSize(mod);
+    fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+        const abi_size = ty.abiSize(pt);
         const spill_size = if (abi_size < 8)
             math.ceilPowerOfTwoAssert(u64, abi_size)
         else
@@ -768,7 +767,7 @@ const FrameAlloc = struct {
         return init(.{
             .size = spill_size,
             .pad = @intCast(spill_size - abi_size),
-            .alignment = ty.abiAlignment(mod).maxStrict(
+            .alignment = ty.abiAlignment(pt).maxStrict(
                 Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
             ),
         });
@@ -777,7 +776,7 @@ const StackAllocation = struct {
     inst: ?Air.Inst.Index,
-    /// TODO do we need size? should be determined by inst.ty.abiSize(mod)
+    /// TODO do we need size? should be determined by inst.ty.abiSize(pt)
     size: u32,
 };
 
@@ -795,16 +794,17 @@ const Self = @This();
 
 pub fn generate(
     bin_file: *link.File,
-    src_loc: Module.LazySrcLoc,
+    pt: Zcu.PerThread,
+    src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
 ) CodeGenError!Result {
-    const comp = bin_file.comp;
-    const gpa = comp.gpa;
-    const zcu = comp.module.?;
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const comp = zcu.comp;
     const func = zcu.funcInfo(func_index);
     const fn_owner_decl = zcu.declPtr(func.owner_decl);
     assert(fn_owner_decl.has_tv);
@@ -812,8 +812,9 @@ pub fn generate(
     const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
     const mod = namespace.fileScope(zcu).mod;
 
-    var function = Self{
+    var function: Self = .{
         .gpa = gpa,
+        .pt = pt,
         .air = air,
         .liveness = liveness,
         .target = &mod.resolved_target.result,
@@ -882,11 +883,11 @@ pub fn generate(
     function.args = call_info.args;
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(zcu),
-        .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
+        .size = Type.usize.abiSize(pt),
+        .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
     }));
     function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(zcu),
+        .size = Type.usize.abiSize(pt),
         .alignment = Alignment.min(
             call_info.stack_align,
             Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -971,7 +972,8 @@ pub fn generate(
 
 pub fn generateLazy(
     bin_file: *link.File,
-    src_loc: Module.LazySrcLoc,
+    pt: Zcu.PerThread,
+    src_loc: Zcu.LazySrcLoc,
     lazy_sym: link.File.LazySymbol,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
 ) CodeGenError!Result {
@@ -980,8 +982,9 @@ pub fn generateLazy(
     const comp = bin_file.comp;
     const gpa = comp.gpa;
     // This function is for generating global code, so we use the root module.
const mod = comp.root_mod; - var function = Self{ + var function: Self = .{ .gpa = gpa, + .pt = pt, .air = undefined, .liveness = undefined, .target = &mod.resolved_target.result, @@ -1065,7 +1068,7 @@ pub fn generateLazy( } const FormatDeclData = struct { - mod: *Module, + zcu: *Zcu, decl_index: InternPool.DeclIndex, }; fn formatDecl( @@ -1074,11 +1077,11 @@ fn formatDecl( _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer); + try data.zcu.declPtr(data.decl_index).renderFullyQualifiedName(data.zcu, writer); } fn fmtDecl(self: *Self, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { return .{ .data = .{ - .mod = self.bin_file.comp.module.?, + .zcu = self.pt.zcu, .decl_index = decl_index, } }; } @@ -1095,7 +1098,7 @@ fn formatAir( ) @TypeOf(writer).Error!void { @import("../../print_air.zig").dumpInst( data.inst, - data.self.bin_file.comp.module.?, + data.self.pt, data.self.air, data.self.liveness, ); @@ -1746,7 +1749,8 @@ fn asmMemoryRegisterImmediate( } fn gen(self: *Self) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const fn_info = mod.typeToFunc(self.fn_type).?; const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*); if (cc != .Naked) { @@ -1764,7 +1768,7 @@ fn gen(self: *Self) InnerError!void { // The address where to store the return value for the caller is in a // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt)); try self.genSetMem( .{ .frame = frame_index }, 0, @@ -1800,7 +1804,7 @@ fn gen(self: *Self) InnerError!void { try self.asmRegisterImmediate(.{ ._, .cmp }, .al, Immediate.u(info.fp_count)); const skip_sse_reloc = try self.asmJccReloc(.na, undefined); - const vec_2_f64 = try mod.vectorType(.{ .len = 2, .child = .f64_type }); + const vec_2_f64 = try pt.vectorType(.{ .len = 2, .child = .f64_type }); for (abi.SysV.c_abi_sse_param_regs[info.fp_count..], info.fp_count..) |reg, reg_i| try self.genSetMem( .{ .frame = reg_save_area_fi }, @@ -1951,7 +1955,8 @@ fn gen(self: *Self) InnerError!void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -2222,12 +2227,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; switch (lazy_sym.ty.zigTypeTag(mod)) { .Enum => { const enum_ty = lazy_sym.ty; - wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(mod)}); + wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)}); const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*); const param_regs = abi.getCAbiIntParamRegs(resolved_cc); @@ -2249,7 +2255,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const tag_names = enum_ty.enumFields(mod); for (exitlude_jump_relocs, 0..) 
|*exitlude_jump_reloc, tag_index| { const tag_name_len = tag_names.get(ip)[tag_index].length(ip); - const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index)); + const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index)); const tag_mcv = try self.genTypedValue(tag_val); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(.ne, undefined); @@ -2282,7 +2288,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { }, else => return self.fail( "TODO implement {s} for {}", - .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(mod) }, + .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) }, ), } } @@ -2481,14 +2487,15 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const val_ty = ptr_ty.childType(mod); return self.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); + .size = math.cast(u32, val_ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)}); }, - .alignment = ptr_ty.ptrAlignment(mod).max(.@"1"), + .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"), })); } @@ -2501,9 +2508,10 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { } fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const mod = self.bin_file.comp.module.?; - const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { - return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); + const pt = self.pt; + const mod = pt.zcu; + const abi_size = math.cast(u32, ty.abiSize(pt)) orelse { + return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)}); }; if (reg_ok) need_mem: { @@ -2529,12 +2537,13 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b } } - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, pt)); return .{ .load_frame = .{ .index = frame_index } }; } fn regClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; return switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 80 => abi.RegisterClass.x87, @@ -2849,7 +2858,8 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { } fn airFpext(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dst_ty = self.typeOfIndex(inst); const dst_scalar_ty = dst_ty.scalarType(mod); @@ -2892,14 +2902,14 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } }, &.{src_scalar_ty}, &.{.{ .air_ref = ty_op.operand }}); } - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else try self.copyToRegisterWithInstTracking(inst, 
dst_ty, src_mcv); const dst_reg = dst_mcv.getReg().?; - const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(mod), 16))); + const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(pt), 16))); const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); @@ -2978,19 +2988,20 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } break :result dst_mcv; } orelse return self.fail("TODO implement airFpext from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); const dst_ty = self.typeOfIndex(inst); const result = @as(?MCValue, result: { - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); const src_int_info = src_ty.intInfo(mod); const dst_int_info = dst_ty.intInfo(mod); @@ -3001,13 +3012,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = try self.resolveInst(ty_op.operand); if (dst_ty.isVector(mod)) { - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); const max_abi_size = @max(dst_abi_size, src_abi_size); if (max_abi_size > @as(u32, if (self.hasFeature(.avx2)) 32 else 16)) break :result null; const has_avx = self.hasFeature(.avx); - const dst_elem_abi_size = dst_ty.childType(mod).abiSize(mod); - const src_elem_abi_size = src_ty.childType(mod).abiSize(mod); + const dst_elem_abi_size = dst_ty.childType(mod).abiSize(pt); + const src_elem_abi_size = src_ty.childType(mod).abiSize(pt); switch (math.order(dst_elem_abi_size, src_elem_abi_size)) { .lt => { const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) { @@ -3236,19 +3247,20 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; }) orelse return self.fail("TODO implement airIntCast from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dst_ty = self.typeOfIndex(inst); - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); const src_ty = self.typeOf(ty_op.operand); - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -3278,9 +3290,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.zigTypeTag(mod) == .Vector) { assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod)); const dst_elem_ty = dst_ty.childType(mod); - const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(mod)); + const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(pt)); const src_elem_ty = src_ty.childType(mod); - const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(mod)); + const src_elem_abi_size: u32 = 
@intCast(src_elem_ty.abiSize(pt)); const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_elem_abi_size) { 1 => switch (src_elem_abi_size) { @@ -3305,20 +3317,20 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else => null, }, else => null, - }) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(mod)}); + }) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(pt)}); const dst_info = dst_elem_ty.intInfo(mod); const src_info = src_elem_ty.intInfo(mod); - const mask_val = try mod.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits)); + const mask_val = try pt.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits)); - const splat_ty = try mod.vectorType(.{ + const splat_ty = try pt.vectorType(.{ .len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), .child = src_elem_ty.ip_index, }); - const splat_abi_size: u32 = @intCast(splat_ty.abiSize(mod)); + const splat_abi_size: u32 = @intCast(splat_ty.abiSize(pt)); - const splat_val = try mod.intern(.{ .aggregate = .{ + const splat_val = try pt.intern(.{ .aggregate = .{ .ty = splat_ty.ip_index, .storage = .{ .repeated_elem = mask_val.ip_index }, } }); @@ -3375,7 +3387,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { } } else if (dst_abi_size <= 16) { const dst_info = dst_ty.intInfo(mod); - const high_ty = try mod.intType(dst_info.signedness, dst_info.bits - 64); + const high_ty = try pt.intType(dst_info.signedness, dst_info.bits - 64); if (self.regExtraBits(high_ty) > 0) { try self.truncateRegister(high_ty, dst_mcv.register_pair[1].to64()); } @@ -3400,12 +3412,12 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void { } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOfIndex(inst); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt)); const ptr_ty = self.typeOf(bin_op.lhs); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }, .{}); @@ -3413,7 +3425,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len_ty = self.typeOf(bin_op.rhs); try self.genSetMem( .{ .frame = frame_index }, - @intCast(ptr_ty.abiSize(mod)), + @intCast(ptr_ty.abiSize(pt)), len_ty, .{ .air_ref = bin_op.rhs }, .{}, @@ -3430,14 +3442,15 @@ fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { } fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs); const dst_ty = self.typeOfIndex(inst); if (dst_ty.isAbiInt(mod)) { - const abi_size: u32 = @intCast(dst_ty.abiSize(mod)); - const bit_size: u32 = @intCast(dst_ty.bitSize(mod)); + const abi_size: u32 = @intCast(dst_ty.abiSize(pt)); + const bit_size: u32 = @intCast(dst_ty.bitSize(pt)); if (abi_size * 8 > bit_size) { const dst_lock = switch (dst_mcv) { .register => |dst_reg| self.register_manager.lockRegAssumeUnused(dst_reg), @@ -3452,7 +3465,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const tmp_lock 
= self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const hi_ty = try mod.intType(.unsigned, @intCast((dst_ty.bitSize(mod) - 1) % 64 + 1)); + const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1)); const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref(); try self.genSetReg(tmp_reg, hi_ty, hi_mcv, .{}); try self.truncateRegister(dst_ty, tmp_reg); @@ -3471,7 +3484,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); @@ -3497,7 +3511,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } } else if (dst_air.toInterned()) |ip_index| { var space: Value.BigIntSpace = undefined; - const src_int = Value.fromInterned(ip_index).toBigInt(&space, mod); + const src_int = Value.fromInterned(ip_index).toBigInt(&space, pt); return @as(u16, @intCast(src_int.bitCountTwosComp())) + @intFromBool(src_int.positive and dst_info.signedness == .signed); } @@ -3505,7 +3519,8 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; @@ -3514,10 +3529,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, } - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); const dst_info = dst_ty.intInfo(mod); - const src_ty = try mod.intType(dst_info.signedness, switch (tag) { + const src_ty = try pt.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mul_wrap => @max( self.activeIntBits(bin_op.lhs), @@ -3526,7 +3541,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, }); - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); if (dst_abi_size == 16 and src_abi_size == 16) switch (tag) { else => unreachable, @@ -3539,7 +3554,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { state: State, reloc: Mir.Inst.Index, } = if (signed and tag == .div_floor) state: { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, pt)); try self.asmMemoryImmediate( .{ ._, .mov }, .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } }, @@ -3614,7 +3629,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { .rem, .mod => "mod", else => unreachable, }, - intCompilerRtAbiName(@intCast(dst_ty.bitSize(mod))), + intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))), }) catch unreachable, } }, &.{ src_ty, src_ty }, @@ -3643,7 +3658,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { .return_type = dst_ty.toIntern(), .param_types = &.{ src_ty.toIntern(), src_ty.toIntern() }, .callee = std.fmt.bufPrint(&callee_buf, "__div{c}i3", .{ - 
intCompilerRtAbiName(@intCast(dst_ty.bitSize(mod))), + intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))), }) catch unreachable, } }, &.{ src_ty, src_ty }, @@ -3734,12 +3749,13 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = self.typeOf(bin_op.lhs); - if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail( + if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail( "TODO implement airAddSat for {}", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ); const lhs_mcv = try self.resolveInst(bin_op.lhs); @@ -3804,7 +3820,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(mod)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(pt)), }, .{}); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3815,7 +3831,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2); try self.asmCmovccRegisterRegister( cc, registerAlias(dst_reg, cmov_abi_size), @@ -3834,12 +3850,13 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = self.typeOf(bin_op.lhs); - if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail( + if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail( "TODO implement airSubSat for {}", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ); const lhs_mcv = try self.resolveInst(bin_op.lhs); @@ -3908,7 +3925,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2); try self.asmCmovccRegisterRegister( cc, registerAlias(dst_reg, cmov_abi_size), @@ -3927,13 +3944,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = self.typeOf(bin_op.lhs); const result = result: { if (ty.toIntern() == .i128_type) { - const ptr_c_int = try mod.singleMutPtrType(Type.c_int); + const ptr_c_int = try pt.singleMutPtrType(Type.c_int); const overflow = try self.allocTempRegOrMem(Type.c_int, false); const dst_mcv = try self.genCall(.{ .lib = .{ @@ -4010,9 +4028,9 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail( + if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail( "TODO implement airMulSat for {}", - .{ty.fmt(mod)}, + .{ty.fmt(pt)}, ); try self.spillRegisters(&.{ .rax, .rcx, .rdx }); @@ -4061,7 +4079,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - 
-            const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+            const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
             try self.asmCmovccRegisterRegister(
                 cc,
                 registerAlias(dst_mcv.register, cmov_abi_size),
@@ -4073,7 +4091,8 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
-    const mod = self.bin_file.comp.module.?;
+    const pt = self.pt;
+    const mod = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
@@ -4109,17 +4128,17 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
                 try self.genSetMem(
                     .{ .frame = frame_index },
-                    @intCast(tuple_ty.structFieldOffset(1, mod)),
+                    @intCast(tuple_ty.structFieldOffset(1, pt)),
                     Type.u1,
                     .{ .eflags = cc },
                     .{},
                 );
                 try self.genSetMem(
                     .{ .frame = frame_index },
-                    @intCast(tuple_ty.structFieldOffset(0, mod)),
+                    @intCast(tuple_ty.structFieldOffset(0, pt)),
                     ty,
                     partial_mcv,
                     .{},
@@ -4128,7 +4147,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             }
             const frame_index =
-                try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+                try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
             try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
             break :result .{ .load_frame = .{ .index = frame_index } };
         },
@@ -4139,7 +4158,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
-    const mod = self.bin_file.comp.module.?;
+    const pt = self.pt;
+    const mod = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
@@ -4186,17 +4206,17 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
                 try self.genSetMem(
                     .{ .frame = frame_index },
-                    @intCast(tuple_ty.structFieldOffset(1, mod)),
+                    @intCast(tuple_ty.structFieldOffset(1, pt)),
                     tuple_ty.structFieldType(1, mod),
                     .{ .eflags = cc },
                     .{},
                 );
                 try self.genSetMem(
                     .{ .frame = frame_index },
-                    @intCast(tuple_ty.structFieldOffset(0, mod)),
+                    @intCast(tuple_ty.structFieldOffset(0, pt)),
                     tuple_ty.structFieldType(0, mod),
                     partial_mcv,
                     .{},
@@ -4205,7 +4225,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             }
             const frame_index =
-                try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+                try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
             try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
             break :result .{ .load_frame = .{ .index = frame_index } };
         },
@@ -4222,7 +4242,8 @@ fn genSetFrameTruncatedOverflowCompare(
     src_mcv: MCValue,
     overflow_cc: ?Condition,
 ) !void {
-    const mod = self.bin_file.comp.module.?;
+    const pt = self.pt;
+    const mod = pt.zcu;
     const src_lock = switch (src_mcv) {
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
@@ -4233,12 +4254,12 @@ fn genSetFrameTruncatedOverflowCompare(
     const int_info = ty.intInfo(mod);
     const hi_bits = (int_info.bits - 1) % 64 + 1;
-    const hi_ty = try mod.intType(int_info.signedness, hi_bits);
+    const hi_ty = try pt.intType(int_info.signedness, hi_bits);
 
     const limb_bits: u16 = @intCast(if (int_info.bits <= 64) self.regBitSize(ty) else 64);
-    const limb_ty = try mod.intType(int_info.signedness, limb_bits);
+    const limb_ty = try pt.intType(int_info.signedness, limb_bits);
 
-    const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_bits);
+    const rest_ty = try pt.intType(.unsigned, int_info.bits - hi_bits);
 
     const temp_regs =
         try self.register_manager.allocRegs(3, .{null} ** 3, abi.RegisterClass.gp);
@@ -4269,7 +4290,7 @@ fn genSetFrameTruncatedOverflowCompare(
         );
     }
 
-    const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, mod));
+    const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
     if (hi_limb_off > 0) try self.genSetMem(
         .{ .frame = frame_index },
         payload_off,
@@ -4286,7 +4307,7 @@ fn genSetFrameTruncatedOverflowCompare(
     );
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(tuple_ty.structFieldOffset(1, mod)),
+        @intCast(tuple_ty.structFieldOffset(1, pt)),
        tuple_ty.structFieldType(1, mod),
        if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
        .{},
@@ -4294,18 +4315,19 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
-    const mod = self.bin_file.comp.module.?;
+    const pt = self.pt;
+    const mod = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const tuple_ty = self.typeOfIndex(inst);
     const dst_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
-        .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(mod)}),
+        .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
         .Int => result: {
             const dst_info = dst_ty.intInfo(mod);
             if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
                 const slow_inc = self.hasFeature(.slow_incdec);
-                const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+                const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
                 const limb_len = math.divCeil(u32, abi_size, 8) catch unreachable;
 
                 try self.spillRegisters(&.{ .rax, .rcx, .rdx });
@@ -4316,7 +4338,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genInlineMemset(
                     dst_mcv.address(),
                     .{ .immediate = 0 },
-                    .{ .immediate = tuple_ty.abiSize(mod) },
+                    .{ .immediate = tuple_ty.abiSize(pt) },
                     .{},
                 );
                 const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -4356,7 +4378,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         .index = temp_regs[3].to64(),
                         .scale = .@"8",
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
                     } },
                 }, .rdx);
                 try self.asmSetccRegister(.c, .cl);
@@ -4380,7 +4402,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         .index = temp_regs[3].to64(),
                         .scale = .@"8",
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
                     } },
                 }, .rax);
                 try self.asmSetccRegister(.c, .ch);
@@ -4429,7 +4451,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .mod = .{ .rm = .{
                         .size = .byte,
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
                     } },
                 }, Immediate.u(1));
                 self.performReloc(no_overflow);
@@ -4453,11 +4475,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             const lhs_active_bits = self.activeIntBits(bin_op.lhs);
             const rhs_active_bits = self.activeIntBits(bin_op.rhs);
             const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
-            const src_ty = try mod.intType(dst_info.signedness, src_bits);
+            const src_ty = try pt.intType(dst_info.signedness, src_bits);
             if (src_bits > 64 and src_bits <= 128 and dst_info.bits > 64 and dst_info.bits <= 128) switch (dst_info.signedness) {
                 .signed => {
-                    const ptr_c_int = try mod.singleMutPtrType(Type.c_int);
+                    const ptr_c_int = try pt.singleMutPtrType(Type.c_int);
                     const overflow = try self.allocTempRegOrMem(Type.c_int, false);
                     const result = try self.genCall(.{ .lib = .{
                         .return_type = .i128_type,
@@ -4472,7 +4494,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     const dst_mcv = try self.allocRegOrMem(inst, false);
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(0, mod)),
+                        @intCast(tuple_ty.structFieldOffset(0, pt)),
                         tuple_ty.structFieldType(0, mod),
                         result,
                         .{},
@@ -4484,7 +4506,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     );
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(1, mod)),
+                        @intCast(tuple_ty.structFieldOffset(1, pt)),
                         tuple_ty.structFieldType(1, mod),
                         .{ .eflags = .ne },
                         .{},
@@ -4596,14 +4618,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     const dst_mcv = try self.allocRegOrMem(inst, false);
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(0, mod)),
+                        @intCast(tuple_ty.structFieldOffset(0, pt)),
                         tuple_ty.structFieldType(0, mod),
                         .{ .register_pair = .{ .rax, .rdx } },
                         .{},
                     );
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(1, mod)),
+                        @intCast(tuple_ty.structFieldOffset(1, pt)),
                         tuple_ty.structFieldType(1, mod),
                         .{ .register = tmp_regs[1] },
                         .{},
@@ -4636,7 +4658,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 self.eflags_inst = inst;
                 break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
             } else {
-                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
                 try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
@@ -4644,21 +4666,21 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             // For now, this is the only supported multiply that doesn't fit in a register.
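// Aside, not part of the patch: every hunk in this backend follows one recipe.
// The old prologue `const mod = self.bin_file.comp.module.?;` becomes
// `const pt = self.pt; const mod = pt.zcu;`, and `pt` is threaded into the
// type queries that can intern new values (abiSize, bitSize, intType,
// structFieldOffset, fmt, ...), while query-only helpers (zigTypeTag, intInfo,
// childType) keep taking `mod`. A minimal sketch of the rewritten shape, using
// a hypothetical airExample and only calls visible in these hunks:
fn airExample(self: *Self, inst: Air.Inst.Index) !void {
    const pt = self.pt; // per-thread view of the compilation (Zcu.PerThread)
    const mod = pt.zcu; // the Zcu itself, for read-only queries
    const ty = self.typeOfIndex(inst);
    if (ty.zigTypeTag(mod) == .Vector) // pure query: still takes mod
        return self.fail("TODO implement airExample for {}", .{ty.fmt(pt)});
    const abi_size: u32 = @intCast(ty.abiSize(pt)); // may intern: takes pt
    _ = abi_size;
}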
if (dst_info.bits > 128 or src_bits != 64) return self.fail("TODO implement airWithOverflow from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(tuple_ty.structFieldOffset(0, mod)), + @intCast(tuple_ty.structFieldOffset(0, pt)), tuple_ty.structFieldType(0, mod), partial_mcv, .{}, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(tuple_ty.structFieldOffset(1, mod)), + @intCast(tuple_ty.structFieldOffset(1, pt)), tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible .{}, @@ -4682,8 +4704,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const abi_size: u32 = @intCast(ty.abiSize(pt)); const bit_size: u32 = @intCast(self.regBitSize(ty)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); @@ -4732,8 +4754,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const mod = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); const int_info = ty.intInfo(mod); const dividend = switch (lhs) { .register => |reg| reg, @@ -4784,7 +4807,8 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa } fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const air_tags = self.air.instructions.items(.tag); @@ -4811,7 +4835,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const lhs_bits: u31 = @intCast(lhs_ty.bitSize(mod)); + const lhs_bits: u31 = @intCast(lhs_ty.bitSize(pt)); const tmp_ty = if (lhs_bits > 64) Type.usize else lhs_ty; const off = frame_addr.off + (lhs_bits - 1) / 64 * 8; try self.genSetReg( @@ -4922,11 +4946,11 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_q, .sll } else null, }, }, - })) |mir_tag| if (try self.air.value(bin_op.rhs, mod)) |rhs_val| { + })) |mir_tag| if (try self.air.value(bin_op.rhs, pt)) |rhs_val| { switch (mod.intern_pool.indexToKey(rhs_val.toIntern())) { .aggregate => |rhs_aggregate| switch (rhs_aggregate.storage) { .repeated_elem => |rhs_elem| { - const abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); + const abi_size: u32 = @intCast(lhs_ty.abiSize(pt)); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and @@ -4946,7 +4970,7 @@ fn airShlShrBinOp(self: *Self, inst: 
Air.Inst.Index) !void { self.register_manager.unlockReg(lock); const shift_imm = - Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(mod))); + Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(pt))); if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate( mir_tag, registerAlias(dst_reg, abi_size), @@ -4968,7 +4992,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { } } else if (bin_op.rhs.toIndex()) |rhs_inst| switch (air_tags[@intFromEnum(rhs_inst)]) { .splat => { - const abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); + const abi_size: u32 = @intCast(lhs_ty.abiSize(pt)); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and @@ -4991,13 +5015,13 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { const shift_lock = self.register_manager.lockRegAssumeUnused(shift_reg); defer self.register_manager.unlockReg(shift_lock); - const mask_ty = try mod.vectorType(.{ .len = 16, .child = .u8_type }); - const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type }); + const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = mask_ty.toIntern(), .storage = .{ .elems = &([1]InternPool.Index{ - (try rhs_ty.childType(mod).maxIntScalar(mod, Type.u8)).toIntern(), + (try rhs_ty.childType(mod).maxIntScalar(pt, Type.u8)).toIntern(), } ++ [1]InternPool.Index{ - (try mod.intValue(Type.u8, 0)).toIntern(), + (try pt.intValue(Type.u8, 0)).toIntern(), } ** 15) }, } }))); const mask_addr_reg = @@ -5045,7 +5069,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, } - return self.fail("TODO implement airShlShrBinOp for {}", .{lhs_ty.fmt(mod)}); + return self.fail("TODO implement airShlShrBinOp for {}", .{lhs_ty.fmt(pt)}); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -5058,11 +5082,11 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { } fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { const pl_ty = self.typeOfIndex(inst); - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const opt_mcv = try self.resolveInst(ty_op.operand); if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) { @@ -5104,7 +5128,8 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { const dst_ty = self.typeOfIndex(inst); @@ -5130,7 +5155,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(mod); - const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod)); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt)); try self.genSetMem( .{ .reg = dst_mcv.getReg().? 
}, pl_abi_size, @@ -5144,7 +5169,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(mod); @@ -5156,11 +5182,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { break :result operand; } - const err_off = errUnionErrorOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, pt); switch (operand) { .register => |reg| { // TODO reuse operand @@ -5197,7 +5223,8 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { // *(E!T) -> E fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); @@ -5217,8 +5244,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); - const err_abi_size: u32 = @intCast(err_ty.abiSize(mod)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); + const err_abi_size: u32 = @intCast(err_ty.abiSize(pt)); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -5244,7 +5271,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { const src_ty = self.typeOf(ty_op.operand); @@ -5259,8 +5287,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); - const err_abi_size: u32 = @intCast(err_ty.abiSize(mod)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); + const err_abi_size: u32 = @intCast(err_ty.abiSize(pt)); try self.asmMemoryImmediate( .{ ._, .mov }, .{ @@ -5283,8 +5311,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -5304,13 +5332,14 @@ fn genUnwrapErrUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); const result: MCValue = 
result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; - const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, mod)); + const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt)); switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, @@ -5353,12 +5382,13 @@ fn genUnwrapErrUnionPayloadPtrMir( ptr_ty: Type, ptr_mcv: MCValue, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_union_ty = ptr_ty.childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); const result: MCValue = result: { - const payload_off = errUnionPayloadOffset(payload_ty, mod); + const payload_off = errUnionPayloadOffset(payload_ty, pt); const result_mcv: MCValue = if (maybe_inst) |inst| try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr_mcv) else @@ -5387,11 +5417,12 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { const pl_ty = self.typeOf(ty_op.operand); - if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; + if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 }; const opt_ty = self.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); @@ -5408,7 +5439,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv, .{}); if (!same_repr) { - const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod)); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt)); switch (opt_mcv) { else => unreachable, @@ -5441,7 +5472,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); @@ -5450,11 +5482,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 }; - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, mod)); - const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand, .{}); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }, .{}); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -5464,7 +5496,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const eu_ty = ty_op.ty.toType(); @@ -5472,11 +5505,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = eu_ty.errorUnionSet(mod); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try self.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, mod)); - const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); - const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt)); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef, .{}); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand, .{}); @@ -5523,7 +5556,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); @@ -5544,7 +5577,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -5591,7 +5624,8 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi } fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const slice_ty = self.typeOf(lhs); const slice_mcv = try self.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { @@ -5601,7 +5635,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock); const elem_ty = slice_ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const index_ty = self.typeOf(rhs); @@ -5627,12 +5661,13 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = result: { const elem_ty = self.typeOfIndex(inst); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const slice_ty = self.typeOf(bin_op.lhs); const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); @@ -5652,7 +5687,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const result: MCValue = result: { @@ -5675,7 +5711,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { defer if (index_lock) |lock| self.register_manager.unlockReg(lock); try self.spillEflagsIfOccupied(); - if (array_ty.isVector(mod) and elem_ty.bitSize(mod) == 1) { + if (array_ty.isVector(mod) and elem_ty.bitSize(pt) == 1) { const index_reg = switch (index_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(index_ty, index_mcv), @@ -5688,7 +5724,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { index_reg.to64(), ), .sse => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt)); try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{}); try self.asmMemoryRegister( .{ ._, .bt }, @@ -5717,7 +5753,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { index_reg.to64(), ), else => return self.fail("TODO airArrayElemVal for {s} of {}", .{ - @tagName(array_mcv), array_ty.fmt(mod), + @tagName(array_mcv), array_ty.fmt(pt), }), } @@ -5726,14 +5762,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register = dst_reg }; } - const elem_abi_size = elem_ty.abiSize(mod); + const elem_abi_size = elem_ty.abiSize(pt); const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); switch (array_mcv) { .register => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt)); try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{}); try self.asmRegisterMemory( .{ ._, .lea }, @@ -5757,7 +5793,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { => try self.genSetReg(addr_reg, Type.usize, array_mcv.address(), .{}), .lea_symbol, .lea_direct, .lea_tlv => unreachable, else => return self.fail("TODO airArrayElemVal_val for {s} of {}", .{ - @tagName(array_mcv), array_ty.fmt(mod), + @tagName(array_mcv), array_ty.fmt(pt), }), } @@ -5781,7 +5817,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -5790,9 +5827,9 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const elem_ty = ptr_ty.elemType2(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; - const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod)); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt)); const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -5831,7 +5868,8 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, 
ty_pl.payload).data; @@ -5854,7 +5892,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { } const elem_ty = base_ptr_ty.elemType2(mod); - const elem_abi_size = elem_ty.abiSize(mod); + const elem_abi_size = elem_ty.abiSize(pt); const index_ty = self.typeOf(extra.rhs); const index_mcv = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index_mcv) { @@ -5876,12 +5914,13 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_union_ty = self.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(mod); const tag_ty = self.typeOf(bin_op.rhs); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -5913,19 +5952,19 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { break :blk MCValue{ .register = reg }; } else ptr; - const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty); + const ptr_tag_ty = try pt.adjustPtrTypeChild(ptr_union_ty, tag_ty); try self.store(ptr_tag_ty, adjusted_ptr, tag, .{}); return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const tag_ty = self.typeOfIndex(inst); const union_ty = self.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none }); @@ -5939,7 +5978,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const tag_abi_size = tag_ty.abiSize(mod); + const tag_abi_size = tag_ty.abiSize(pt); const dst_mcv: MCValue = blk: { switch (operand) { .load_frame => |frame_addr| { @@ -5983,7 +6022,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airClz(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { try self.spillEflagsIfOccupied(); @@ -5991,7 +6031,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{ - src_ty.fmt(mod), + src_ty.fmt(pt), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -6010,8 +6050,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const abi_size: u31 = @intCast(src_ty.abiSize(mod)); - const src_bits: u31 = @intCast(src_ty.bitSize(mod)); + const abi_size: u31 = @intCast(src_ty.abiSize(pt)); + const src_bits: u31 = @intCast(src_ty.bitSize(pt)); const has_lzcnt = self.hasFeature(.lzcnt); if (src_bits > @as(u32, if (has_lzcnt) 128 else 64)) { const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable; @@ -6121,7 +6161,7 @@ fn airClz(self: *Self, 
inst: Air.Inst.Index) !void { } assert(src_bits <= 64); - const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2); if (math.isPowerOfTwo(src_bits)) { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), @@ -6179,7 +6219,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } fn airCtz(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result = result: { try self.spillEflagsIfOccupied(); @@ -6187,7 +6228,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airCtz for {}", .{ - src_ty.fmt(mod), + src_ty.fmt(pt), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -6206,8 +6247,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const abi_size: u31 = @intCast(src_ty.abiSize(mod)); - const src_bits: u31 = @intCast(src_ty.bitSize(mod)); + const abi_size: u31 = @intCast(src_ty.abiSize(pt)); + const src_bits: u31 = @intCast(src_ty.bitSize(pt)); const has_bmi = self.hasFeature(.bmi); if (src_bits > @as(u32, if (has_bmi) 128 else 64)) { const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable; @@ -6328,7 +6369,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, wide_ty, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2); try self.asmCmovccRegisterRegister( .z, registerAlias(dst_reg, cmov_abi_size), @@ -6340,15 +6381,16 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn airPopCount(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const result: MCValue = result: { try self.spillEflagsIfOccupied(); const src_ty = self.typeOf(ty_op.operand); - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 16) - return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(mod)}); + return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)}); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -6385,7 +6427,7 @@ fn airPopCount(self: *Self, inst: Air.Inst.Index) !void { else .{ .register = mat_src_mcv.register_pair[0] }, false); const src_info = src_ty.intInfo(mod); - const hi_ty = try mod.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1); + const hi_ty = try pt.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1); try self.genPopCount(tmp_regs[1], hi_ty, if (mat_src_mcv.isMemory()) mat_src_mcv.address().offset(8).deref() else @@ -6403,16 +6445,16 @@ fn genPopCount( src_mcv: MCValue, dst_contains_src: bool, ) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; - const src_abi_size: u32 = 
@intCast(src_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); if (self.hasFeature(.popcnt)) return self.genBinOpMir( .{ ._, .popcnt }, if (src_abi_size > 1) src_ty else Type.u32, .{ .register = dst_reg }, if (src_abi_size > 1) src_mcv else src: { if (!dst_contains_src) try self.genSetReg(dst_reg, src_ty, src_mcv, .{}); - try self.truncateRegister(try src_ty.toUnsigned(mod), dst_reg); + try self.truncateRegister(try src_ty.toUnsigned(pt), dst_reg); break :src .{ .register = dst_reg }; }, ); @@ -6495,13 +6537,14 @@ fn genByteSwap( src_mcv: MCValue, mem_ok: bool, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const has_movbe = self.hasFeature(.movbe); if (src_ty.zigTypeTag(mod) == .Vector) return self.fail( "TODO implement genByteSwap for {}", - .{src_ty.fmt(mod)}, + .{src_ty.fmt(pt)}, ); const src_lock = switch (src_mcv) { @@ -6510,7 +6553,7 @@ fn genByteSwap( }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const abi_size: u32 = @intCast(src_ty.abiSize(pt)); switch (abi_size) { 0 => unreachable, 1 => return if ((mem_ok or src_mcv.isRegister()) and @@ -6658,11 +6701,12 @@ fn genByteSwap( } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_bits: u32 = @intCast(src_ty.bitSize(mod)); + const src_bits: u32 = @intCast(src_ty.bitSize(pt)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true); @@ -6674,18 +6718,19 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { src_ty, dst_mcv, if (src_bits > 256) Type.u16 else Type.u8, - .{ .immediate = src_ty.abiSize(mod) * 8 - src_bits }, + .{ .immediate = src_ty.abiSize(pt) * 8 - src_bits }, ); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_ty = self.typeOf(ty_op.operand); - const abi_size: u32 = @intCast(src_ty.abiSize(mod)); - const bit_size: u32 = @intCast(src_ty.bitSize(mod)); + const abi_size: u32 = @intCast(src_ty.abiSize(pt)); + const bit_size: u32 = @intCast(src_ty.bitSize(pt)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false); @@ -6802,14 +6847,15 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; const result = result: { const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); if (scalar_bits == 80) { if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement floatSign for {}", .{ - ty.fmt(mod), + ty.fmt(pt), }); const src_mcv = try self.resolveInst(operand); @@ -6829,11 +6875,11 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) break :result dst_mcv; } - const abi_size: u32 = switch 
(ty.abiSize(mod)) { + const abi_size: u32 = switch (ty.abiSize(pt)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement floatSign for {}", .{ - ty.fmt(mod), + ty.fmt(pt), }), }; @@ -6852,14 +6898,14 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const vec_ty = try mod.vectorType(.{ + const vec_ty = try pt.vectorType(.{ .len = @divExact(abi_size * 8, scalar_bits), - .child = (try mod.intType(.signed, scalar_bits)).ip_index, + .child = (try pt.intType(.signed, scalar_bits)).ip_index, }); const sign_mcv = try self.genTypedValue(switch (tag) { - .neg => try vec_ty.minInt(mod, vec_ty), - .abs => try vec_ty.maxInt(mod, vec_ty), + .neg => try vec_ty.minInt(pt, vec_ty), + .abs => try vec_ty.maxInt(pt, vec_ty), else => unreachable, }); const sign_mem: Memory = if (sign_mcv.isMemory()) @@ -6891,7 +6937,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) .abs => .{ .v_pd, .@"and" }, else => unreachable, }, - 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(mod)}), + 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}), else => unreachable, }, registerAlias(dst_reg, abi_size), @@ -6917,7 +6963,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) .abs => .{ ._pd, .@"and" }, else => unreachable, }, - 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(mod)}), + 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}), else => unreachable, }, registerAlias(dst_reg, abi_size), @@ -6978,7 +7024,8 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: RoundMode) !void { } fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, @@ -7010,11 +7057,12 @@ fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag { } fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; if (self.getRoundTag(ty)) |_| return .none; if (ty.zigTypeTag(mod) != .Float) - return self.fail("TODO implement genRound for {}", .{ty.fmt(mod)}); + return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)}); var callee_buf: ["__trunc?".len]u8 = undefined; return try self.genCall(.{ .lib = .{ @@ -7034,12 +7082,12 @@ fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MC } fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: RoundMode) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const mir_tag = self.getRoundTag(ty) orelse { const result = try self.genRoundLibcall(ty, src_mcv, mode); return self.genSetReg(dst_reg, ty, result, .{}); }; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -7076,14 +7124,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro } fn airAbs(self: *Self, inst: Air.Inst.Index) !void { - const 
mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty = self.typeOf(ty_op.operand); const result: MCValue = result: { const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { else => null, - .Int => switch (ty.abiSize(mod)) { + .Int => switch (ty.abiSize(pt)) { 0 => unreachable, 1...8 => { try self.spillEflagsIfOccupied(); @@ -7092,7 +7141,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { try self.genUnOpMir(.{ ._, .neg }, ty, dst_mcv); - const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2); switch (src_mcv) { .register => |val_reg| try self.asmCmovccRegisterRegister( .l, @@ -7151,7 +7200,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; }, else => { - const abi_size: u31 = @intCast(ty.abiSize(mod)); + const abi_size: u31 = @intCast(ty.abiSize(pt)); const limb_len = math.divCeil(u31, abi_size, 8) catch unreachable; const tmp_regs = @@ -7249,9 +7298,9 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { }, .Float => return self.floatSign(inst, ty_op.operand, ty), }, - }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(mod)}); + }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)}); - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_reg = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv.getReg().? @@ -7276,10 +7325,11 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { } fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ty = self.typeOf(un_op); - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const result: MCValue = result: { switch (ty.zigTypeTag(mod)) { @@ -7408,7 +7458,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => unreachable, }) orelse return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(mod), + ty.fmt(pt), }); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( @@ -7521,14 +7571,15 @@ fn reuseOperandAdvanced( } fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ptr_info = ptr_ty.ptrInfo(mod); const val_ty = Type.fromInterned(ptr_info.child); - if (!val_ty.hasRuntimeBitsIgnoreComptime(mod)) return; - const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); + if (!val_ty.hasRuntimeBitsIgnoreComptime(pt)) return; + const val_abi_size: u32 = @intCast(val_ty.abiSize(pt)); - const val_bit_size: u32 = @intCast(val_ty.bitSize(mod)); + const val_bit_size: u32 = @intCast(val_ty.bitSize(pt)); const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) { .none => 0, .runtime => unreachable, @@ -7566,7 +7617,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn return; } - if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{val_ty.fmt(mod)}); + if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{val_ty.fmt(pt)}); const 
limb_abi_size: u31 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; @@ -7633,9 +7684,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn } fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const dst_ty = ptr_ty.childType(mod); - if (!dst_ty.hasRuntimeBitsIgnoreComptime(mod)) return; + if (!dst_ty.hasRuntimeBitsIgnoreComptime(pt)) return; switch (ptr_mcv) { .none, .unreach, @@ -7675,18 +7727,19 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); const ptr_ty = self.typeOf(ty_op.operand); - const elem_size = elem_ty.abiSize(mod); + const elem_size = elem_ty.abiSize(pt); const elem_rc = self.regClassForType(elem_ty); const ptr_rc = self.regClassForType(ptr_ty); @@ -7706,7 +7759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { try self.load(dst_mcv, ptr_ty, ptr_mcv); } - if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(mod)) { + if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(pt)) { const high_mcv: MCValue = switch (dst_mcv) { .register => |dst_reg| .{ .register = dst_reg }, .register_pair => |dst_regs| .{ .register = dst_regs[1] }, @@ -7733,16 +7786,17 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ptr_info = ptr_ty.ptrInfo(mod); const src_ty = Type.fromInterned(ptr_info.child); - if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) return; + if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return; const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8); const limb_abi_bits = limb_abi_size * 8; - const limb_ty = try mod.intType(.unsigned, limb_abi_bits); + const limb_ty = try pt.intType(.unsigned, limb_abi_bits); - const src_bit_size = src_ty.bitSize(mod); + const src_bit_size = src_ty.bitSize(pt); const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) { .none => 0, .runtime => unreachable, @@ -7827,7 +7881,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In limb_mem, registerAlias(tmp_reg, limb_abi_size), ); - } else return self.fail("TODO: implement packed store of {}", .{src_ty.fmt(mod)}); + } else return self.fail("TODO: implement packed store of {}", .{src_ty.fmt(pt)}); } } @@ -7838,9 +7892,10 @@ fn store( src_mcv: MCValue, opts: CopyOptions, ) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const src_ty = ptr_ty.childType(mod); - if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) return; + if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return; switch (ptr_mcv) { .none, .unreach, @@ -7880,7 
+7935,8 @@ fn store( } fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; result: { @@ -7918,15 +7974,16 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { } fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); const field_off: i32 = switch (container_ty.containerLayout(mod)) { - .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod)), + .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)), .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(mod).packed_offset.bit_offset) + - (if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, index) else 0) - + (if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) - ptr_field_ty.ptrInfo(mod).packed_offset.bit_offset, 8), }; @@ -7940,7 +7997,8 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const result: MCValue = result: { @@ -7950,14 +8008,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const container_ty = self.typeOf(operand); const container_rc = self.regClassForType(container_ty); const field_ty = container_ty.structFieldType(index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none; const field_rc = self.regClassForType(field_ty); const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp); const src_mcv = try self.resolveInst(operand); const field_off: u32 = switch (container_ty.containerLayout(mod)) { - .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, mod) * 8), - .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0, + .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, pt) * 8), + .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0, }; switch (src_mcv) { @@ -7988,7 +8046,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); } if (abi.RegisterClass.gp.isSet(RegisterManager.indexOfRegIntoTracked(dst_reg).?) 
and - container_ty.abiSize(mod) * 8 > field_ty.bitSize(mod)) + container_ty.abiSize(pt) * 8 > field_ty.bitSize(pt)) try self.truncateRegister(field_ty, dst_reg); break :result if (field_off == 0 or field_rc.supersetOf(abi.RegisterClass.gp)) @@ -8000,7 +8058,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const src_regs_lock = self.register_manager.lockRegsAssumeUnused(2, src_regs); defer for (src_regs_lock) |lock| self.register_manager.unlockReg(lock); - const field_bit_size: u32 = @intCast(field_ty.bitSize(mod)); + const field_bit_size: u32 = @intCast(field_ty.bitSize(pt)); const src_reg = if (field_off + field_bit_size <= 64) src_regs[0] else if (field_off >= 64) @@ -8044,7 +8102,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } if (field_bit_size < 128) try self.truncateRegister( - try mod.intType(.unsigned, @intCast(field_bit_size - 64)), + try pt.intType(.unsigned, @intCast(field_bit_size - 64)), dst_regs[1], ); break :result if (field_rc.supersetOf(abi.RegisterClass.gp)) @@ -8099,14 +8157,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } }, .load_frame => |frame_addr| { - const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); + const field_abi_size: u32 = @intCast(field_ty.abiSize(pt)); if (field_off % 8 == 0) { const field_byte_off = @divExact(field_off, 8); const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref(); - const field_bit_size = field_ty.bitSize(mod); + const field_bit_size = field_ty.bitSize(pt); if (field_abi_size <= 8) { - const int_ty = try mod.intType( + const int_ty = try pt.intType( if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned, @intCast(field_bit_size), ); @@ -8127,7 +8185,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv); } - const container_abi_size: u32 = @intCast(container_ty.abiSize(mod)); + const container_abi_size: u32 = @intCast(container_ty.abiSize(pt)); const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and self.reuseOperand(inst, operand, 0, src_mcv)) off_mcv @@ -8228,16 +8286,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(mod); const field_off: i32 = switch (parent_ty.containerLayout(mod)) { - .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, mod)), + .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, pt)), .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(mod).packed_offset.bit_offset) + - (if (mod.typeToStruct(parent_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) - + (if (mod.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) - self.typeOf(extra.field_ptr).ptrInfo(mod).packed_offset.bit_offset, 8), }; @@ -8252,10 +8311,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const 
src_ty = self.typeOf(src_air); if (src_ty.zigTypeTag(mod) == .Vector) - return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(mod)}); + return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)}); var src_mcv = try self.resolveInst(src_air); switch (src_mcv) { @@ -8290,7 +8350,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: }; defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const abi_size: u16 = @intCast(src_ty.abiSize(mod)); + const abi_size: u16 = @intCast(src_ty.abiSize(pt)); switch (tag) { .not => { const limb_abi_size: u16 = @min(abi_size, 8); @@ -8304,7 +8364,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: .signed => abi_size * 8, .unsigned => int_info.bits, } - byte_off * 8, limb_abi_size * 8)); - const limb_ty = try mod.intType(int_info.signedness, limb_bits); + const limb_ty = try pt.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, else => dst_mcv.address().offset(byte_off).deref(), @@ -8340,9 +8400,9 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: } fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(dst_ty.abiSize(mod)); - if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(mod) }); + const pt = self.pt; + const abi_size: u32 = @intCast(dst_ty.abiSize(pt)); + if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(pt) }); switch (dst_mcv) { .none, .unreach, @@ -8389,9 +8449,9 @@ fn genShiftBinOpMir( rhs_ty: Type, rhs_mcv: MCValue, ) !void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); - const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(mod)); + const pt = self.pt; + const abi_size: u32 = @intCast(lhs_ty.abiSize(pt)); + const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(pt)); try self.spillEflagsIfOccupied(); if (abi_size > 16) { @@ -9046,9 +9106,10 @@ fn genShiftBinOp( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{ - lhs_ty.fmt(mod), + lhs_ty.fmt(pt), }); try self.register_manager.getKnownReg(.rcx, null); @@ -9104,13 +9165,14 @@ fn genMulDivBinOp( lhs_mcv: MCValue, rhs_mcv: MCValue, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail( "TODO implement genMulDivBinOp for {s} from {} to {}", - .{ @tagName(tag), src_ty.fmt(mod), dst_ty.fmt(mod) }, + .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) }, ); - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); - const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(pt)); assert(self.register_manager.isRegFree(.rax)); assert(self.register_manager.isRegFree(.rcx)); @@ -9299,13 +9361,13 @@ fn genMulDivBinOp( .signed => {}, .unsigned => { const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false); - const manyptr_u32_ty = try mod.ptrType(.{ + const manyptr_u32_ty = try pt.ptrType(.{ .child = .u32_type, .flags = .{ .size = .Many, }, }); - const manyptr_const_u32_ty = try 
mod.ptrType(.{ + const manyptr_const_u32_ty = try pt.ptrType(.{ .child = .u32_type, .flags = .{ .size = .Many, @@ -9348,7 +9410,7 @@ fn genMulDivBinOp( } return self.fail( "TODO implement genMulDivBinOp for {s} from {} to {}", - .{ @tagName(tag), src_ty.fmt(mod), dst_ty.fmt(mod) }, + .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) }, ); } const ty = if (dst_abi_size <= 8) dst_ty else src_ty; @@ -9515,10 +9577,11 @@ fn genBinOp( lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const lhs_ty = self.typeOf(lhs_air); const rhs_ty = self.typeOf(rhs_air); - const abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); + const abi_size: u32 = @intCast(lhs_ty.abiSize(pt)); if (lhs_ty.isRuntimeFloat()) libcall: { const float_bits = lhs_ty.floatBits(self.target.*); @@ -9556,7 +9619,7 @@ fn genBinOp( floatLibcAbiSuffix(lhs_ty), }), else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), } catch unreachable; const result = try self.genCall(.{ .lib = .{ @@ -9668,7 +9731,7 @@ fn genBinOp( break :adjusted .{ .register = dst_reg }; }, 80, 128 => return self.fail("TODO implement genBinOp for {s} of {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), else => unreachable, }; @@ -9700,8 +9763,8 @@ fn genBinOp( }; if (sse_op and ((lhs_ty.scalarType(mod).isRuntimeFloat() and lhs_ty.scalarType(mod).floatBits(self.target.*) == 80) or - lhs_ty.abiSize(mod) > @as(u6, if (self.hasFeature(.avx)) 32 else 16))) - return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(mod) }); + lhs_ty.abiSize(pt) > @as(u6, if (self.hasFeature(.avx)) 32 else 16))) + return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) }); const maybe_mask_reg = switch (air_tag) { else => null, @@ -9857,7 +9920,7 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const elem_size = lhs_ty.elemType2(mod).abiSize(mod); + const elem_size = lhs_ty.elemType2(mod).abiSize(pt); try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); try self.genBinOpMir( switch (air_tag) { @@ -10003,7 +10066,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2); + const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(pt))), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -10082,7 +10145,7 @@ fn genBinOp( }, else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), } return dst_mcv; @@ -10835,7 +10898,7 @@ fn genBinOp( }, }, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }); const lhs_copy_reg = if (maybe_mask_reg) |_| registerAlias( @@ -10978,7 +11041,7 @@ fn genBinOp( }, else => unreachable, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), mask_reg, rhs_copy_reg, @@ -11010,7 +11073,7 @@ fn genBinOp( }, else => unreachable, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), dst_reg, dst_reg, @@ -11046,7 
+11109,7 @@ fn genBinOp( }, else => unreachable, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), mask_reg, mask_reg, @@ -11077,7 +11140,7 @@ fn genBinOp( }, else => unreachable, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }), dst_reg, lhs_copy_reg.?, @@ -11107,7 +11170,7 @@ fn genBinOp( }, else => unreachable, }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(mod), + @tagName(air_tag), lhs_ty.fmt(pt), }); try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_reg, mask_reg); try self.asmRegisterRegister(.{ mir_fixes, .andn }, mask_reg, lhs_copy_reg.?); @@ -11125,8 +11188,8 @@ fn genBinOp( .cmp_gte, .cmp_neq, => { - const unsigned_ty = try lhs_ty.toUnsigned(mod); - const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(mod, unsigned_ty)); + const unsigned_ty = try lhs_ty.toUnsigned(pt); + const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(pt, unsigned_ty)); const not_mem: Memory = if (not_mcv.isMemory()) try not_mcv.mem(self, Memory.Size.fromSize(abi_size)) else @@ -11195,8 +11258,9 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const mod = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); try self.spillEflagsIfOccupied(); switch (dst_mcv) { .none, @@ -11358,7 +11422,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - const ptr_ty = try mod.singleConstPtrType(ty); + const ptr_ty = try pt.singleConstPtrType(ty); const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_limb_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg, .off = off }, @@ -11619,8 +11683,8 @@ fn genBinOpMir( /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. /// Does not support byte-size operands. 
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const pt = self.pt; + const abi_size: u32 = @intCast(dst_ty.abiSize(pt)); try self.spillEflagsIfOccupied(); switch (dst_mcv) { .none, @@ -11746,7 +11810,8 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M } fn airArg(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; // skip zero-bit arguments as they don't have a corresponding arg instruction var arg_index = self.arg_index; while (self.args[arg_index] == .none) arg_index += 1; @@ -11808,7 +11873,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { try self.genInlineMemset( dst_mcv.address().offset(@intFromBool(regs_frame_addr.regs > 0)), .{ .immediate = 0 }, - .{ .immediate = arg_ty.abiSize(mod) - @intFromBool(regs_frame_addr.regs > 0) }, + .{ .immediate = arg_ty.abiSize(pt) - @intFromBool(regs_frame_addr.regs > 0) }, .{}, ); @@ -11865,7 +11930,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (self.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { @@ -11901,7 +11967,8 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -12020,7 +12087,8 @@ fn genCall(self: *Self, info: union(enum) { callee: []const u8, }, }, arg_types: []const Type, args: []const MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const fn_ty = switch (info) { .air => |callee| fn_info: { @@ -12031,7 +12099,7 @@ fn genCall(self: *Self, info: union(enum) { else => unreachable, }; }, - .lib => |lib| try mod.funcType(.{ + .lib => |lib| try pt.funcType(.{ .param_types = lib.param_types, .return_type = lib.return_type, .cc = .C, @@ -12101,7 +12169,7 @@ fn genCall(self: *Self, info: union(enum) { try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs)); }, .indirect => |reg_off| { - frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, mod)); + frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, pt)); try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg, .{}); try self.register_manager.getReg(reg_off.reg, null); try reg_locks.append(self.register_manager.lockReg(reg_off.reg)); @@ -12173,7 +12241,7 @@ fn genCall(self: *Self, info: union(enum) { .none, .unreach => {}, .indirect => |reg_off| { const ret_ty = Type.fromInterned(fn_info.return_type); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }, .{}); @@ -12188,14 +12256,14 @@ fn genCall(self: *Self, info: union(enum) { .none, .load_frame => {}, .register => |dst_reg| switch (fn_info.cc) { else => try self.genSetReg( - registerAlias(dst_reg, @intCast(arg_ty.abiSize(mod))), + registerAlias(dst_reg, @intCast(arg_ty.abiSize(pt))), arg_ty, src_arg, .{}, ), .C, .SysV, .Win64 => { 
const promoted_ty = self.promoteInt(arg_ty); - const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(mod)); + const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(pt)); const dst_alias = registerAlias(dst_reg, promoted_abi_size); try self.genSetReg(dst_alias, promoted_ty, src_arg, .{}); if (promoted_ty.toIntern() != arg_ty.toIntern()) @@ -12246,7 +12314,7 @@ fn genCall(self: *Self, info: union(enum) { // Due to incremental compilation, how function calls are generated depends // on linking. switch (info) { - .air => |callee| if (try self.air.value(callee, mod)) |func_value| { + .air => |callee| if (try self.air.value(callee, pt)) |func_value| { const func_key = mod.intern_pool.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, @@ -12332,7 +12400,8 @@ fn genCall(self: *Self, info: union(enum) { } fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.fn_type.fnReturnType(mod); @@ -12387,7 +12456,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; var ty = self.typeOf(bin_op.lhs); var null_compare: ?Mir.Inst.Index = null; @@ -12457,9 +12527,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { }, .Optional => if (!ty.optionalReprIsPayload(mod)) { const opt_ty = ty; - const opt_abi_size: u31 = @intCast(opt_ty.abiSize(mod)); + const opt_abi_size: u31 = @intCast(opt_ty.abiSize(pt)); ty = opt_ty.optionalChild(mod); - const payload_abi_size: u31 = @intCast(ty.abiSize(mod)); + const payload_abi_size: u31 = @intCast(ty.abiSize(pt)); const temp_lhs_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const temp_lhs_lock = self.register_manager.lockRegAssumeUnused(temp_lhs_reg); @@ -12518,7 +12588,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { switch (ty.zigTypeTag(mod)) { else => { - const abi_size: u16 = @intCast(ty.abiSize(mod)); + const abi_size: u16 = @intCast(ty.abiSize(pt)); const may_flip: enum { may_flip, must_flip, @@ -12845,7 +12915,8 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); @@ -12856,7 +12927,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); const op_ty = self.typeOf(un_op); - const op_abi_size: u32 = @intCast(op_ty.abiSize(mod)); + const op_abi_size: u32 = @intCast(op_ty.abiSize(pt)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -12987,8 +13058,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !Mir.Inst.Index { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const abi_size = ty.abiSize(pt); switch (mcv) { .eflags => |cc| { // Here we map the opposites since the jump is 
to the false branch. @@ -13060,7 +13131,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (opt_mcv) { .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() }, else => {}, @@ -13073,7 +13145,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool }; self.eflags_inst = inst; switch (opt_mcv) { @@ -13098,14 +13170,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.ip_index == .bool_type); - const opt_abi_size: u32 = @intCast(opt_ty.abiSize(mod)); + const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -13125,7 +13197,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address(), .{}); - const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt)); try self.asmMemoryImmediate( .{ ._, .cmp }, .{ @@ -13141,7 +13213,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt)); try self.asmMemoryImmediate( .{ ._, .cmp }, switch (opt_mcv) { @@ -13169,7 +13241,8 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC } fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const opt_ty = ptr_ty.childType(mod); const pl_ty = opt_ty.optionalChild(mod); @@ -13178,7 +13251,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool }; + .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -13187,7 +13260,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt)); try self.asmMemoryImmediate( .{ ._, .cmp }, .{ @@ -13205,13 +13278,14 
@@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) } fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const err_ty = eu_ty.errorUnionSet(mod); if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false try self.spillEflagsIfOccupied(); - const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod)); + const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt)); switch (eu_mcv) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); @@ -13253,7 +13327,8 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) } fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const eu_ty = ptr_ty.childType(mod); const err_ty = eu_ty.errorUnionSet(mod); if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false @@ -13267,7 +13342,7 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod)); + const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt)); try self.asmMemoryImmediate( .{ ._, .cmp }, .{ @@ -13539,12 +13614,12 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) void { } fn airBr(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block_ty = self.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(pt) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; const block_data = self.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -13600,7 +13675,8 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn airAsm(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const clobbers_len: u31 = @truncate(extra.data.flags); @@ -13664,7 +13740,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { 'x' => abi.RegisterClass.sse, else => unreachable, }) orelse return self.fail("ran out of registers lowering inline asm", .{}), - @intCast(ty.abiSize(mod)), + @intCast(ty.abiSize(pt)), ) else if (mem.eql(u8, rest, "m")) if (output != .none) null else return self.fail( @@ -13734,7 +13810,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { break :arg input_mcv; const reg = try self.register_manager.allocReg(null, rc); try self.genSetReg(reg, ty, input_mcv, .{}); - break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(mod))) }; + break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(pt))) }; } else if (mem.eql(u8, constraint, "i") or mem.eql(u8, constraint, "n")) switch (input_mcv) { .immediate => |imm| .{ .immediate = imm }, @@ -14310,18 
+14386,19 @@ const MoveStrategy = union(enum) { } }; fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !MoveStrategy { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; switch (class) { .general_purpose, .segment => return .{ .move = .{ ._, .mov } }, .x87 => return .x87_load_store, .mmx => {}, .sse => switch (ty.zigTypeTag(mod)) { else => { - const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none); + const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none); assert(std.mem.indexOfNone(abi.Class, classes, &.{ .integer, .sse, .sseup, .memory, .float, .float_combine, }) == null); - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(pt); if (abi_size < 4 or std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ @@ -14532,7 +14609,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo }, .ip => {}, } - return self.fail("TODO moveStrategy for {}", .{ty.fmt(mod)}); + return self.fail("TODO moveStrategy for {}", .{ty.fmt(pt)}); } const CopyOptions = struct { @@ -14540,7 +14617,7 @@ const CopyOptions = struct { }; fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: CopyOptions) InnerError!void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); @@ -14601,7 +14678,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy opts, ), else => return self.fail("TODO implement genCopy for {s} of {}", .{ - @tagName(src_mcv), ty.fmt(mod), + @tagName(src_mcv), ty.fmt(pt), }), }; defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); @@ -14617,7 +14694,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy } }, else => unreachable, }, opts); - part_disp += @intCast(dst_ty.abiSize(mod)); + part_disp += @intCast(dst_ty.abiSize(pt)); } }, .indirect => |reg_off| try self.genSetMem( @@ -14658,9 +14735,10 @@ fn genSetReg( src_mcv: MCValue, opts: CopyOptions, ) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); - if (ty.bitSize(mod) > dst_reg.bitSize()) + const pt = self.pt; + const mod = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); + if (ty.bitSize(pt) > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { .none, @@ -14686,7 +14764,7 @@ fn genSetReg( ), else => unreachable, }, - .segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(try mod.undefValue(ty)), opts), + .segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(try pt.undefValue(ty)), opts), .ip => unreachable, }, .eflags => |cc| try self.asmSetccRegister(cc, dst_reg.to8()), @@ -14797,7 +14875,7 @@ fn genSetReg( 80 => null, else => unreachable, }, - }) orelse return self.fail("TODO implement genSetReg for {}", .{ty.fmt(mod)}), + }) orelse return self.fail("TODO implement genSetReg for {}", .{ty.fmt(pt)}), registerAlias(dst_reg, abi_size), registerAlias(src_reg, abi_size), ), @@ -14847,7 +14925,7 @@ fn genSetReg( return (try self.moveStrategy( ty, dst_reg.class(), - ty.abiAlignment(mod).check(@as(u32, @bitCast(small_addr))), + 
ty.abiAlignment(pt).check(@as(u32, @bitCast(small_addr))), )).read(self, registerAlias(dst_reg, abi_size), .{ .base = .{ .reg = .ds }, .mod = .{ .rm = .{ @@ -14967,8 +15045,9 @@ fn genSetMem( src_mcv: MCValue, opts: CopyOptions, ) InnerError!void { - const mod = self.bin_file.comp.module.?; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const pt = self.pt; + const mod = pt.zcu; + const abi_size: u32 = @intCast(ty.abiSize(pt)); const dst_ptr_mcv: MCValue = switch (base) { .none => .{ .immediate = @bitCast(@as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -15094,21 +15173,21 @@ fn genSetMem( var part_disp: i32 = disp; for (try self.splitType(ty), src_regs) |src_ty, src_reg| { try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }, opts); - part_disp += @intCast(src_ty.abiSize(mod)); + part_disp += @intCast(src_ty.abiSize(pt)); } }, .register_overflow => |ro| switch (ty.zigTypeTag(mod)) { .Struct => { try self.genSetMem( base, - disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))), + disp + @as(i32, @intCast(ty.structFieldOffset(0, pt))), ty.structFieldType(0, mod), .{ .register = ro.reg }, opts, ); try self.genSetMem( base, - disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))), + disp + @as(i32, @intCast(ty.structFieldOffset(1, pt))), ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, opts, @@ -15120,14 +15199,14 @@ fn genSetMem( try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts); try self.genSetMem( base, - disp + @as(i32, @intCast(child_ty.abiSize(mod))), + disp + @as(i32, @intCast(child_ty.abiSize(pt))), Type.bool, .{ .eflags = ro.eflags }, opts, ); }, else => return self.fail("TODO implement genSetMem for {s} of {}", .{ - @tagName(src_mcv), ty.fmt(mod), + @tagName(src_mcv), ty.fmt(pt), }), }, .register_offset, @@ -15236,8 +15315,9 @@ fn genLazySymbolRef( reg: Register, lazy_sym: link.File.LazySymbol, ) InnerError!void { + const pt = self.pt; if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err| + const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym = elf_file.symbol(sym_index); if (self.mod.pic) { @@ -15273,7 +15353,7 @@ fn genLazySymbolRef( } } } else if (self.bin_file.cast(link.File.Plan9)) |p9_file| { - const atom_index = p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + const atom_index = p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); var atom = p9_file.getAtom(atom_index); _ = atom.getOrCreateOffsetTableEntry(p9_file); @@ -15300,7 +15380,7 @@ fn genLazySymbolRef( else => unreachable, } } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + const atom_index = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; switch (tag) { @@ -15314,7 +15394,7 @@ fn genLazySymbolRef( else => unreachable, } } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const sym_index = macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, lazy_sym) catch |err| + const sym_index = 
macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym = macho_file.getSymbol(sym_index); switch (tag) { @@ -15353,7 +15433,8 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); @@ -15366,10 +15447,10 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(mod) <= src_ty.abiSize(mod) and + const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(switch (math.order(dst_ty.abiSize(mod), src_ty.abiSize(mod))) { + try self.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) { .lt => dst_ty, .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, .gt => src_ty, @@ -15382,8 +15463,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.isAbiInt(mod) and src_ty.isAbiInt(mod) and dst_ty.intInfo(mod).signedness == src_ty.intInfo(mod).signedness) break :result dst_mcv; - const abi_size = dst_ty.abiSize(mod); - const bit_size = dst_ty.bitSize(mod); + const abi_size = dst_ty.abiSize(pt); + const bit_size = dst_ty.bitSize(pt); if (abi_size * 8 <= bit_size or dst_ty.isVector(mod)) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, @intCast(bit_size), 64) catch unreachable; @@ -15412,7 +15493,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ty = self.typeOfIndex(inst); @@ -15421,11 +15503,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(mod); const array_len = array_ty.arrayLen(mod); - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr, .{}); try self.genSetMem( .{ .frame = frame_index }, - @intCast(ptr_ty.abiSize(mod)), + @intCast(ptr_ty.abiSize(pt)), Type.usize, .{ .immediate = array_len }, .{}, @@ -15436,14 +15518,15 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); const src_ty = self.typeOf(ty_op.operand); - const src_bits: u32 = @intCast(src_ty.bitSize(mod)); + const src_bits: u32 = @intCast(src_ty.bitSize(pt)); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const 
src_size = math.divCeil(u32, @max(switch (src_signedness) { @@ -15458,7 +15541,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }) { if (src_bits > 128) return self.fail("TODO implement airFloatFromInt from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); var callee_buf: ["__floatun?i?f".len]u8 = undefined; @@ -15500,7 +15583,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { }, else => null, }) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); const dst_alias = dst_reg.to128(); const src_alias = registerAlias(src_reg, src_size); @@ -15515,11 +15598,12 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { } fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dst_ty = self.typeOfIndex(inst); - const dst_bits: u32 = @intCast(dst_ty.bitSize(mod)); + const dst_bits: u32 = @intCast(dst_ty.bitSize(pt)); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { @@ -15537,7 +15621,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }) { if (dst_bits > 128) return self.fail("TODO implement airIntFromFloat from {} to {}", .{ - src_ty.fmt(mod), dst_ty.fmt(mod), + src_ty.fmt(pt), dst_ty.fmt(pt), }); var callee_buf: ["__fixuns?f?i".len]u8 = undefined; @@ -15586,13 +15670,13 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = self.typeOf(extra.ptr); const val_ty = self.typeOf(extra.expected_value); - const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); + const val_abi_size: u32 = @intCast(val_ty.abiSize(pt)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -15682,7 +15766,8 @@ fn atomicOp( rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -15695,7 +15780,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); + const val_abi_size: u32 = @intCast(val_ty.abiSize(pt)); const mem_size = Memory.Size.fromSize(val_abi_size); const ptr_mem: Memory = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, mem_size), @@ -15809,7 +15894,7 @@ fn atomicOp( }, else => unreachable, }) orelse return self.fail("TODO implement atomicOp of {s} for {}", .{ - @tagName(op), val_ty.fmt(mod), + @tagName(op), val_ty.fmt(pt), }); try self.genSetReg(sse_reg, val_ty, .{ .register = .rax }, .{}); switch (mir_tag[0]) { @@ -16086,7 +16171,8 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: 
Air.Inst.Index, safety: bool) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; result: { @@ -16112,7 +16198,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size: u31 = @intCast(elem_ty.abiSize(mod)); + const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { @@ -16185,7 +16271,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { self.performReloc(skip_reloc); }, .One => { - const elem_ptr_ty = try mod.singleMutPtrType(elem_ty); + const elem_ptr_ty = try pt.singleMutPtrType(elem_ty); const len = dst_ptr_ty.childType(mod).arrayLen(mod); @@ -16214,7 +16300,8 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); @@ -16246,13 +16333,13 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { .{ .i_, .mul }, len_reg, try dst_ptr.address().offset(8).deref().mem(self, .qword), - Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(mod))), + Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(pt))), ); break :len .{ .register = len_reg }; }, .One => len: { const array_ty = dst_ptr_ty.childType(mod); - break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(mod) }; + break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(pt) }; }, .C, .Many => unreachable, }; @@ -16269,7 +16356,8 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { } fn airTagName(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = self.typeOfIndex(inst); const enum_ty = self.typeOf(un_op); @@ -16278,8 +16366,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { // We need a properly aligned and sized call frame to be able to call this function. 
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(mod), - .alignment = inst_ty.abiAlignment(mod), + .size = inst_ty.abiSize(pt), + .alignment = inst_ty.abiAlignment(pt), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -16311,7 +16399,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const err_ty = self.typeOf(un_op); @@ -16413,7 +16502,8 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const vector_ty = self.typeOfIndex(inst); const vector_len = vector_ty.vectorLen(mod); @@ -16495,15 +16585,15 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const src_mcv = try self.resolveInst(ty_op.operand); if (src_mcv.isMemory()) try self.asmRegisterMemory( mir_tag, - registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod))), + registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))), try src_mcv.mem(self, self.memSize(scalar_ty)), ) else { if (mir_tag[0] == .v_i128) break :avx2; try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{}); try self.asmRegisterRegister( mir_tag, - registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod))), - registerAlias(dst_reg, @intCast(scalar_ty.abiSize(mod))), + registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))), + registerAlias(dst_reg, @intCast(scalar_ty.abiSize(pt))), ); } break :result .{ .register = dst_reg }; @@ -16515,7 +16605,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(dst_reg, scalar_ty, .{ .air_ref = ty_op.operand }, .{}); if (vector_len == 1) break :result .{ .register = dst_reg }; - const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod))); + const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))); const scalar_bits = scalar_ty.intInfo(mod).bits; if (switch (scalar_bits) { 1...8 => true, @@ -16745,20 +16835,21 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }, } - return self.fail("TODO implement airSplat for {}", .{vector_ty.fmt(mod)}); + return self.fail("TODO implement airSplat for {}", .{vector_ty.fmt(pt)}); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSelect(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const ty = self.typeOfIndex(inst); const vec_len = ty.vectorLen(mod); const elem_ty = ty.childType(mod); - const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod)); - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const pred_ty = self.typeOf(pl_op.operand); const result = result: { @@ -16878,17 +16969,17 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }), ); - } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)}); + } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); const elem_bits: u16 
= @intCast(elem_abi_size * 8); - const mask_elem_ty = try mod.intType(.unsigned, elem_bits); - const mask_ty = try mod.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() }); + const mask_elem_ty = try pt.intType(.unsigned, elem_bits); + const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() }); if (!pred_fits_in_elem) if (self.hasFeature(.ssse3)) { var mask_elems: [32]InternPool.Index = undefined; - for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try mod.intern(.{ .int = .{ + for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{ .ty = mask_elem_ty.toIntern(), .storage = .{ .u64 = bit / elem_bits }, } }); - const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = mask_ty.toIntern(), .storage = .{ .elems = mask_elems[0..vec_len] }, } }))); @@ -16906,14 +16997,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { mask_alias, mask_mem, ); - } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)}); + } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); { var mask_elems: [32]InternPool.Index = undefined; - for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try mod.intern(.{ .int = .{ + for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{ .ty = mask_elem_ty.toIntern(), .storage = .{ .u64 = @as(u32, 1) << @intCast(bit & (elem_bits - 1)) }, } }); - const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = mask_ty.toIntern(), .storage = .{ .elems = mask_elems[0..vec_len] }, } }))); @@ -17014,7 +17105,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { else => null, }, }, - }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)}); + }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); if (has_avx) { const rhs_alias = if (rhs_mcv.isRegister()) registerAlias(rhs_mcv.getReg().?, abi_size) @@ -17061,7 +17152,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)}); + }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias); if (rhs_mcv.isMemory()) try self.asmRegisterMemory( .{ mir_fixes, .andn }, @@ -17083,18 +17174,19 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { } fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const dst_ty = self.typeOfIndex(inst); const elem_ty = dst_ty.childType(mod); - const elem_abi_size: u16 = @intCast(elem_ty.abiSize(mod)); - const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const elem_abi_size: u16 = @intCast(elem_ty.abiSize(pt)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt)); const lhs_ty = self.typeOf(extra.a); - const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); + const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(pt)); const rhs_ty = self.typeOf(extra.b); - const rhs_abi_size: u32 = 
@intCast(rhs_ty.abiSize(mod)); + const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(pt)); const max_abi_size = @max(dst_abi_size, lhs_abi_size, rhs_abi_size); const ExpectedContents = [32]?i32; @@ -17106,11 +17198,11 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { defer allocator.free(mask_elems); for (mask_elems, 0..) |*mask_elem, elem_index| { const mask_elem_val = - Value.fromInterned(extra.mask).elemValue(mod, elem_index) catch unreachable; + Value.fromInterned(extra.mask).elemValue(pt, elem_index) catch unreachable; mask_elem.* = if (mask_elem_val.isUndef(mod)) null else - @intCast(mask_elem_val.toSignedInt(mod)); + @intCast(mask_elem_val.toSignedInt(pt)); } const has_avx = self.hasFeature(.avx); @@ -17626,8 +17718,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { else self.hasFeature(.avx2)) 32 else 16)) break :blendv; - const select_mask_elem_ty = try mod.intType(.unsigned, elem_abi_size * 8); - const select_mask_ty = try mod.vectorType(.{ + const select_mask_elem_ty = try pt.intType(.unsigned, elem_abi_size * 8); + const select_mask_ty = try pt.vectorType(.{ .len = @intCast(mask_elems.len), .child = select_mask_elem_ty.toIntern(), }); @@ -17643,11 +17735,11 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { if (mask_elem_index != elem_index) break :blendv; select_mask_elem.* = (if (mask_elem < 0) - try select_mask_elem_ty.maxIntScalar(mod, select_mask_elem_ty) + try select_mask_elem_ty.maxIntScalar(pt, select_mask_elem_ty) else - try select_mask_elem_ty.minIntScalar(mod, select_mask_elem_ty)).toIntern(); + try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern(); } - const select_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const select_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = select_mask_ty.toIntern(), .storage = .{ .elems = select_mask_elems[0..mask_elems.len] }, } }))); @@ -17783,7 +17875,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { var lhs_mask_elems: [16]InternPool.Index = undefined; for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| { const elem_index = byte_index / elem_abi_size; - lhs_mask_elem.* = try mod.intern(.{ .int = .{ + lhs_mask_elem.* = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; @@ -17794,8 +17886,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { } }, } }); } - const lhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type }); - const lhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type }); + const lhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = lhs_mask_ty.toIntern(), .storage = .{ .elems = lhs_mask_elems[0..max_abi_size] }, } }))); @@ -17817,7 +17909,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { var rhs_mask_elems: [16]InternPool.Index = undefined; for (rhs_mask_elems[0..max_abi_size], 0..) 
|*rhs_mask_elem, byte_index| { const elem_index = byte_index / elem_abi_size; - rhs_mask_elem.* = try mod.intern(.{ .int = .{ + rhs_mask_elem.* = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; @@ -17828,8 +17920,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { } }, } }); } - const rhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type }); - const rhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{ + const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type }); + const rhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = rhs_mask_ty.toIntern(), .storage = .{ .elems = rhs_mask_elems[0..max_abi_size] }, } }))); @@ -17881,14 +17973,15 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { break :result null; }) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{ - lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod), - Value.fromInterned(extra.mask).fmtValue(mod, null), + lhs_ty.fmt(pt), rhs_ty.fmt(pt), dst_ty.fmt(pt), + Value.fromInterned(extra.mask).fmtValue(pt, null), }); return self.finishAir(inst, result, .{ extra.a, extra.b, .none }); } fn airReduce(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const result: MCValue = result: { @@ -17898,9 +17991,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { const operand_mcv = try self.resolveInst(reduce.operand); const mask_len = (math.cast(u6, operand_ty.vectorLen(mod)) orelse - return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)})); + return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)})); const mask = (@as(u64, 1) << mask_len) - 1; - const abi_size: u32 = @intCast(operand_ty.abiSize(mod)); + const abi_size: u32 = @intCast(operand_ty.abiSize(pt)); switch (reduce.operation) { .Or => { if (operand_mcv.isMemory()) try self.asmMemoryImmediate( @@ -17936,16 +18029,17 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_reg, tmp_reg); break :result .{ .eflags = .z }; }, - else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}), + else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}), } } - return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}); + return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}); }; return self.finishAir(inst, result, .{ reduce.operand, .none, .none }); } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const result_ty = self.typeOfIndex(inst); const len: usize = @intCast(result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -17953,30 +18047,30 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { switch (result_ty.zigTypeTag(mod)) { .Struct => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt)); if (result_ty.containerLayout(mod) == 
.@"packed") { const struct_obj = mod.typeToStruct(result_ty).?; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(mod) }, + .{ .immediate = result_ty.abiSize(pt) }, .{}, ); for (elements, 0..) |elem, elem_i_usize| { const elem_i: u32 = @intCast(elem_i_usize); - if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_bit_size: u32 = @intCast(elem_ty.bitSize(mod)); + const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod)); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = mod.structPackedFieldBitOffset(struct_obj, elem_i); + const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i); const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -18046,10 +18140,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) |elem, elem_i| { - if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, mod)); + const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -18062,7 +18156,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Array, .Vector => { const elem_ty = result_ty.childType(mod); if (result_ty.isVector(mod) and elem_ty.toIntern() == .bool_type) { - const result_size: u32 = @intCast(result_ty.abiSize(mod)); + const result_size: u32 = @intCast(result_ty.abiSize(pt)); const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp); try self.asmRegisterRegister( .{ ._, .xor }, @@ -18093,8 +18187,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } break :result .{ .register = dst_reg }; } else { - const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod)); - const elem_size: u32 = @intCast(elem_ty.abiSize(mod)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt)); + const elem_size: u32 = @intCast(elem_ty.abiSize(pt)); for (elements, 0..) 
|elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -18136,18 +18230,19 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { const union_ty = self.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { - if (layout.abi_size <= src_ty.abiSize(mod) and + if (layout.abi_size <= src_ty.abiSize(pt) and self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -18161,9 +18256,9 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; const tag_ty = Type.fromInterned(union_obj.enum_tag_ty); const field_index = tag_ty.enumFieldIndex(field_name, mod).?; - const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); - const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); - const tag_int = tag_int_val.toUnsignedInt(mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); + const tag_int_val = try tag_val.intFromEnum(tag_ty, pt); + const tag_int = tag_int_val.toUnsignedInt(pt); const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align)) @intCast(layout.payload_size) else @@ -18192,7 +18287,8 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const ty = self.typeOfIndex(inst); @@ -18205,7 +18301,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }) { if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement airMulAdd for {}", .{ - ty.fmt(mod), + ty.fmt(pt), }); var callee_buf: ["__fma?".len]u8 = undefined; @@ -18334,12 +18430,12 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else - unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ty.fmt(mod)}); + unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ty.fmt(pt)}); var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size: u32 = @intCast(ty.abiSize(mod)); + const abi_size: u32 = @intCast(ty.abiSize(pt)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -18359,9 +18455,10 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const va_list_ty = self.air.instructions.items(.data)[@intFromEnum(inst)].ty; - const ptr_anyopaque_ty = try mod.singleMutPtrType(Type.anyopaque); + const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque); const result: MCValue = switch 
(abi.resolveCallingConvention( self.fn_type.fnCallingConvention(mod), @@ -18369,7 +18466,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { )) { .SysV => result: { const info = self.va_info.sysv; - const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, mod)); + const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, pt)); var field_off: u31 = 0; // gp_offset: c_uint, try self.genSetMem( @@ -18379,7 +18476,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = info.gp_count * 8 }, .{}, ); - field_off += @intCast(Type.c_uint.abiSize(mod)); + field_off += @intCast(Type.c_uint.abiSize(pt)); // fp_offset: c_uint, try self.genSetMem( .{ .frame = dst_fi }, @@ -18388,7 +18485,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = abi.SysV.c_abi_int_param_regs.len * 8 + info.fp_count * 16 }, .{}, ); - field_off += @intCast(Type.c_uint.abiSize(mod)); + field_off += @intCast(Type.c_uint.abiSize(pt)); // overflow_arg_area: *anyopaque, try self.genSetMem( .{ .frame = dst_fi }, @@ -18397,7 +18494,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { .{ .lea_frame = info.overflow_arg_area }, .{}, ); - field_off += @intCast(ptr_anyopaque_ty.abiSize(mod)); + field_off += @intCast(ptr_anyopaque_ty.abiSize(pt)); // reg_save_area: *anyopaque, try self.genSetMem( .{ .frame = dst_fi }, @@ -18406,7 +18503,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { .{ .lea_frame = info.reg_save_area }, .{}, ); - field_off += @intCast(ptr_anyopaque_ty.abiSize(mod)); + field_off += @intCast(ptr_anyopaque_ty.abiSize(pt)); break :result .{ .load_frame = .{ .index = dst_fi } }; }, .Win64 => return self.fail("TODO implement c_va_start for Win64", .{}), @@ -18416,11 +18513,12 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void { } fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ty = self.typeOfIndex(inst); const promote_ty = self.promoteVarArg(ty); - const ptr_anyopaque_ty = try mod.singleMutPtrType(Type.anyopaque); + const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque); const unused = self.liveness.isUnused(inst); const result: MCValue = switch (abi.resolveCallingConvention( @@ -18454,7 +18552,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } }; const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } }; - const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, self.target.*, .arg), .none); + const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, pt, self.target.*, .arg), .none); switch (classes[0]) { .integer => { assert(classes.len == 1); @@ -18489,7 +18587,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { .base = .{ .reg = addr_reg }, .mod = .{ .rm = .{ .size = .qword, - .disp = @intCast(@max(promote_ty.abiSize(mod), 8)), + .disp = @intCast(@max(promote_ty.abiSize(pt), 8)), } }, }); try self.genCopy( @@ -18537,7 +18635,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { .base = .{ .reg = addr_reg }, .mod = .{ .rm = .{ .size = .qword, - .disp = @intCast(@max(promote_ty.abiSize(mod), 8)), + .disp = @intCast(@max(promote_ty.abiSize(pt), 8)), } }, }); try self.genCopy( @@ -18557,7 +18655,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { unreachable; }, else => 
return self.fail("TODO implement c_va_arg for {} on SysV", .{ - promote_ty.fmt(mod), + promote_ty.fmt(pt), }), } @@ -18627,11 +18725,11 @@ fn airVaEnd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const mcv = if (ref.toIndex()) |inst| mcv: { break :mcv self.inst_tracking.getPtr(inst).?.short; @@ -18705,8 +18803,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { - const mod = self.bin_file.comp.module.?; - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, val, self.owner.getDecl(mod))) { + const pt = self.pt; + return switch (try codegen.genTypedValue(self.bin_file, pt, self.src_loc, val, self.owner.getDecl(pt.zcu))) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, @@ -18745,7 +18843,8 @@ fn resolveCallingConventionValues( var_args: []const Type, stack_frame_base: FrameIndex, ) !CallMCValues { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const cc = fn_info.cc; const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); @@ -18788,7 +18887,7 @@ fn resolveCallingConventionValues( .SysV => {}, .Win64 => { // Align the stack to 16bytes before allocating shadow stack space (if any). - result.stack_byte_count += @intCast(4 * Type.usize.abiSize(mod)); + result.stack_byte_count += @intCast(4 * Type.usize.abiSize(pt)); }, else => unreachable, } @@ -18796,7 +18895,7 @@ fn resolveCallingConventionValues( // Return values if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { // TODO: is this even possible for C calling convention? 
result.return_value = InstTracking.init(.none); } else { @@ -18804,15 +18903,15 @@ fn resolveCallingConventionValues( var ret_tracking_i: usize = 0; const classes = switch (resolved_cc) { - .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, self.target.*, .ret), .none), - .Win64 => &.{abi.classifyWindows(ret_ty, mod)}, + .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, pt, self.target.*, .ret), .none), + .Win64 => &.{abi.classifyWindows(ret_ty, pt)}, else => unreachable, }; for (classes) |class| switch (class) { .integer => { const ret_int_reg = registerAlias( abi.getCAbiIntReturnRegs(resolved_cc)[ret_int_reg_i], - @intCast(@min(ret_ty.abiSize(mod), 8)), + @intCast(@min(ret_ty.abiSize(pt), 8)), ); ret_int_reg_i += 1; @@ -18822,7 +18921,7 @@ fn resolveCallingConventionValues( .sse, .float, .float_combine, .win_i128 => { const ret_sse_reg = registerAlias( abi.getCAbiSseReturnRegs(resolved_cc)[ret_sse_reg_i], - @intCast(ret_ty.abiSize(mod)), + @intCast(ret_ty.abiSize(pt)), ); ret_sse_reg_i += 1; @@ -18865,7 +18964,7 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime(mod)); + assert(ty.hasRuntimeBitsIgnoreComptime(pt)); switch (resolved_cc) { .SysV => {}, .Win64 => { @@ -18879,8 +18978,8 @@ fn resolveCallingConventionValues( var arg_mcv_i: usize = 0; const classes = switch (resolved_cc) { - .SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .arg), .none), - .Win64 => &.{abi.classifyWindows(ty, mod)}, + .SysV => mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .arg), .none), + .Win64 => &.{abi.classifyWindows(ty, pt)}, else => unreachable, }; for (classes) |class| switch (class) { @@ -18890,7 +18989,7 @@ fn resolveCallingConventionValues( const param_int_reg = registerAlias( abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i], - @intCast(@min(ty.abiSize(mod), 8)), + @intCast(@min(ty.abiSize(pt), 8)), ); param_int_reg_i += 1; @@ -18903,7 +19002,7 @@ fn resolveCallingConventionValues( const param_sse_reg = registerAlias( abi.getCAbiSseParamRegs(resolved_cc)[param_sse_reg_i], - @intCast(ty.abiSize(mod)), + @intCast(ty.abiSize(pt)), ); param_sse_reg_i += 1; @@ -18916,7 +19015,7 @@ fn resolveCallingConventionValues( .x87, .x87up, .complex_x87, .memory => break, else => unreachable, }, - .Win64 => if (ty.abiSize(mod) > 8) { + .Win64 => if (ty.abiSize(pt) > 8) { const param_int_reg = abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64(); param_int_reg_i += 1; @@ -18938,7 +19037,7 @@ fn resolveCallingConventionValues( const frame_elems_len = ty.vectorLen(mod) - remaining_param_int_regs; const frame_elem_size = mem.alignForward( u64, - ty.childType(mod).abiSize(mod), + ty.childType(mod).abiSize(pt), frame_elem_align, ); const frame_size: u31 = @intCast(frame_elems_len * frame_elem_size); @@ -18962,9 +19061,9 @@ fn resolveCallingConventionValues( continue; } - const param_size: u31 = @intCast(ty.abiSize(mod)); + const param_size: u31 = @intCast(ty.abiSize(pt)); const param_align: u31 = - @intCast(@max(ty.abiAlignment(mod).toByteUnits().?, 8)); + @intCast(@max(ty.abiAlignment(pt).toByteUnits().?, 8)); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -18984,11 +19083,11 @@ fn resolveCallingConventionValues( // Return values if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + } else if 
(!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(resolved_cc)[0]; - const ret_ty_size: u31 = @intCast(ret_ty.abiSize(mod)); + const ret_ty_size: u31 = @intCast(ret_ty.abiSize(pt)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -19003,12 +19102,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { arg.* = .none; continue; } - const param_size: u31 = @intCast(ty.abiSize(mod)); - const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnits().?); + const param_size: u31 = @intCast(ty.abiSize(pt)); + const param_align: u31 = @intCast(ty.abiAlignment(pt).toByteUnits().?); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -19093,47 +19192,49 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { } fn memSize(self: *Self, ty: Type) Memory.Size { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; return switch (ty.zigTypeTag(mod)) { .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)), - else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))), + else => Memory.Size.fromSize(@intCast(ty.abiSize(pt))), }; } fn splitType(self: *Self, ty: Type) ![2]Type { - const mod = self.bin_file.comp.module.?; - const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none); + const pt = self.pt; + const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none); var parts: [2]Type = undefined; if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| { part.* = switch (class) { .integer => switch (part_i) { 0 => Type.u64, 1 => part: { - const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnits().?; - const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8)); - break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) { + const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?; + const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8)); + break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) { 1 => elem_ty, - else => |len| try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), + else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }), }; }, else => unreachable, }, .float => Type.f32, - .float_combine => try mod.arrayType(.{ .len = 2, .child = .f32_type }), + .float_combine => try pt.arrayType(.{ .len = 2, .child = .f32_type }), .sse => Type.f64, else => break, }; - } else if (parts[0].abiSize(mod) + parts[1].abiSize(mod) == ty.abiSize(mod)) return parts; - return self.fail("TODO implement splitType for {}", .{ty.fmt(mod)}); + } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts; + return self.fail("TODO implement splitType for {}", .{ty.fmt(pt)}); } /// Truncates the value in the register in place. /// Clobbers any remaining bits. 
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(ty.bitSize(mod)), + .bits = @intCast(ty.bitSize(pt)), }; const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return; try self.spillEflagsIfOccupied(); @@ -19177,8 +19278,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const mod = self.bin_file.comp.module.?; - const abi_size = ty.abiSize(mod); + const pt = self.pt; + const mod = pt.zcu; + const abi_size = ty.abiSize(pt); return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, @@ -19196,8 +19298,7 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - const mod = self.bin_file.comp.module.?; - return self.regBitSize(ty) - ty.bitSize(mod); + return self.regBitSize(ty) - ty.bitSize(self.pt); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { @@ -19211,12 +19312,14 @@ fn hasAllFeatures(self: *Self, features: anytype) bool { } fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; return self.air.typeOfIndex(inst, &mod.intern_pool); } @@ -19268,7 +19371,8 @@ fn floatLibcAbiSuffix(ty: Type) []const u8 { } fn promoteInt(self: *Self, ty: Type) Type { - const mod = self.bin_file.comp.module.?; + const pt = self.pt; + const mod = pt.zcu; const int_info: InternPool.Key.IntType = switch (ty.toIntern()) { .bool_type => .{ .signedness = .unsigned, .bits = 1 }, else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 852d19132d2b..73f4f1d617c5 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -8,7 +8,7 @@ allocator: Allocator, mir: Mir, cc: std.builtin.CallingConvention, err_msg: ?*ErrorMsg = null, -src_loc: Module.LazySrcLoc, +src_loc: Zcu.LazySrcLoc, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ @@ -657,7 +657,7 @@ const std = @import("std"); const Air = @import("../../Air.zig"); const Allocator = std.mem.Allocator; -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const Immediate = bits.Immediate; const Instruction = encoder.Instruction; const Lower = @This(); @@ -665,8 +665,6 @@ const Memory = Instruction.Memory; const Mir = @import("Mir.zig"); const Mnemonic = Instruction.Mnemonic; const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const Operand = Instruction.Operand; const Prefix = Instruction.Prefix; const Register = bits.Register; diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 05c0c9626ca2..6f4bd6f3565a 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -44,7 +44,7 @@ pub const Class = enum { } }; -pub fn classifyWindows(ty: Type, zcu: *Zcu) Class { +pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. 
Any argument that doesn't fit in 8 @@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." - switch (ty.zigTypeTag(zcu)) { + switch (ty.zigTypeTag(pt.zcu)) { .Pointer, .Int, .Bool, @@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(zcu)) { + => switch (ty.abiSize(pt)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag(zcu)) { + else => switch (ty.zigTypeTag(pt.zcu)) { .Int => return .win_i128, - .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") { + .Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") { return .win_i128; } else { return .memory; @@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. -pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class { const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag(zcu)) { - .Pointer => switch (ty.ptrSize(zcu)) { + switch (ty.zigTypeTag(pt.zcu)) { + .Pointer => switch (ty.ptrSize(pt.zcu)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(zcu).bits; + const bits = ty.intInfo(pt.zcu).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 else => unreachable, }, .Vector => { - const elem_ty = ty.childType(zcu); - const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu); + const elem_ty = ty.childType(pt.zcu); + const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu); if (elem_ty.toIntern() == .bool_type) { if (bits <= 32) return .{ .integer, .none, .none, .none, @@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional(zcu)) { + if (ty.isPtrLikeOptional(pt.zcu)) { result[0] = .integer; return result; } @@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". 
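// The eightbyte rule quoted above, reduced to a standalone sketch of the
// integer path only (an assumed simplification of classifySystemV): one
// INTEGER class per started 8-byte chunk up to 128 bits, memory beyond that.
const Class8 = enum { integer, memory, none };

fn classifyIntBits(bits: u16) [2]Class8 {
    if (bits <= 64) return .{ .integer, .none };
    if (bits <= 128) return .{ .integer, .integer }; // two eightbytes
    return .{ .memory, .none };
}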
- const ty_size = ty.abiSize(zcu); - switch (ty.containerLayout(zcu)) { + const ty_size = ty.abiSize(pt); + switch (ty.containerLayout(pt.zcu)) { .auto, .@"extern" => {}, .@"packed" => { assert(ty_size <= 16); @@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 if (ty_size > 64) return memory_class; - _ = if (zcu.typeToStruct(ty)) |loaded_struct| - classifySystemVStruct(&result, 0, loaded_struct, zcu, target) - else if (zcu.typeToUnion(ty)) |loaded_union| - classifySystemVUnion(&result, 0, loaded_union, zcu, target) + _ = if (pt.zcu.typeToStruct(ty)) |loaded_struct| + classifySystemVStruct(&result, 0, loaded_struct, pt, target) + else if (pt.zcu.typeToUnion(ty)) |loaded_union| + classifySystemVUnion(&result, 0, loaded_union, pt, target) else unreachable; @@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8 return result; }, .Array => { - const ty_size = ty.abiSize(zcu); + const ty_size = ty.abiSize(pt); if (ty_size <= 8) { result[0] = .integer; return result; @@ -326,10 +326,10 @@ fn classifySystemVStruct( result: *[8]Class, starting_byte_offset: u64, loaded_struct: InternPool.LoadedStructType, - zcu: *Zcu, + pt: Zcu.PerThread, target: std.Target, ) u64 { - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; var byte_offset = starting_byte_offset; var field_it = loaded_struct.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { @@ -338,29 +338,29 @@ fn classifySystemVStruct( byte_offset = std.mem.alignForward( u64, byte_offset, - field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?, + field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?, ); - if (zcu.typeToStruct(field_ty)) |field_loaded_struct| { + if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| { switch (field_loaded_struct.layout) { .auto, .@"extern" => { - byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target); + byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target); continue; }, .@"packed" => {}, } - } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| { + } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| { switch (field_loaded_union.getLayout(ip)) { .auto, .@"extern" => { - byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target); + byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target); continue; }, .@"packed" => {}, } } - const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none); + const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none); for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class| result_class.* = result_class.combineSystemV(field_class); - byte_offset += field_ty.abiSize(zcu); + byte_offset += field_ty.abiSize(pt); } const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*; std.debug.assert(final_byte_offset == std.mem.alignForward( @@ -375,30 +375,30 @@ fn classifySystemVUnion( result: *[8]Class, starting_byte_offset: u64, loaded_union: InternPool.LoadedUnionType, - zcu: *Zcu, + pt: Zcu.PerThread, target: std.Target, ) u64 { - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; for (0..loaded_union.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - if 
(zcu.typeToStruct(field_ty)) |field_loaded_struct| { + if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| { switch (field_loaded_struct.layout) { .auto, .@"extern" => { - _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target); + _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target); continue; }, .@"packed" => {}, } - } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| { + } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| { switch (field_loaded_union.getLayout(ip)) { .auto, .@"extern" => { - _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target); + _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target); continue; }, .@"packed" => {}, } } - const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none); + const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none); for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class| result_class.* = result_class.combineSystemV(field_class); } diff --git a/src/codegen.zig b/src/codegen.zig index 059b4fa7d490..5fc8ef174f1c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -13,12 +13,10 @@ const trace = @import("tracy.zig").trace; const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); -const ErrorMsg = Module.ErrorMsg; +const ErrorMsg = Zcu.ErrorMsg; const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; const Target = std.Target; const Type = @import("Type.zig"); const Value = @import("Value.zig"); @@ -47,14 +45,15 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const zcu = lf.comp.module.?; + const zcu = pt.zcu; const func = zcu.funcInfo(func_index); const decl = zcu.declPtr(func.owner_decl); const namespace = zcu.namespacePtr(decl.src_namespace); @@ -62,35 +61,36 @@ pub fn generateFunction( switch (target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), + 
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } pub fn generateLazyFunction( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) CodeGenError!Result { - const zcu = lf.comp.module.?; + const zcu = pt.zcu; const decl_index = lazy_sym.ty.getOwnerDecl(zcu); const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const target = namespace.fileScope(zcu).mod.resolved_target.result; switch (target.cpu.arch) { - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, src_loc, lazy_sym, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output), else => unreachable, } } @@ -105,7 +105,8 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian pub fn generateLazySymbol( bin_file: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, // TODO don't use an "out" parameter like this; put it in the result instead alignment: *Alignment, @@ -119,25 +120,24 @@ pub fn generateLazySymbol( defer tracy.end(); const comp = bin_file.comp; - const zcu = comp.module.?; - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; const target = comp.root_mod.resolved_target.result; const endian = target.cpu.arch.endian(); const gpa = comp.gpa; log.debug("generateLazySymbol: kind = {s}, ty = {}", .{ @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(zcu), + lazy_sym.ty.fmt(pt), }); if (lazy_sym.kind == .code) { alignment.* = target_util.defaultFunctionAlignment(target); - return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); + return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError(zcu)) { + if (lazy_sym.ty.isAnyError(pt.zcu)) { alignment.* = .@"4"; - const err_names = zcu.global_error_set.keys(); + const err_names = pt.zcu.global_error_set.keys(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); @@ -151,9 +151,9 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag(zcu) == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(pt.zcu) == .Enum) { alignment.* = .@"1"; - const tag_names = lazy_sym.ty.enumFields(zcu); + const tag_names = lazy_sym.ty.enumFields(pt.zcu); for (0..tag_names.len) |tag_index| { const tag_name = tag_names.get(ip)[tag_index].toSlice(ip); try code.ensureUnusedCapacity(tag_name.len + 1); @@ -165,13 +165,14 @@ pub fn generateLazySymbol( gpa, src_loc, "TODO implement generateLazySymbol for {s} {}", - .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(zcu) }, + .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) }, ) }; } pub fn generateSymbol( bin_file: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, val: Value, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -180,17 +181,17 
@@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); - const mod = bin_file.comp.module.?; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty = val.typeOf(mod); const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - log.debug("generateSymbol: val = {}", .{val.fmtValue(mod, null)}); + log.debug("generateSymbol: val = {}", .{val.fmtValue(pt, null)}); if (val.isUndefDeep(mod)) { - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return .ok; } @@ -236,9 +237,9 @@ pub fn generateSymbol( .empty_enum_value, => unreachable, // non-runtime values .int => { - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; var space: Value.BigIntSpace = undefined; - const int_val = val.toBigInt(&space, mod); + const int_val = val.toBigInt(&space, pt); int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, .err => |err| { @@ -252,14 +253,14 @@ pub fn generateSymbol( .payload => 0, }; - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { try code.writer().writeInt(u16, err_val, endian); return .ok; } - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const abi_align = ty.abiAlignment(mod); + const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + const abi_align = ty.abiAlignment(pt); // error value first when its type is larger than the error union's payload if (error_align.order(payload_align) == .gt) { @@ -269,8 +270,8 @@ pub fn generateSymbol( // emit payload part of the error union { const begin = code.items.len; - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }), + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) { + .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, }), code, debug_output, reloc_info)) { .ok => {}, @@ -300,7 +301,7 @@ pub fn generateSymbol( }, .enum_tag => |enum_tag| { const int_tag_ty = ty.intTagType(mod); - switch (try generateSymbol(bin_file, src_loc, try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -311,21 +312,21 @@ pub fn generateSymbol( .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), .f80 => |f80_val| { writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), }, - .ptr => switch (try lowerPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) { + .ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) { .ok => {}, .fail => |em| 
return .{ .fail = em }, }, .slice => |slice| { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return .{ .fail = em }, } - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -333,11 +334,11 @@ pub fn generateSymbol( .opt => { const payload_type = ty.optionalChild(mod); const payload_val = val.optionalValue(mod); - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; if (ty.optionalReprIsPayload(mod)) { if (payload_val) |value| { - switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -345,10 +346,12 @@ pub fn generateSymbol( try code.appendNTimes(0, abi_size); } } else { - const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - if (payload_type.hasRuntimeBits(mod)) { - const value = payload_val orelse Value.fromInterned((try mod.intern(.{ .undef = payload_type.toIntern() }))); - switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) { + const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1; + if (payload_type.hasRuntimeBits(pt)) { + const value = payload_val orelse Value.fromInterned(try pt.intern(.{ + .undef = payload_type.toIntern(), + })); + switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -363,7 +366,7 @@ pub fn generateSymbol( .elems, .repeated_elem => { var index: u64 = 0; while (index < array_type.lenIncludingSentinel()) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| elems[@intCast(index)], .repeated_elem => |elem| if (index < array_type.len) @@ -378,8 +381,7 @@ pub fn generateSymbol( }, }, .vector_type => |vector_type| { - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse - return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; if (vector_type.child == .bool_type) { const bytes = try code.addManyAsSlice(abi_size); @memset(bytes, 0xaa); @@ -424,7 +426,7 @@ pub fn generateSymbol( .elems, .repeated_elem => { var index: u64 = 0; while (index < vector_type.len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| elems[ math.cast(usize, index) orelse return error.Overflow @@ -439,7 +441,7 @@ pub fn generateSymbol( } const padding = abi_size - - (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(mod) * vector_type.len) orelse + (math.cast(usize, 
Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse return error.Overflow); if (padding > 0) try code.appendNTimes(0, padding); } @@ -452,10 +454,10 @@ pub fn generateSymbol( 0.., ) |field_ty, comptime_val, index| { if (comptime_val != .none) continue; - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; const field_val = switch (aggregate.storage) { - .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty, .storage = .{ .u64 = bytes.at(index, ip) }, } }), @@ -463,14 +465,14 @@ pub fn generateSymbol( .repeated_elem => |elem| elem, }; - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } const unpadded_field_end = code.items.len - struct_begin; // Pad struct members if required - const padded_field_end = ty.structFieldOffset(index + 1, mod); + const padded_field_end = ty.structFieldOffset(index + 1, pt); const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; @@ -483,15 +485,14 @@ pub fn generateSymbol( const struct_type = ip.loadStructType(ty.toIntern()); switch (struct_type.layout) { .@"packed" => { - const abi_size = math.cast(usize, ty.abiSize(mod)) orelse - return error.Overflow; + const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow; const current_pos = code.items.len; try code.appendNTimes(0, abi_size); var bits: u16 = 0; for (struct_type.field_types.get(ip), 0..) |field_ty, index| { const field_val = switch (aggregate.storage) { - .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty, .storage = .{ .u64 = bytes.at(index, ip) }, } }), @@ -502,18 +503,18 @@ pub fn generateSymbol( // pointer may point to a decl which must be marked used // but can also result in a relocation. Therefore we handle those separately. 
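// A standalone sketch of the packed-struct path below: each non-pointer field
// is written at the running bit offset (writeToPackedMemory in the hunk), and
// the offset then advances by the field's bit size. Little-endian bit order is
// assumed here purely for illustration.
fn writePackedBits(buf: []u8, bit_offset: u16, value: u64, bits: u16) void {
    var i: u16 = 0;
    while (i < bits) : (i += 1) {
        if ((value >> @intCast(i)) & 1 != 0) {
            buf[(bit_offset + i) / 8] |= @as(u8, 1) << @intCast((bit_offset + i) % 8);
        }
    }
}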
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) { - const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse + const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) { .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), .fail => |em| return Result{ .fail = em }, } } else { - Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable; + Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(Type.fromInterned(field_ty).bitSize(mod)); + bits += @intCast(Type.fromInterned(field_ty).bitSize(pt)); } }, .auto, .@"extern" => { @@ -524,10 +525,10 @@ pub fn generateSymbol( var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = field_types[field_index]; - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty, .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -541,7 +542,7 @@ pub fn generateSymbol( ) orelse return error.Overflow; if (padding > 0) try code.appendNTimes(0, padding); - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -562,15 +563,15 @@ pub fn generateSymbol( else => unreachable, }, .un => |un| { - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info); + return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info); } // Check if we should store the tag first. 
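// The check below, as a standalone predicate: the tag is emitted before the
// payload exactly when the union has a runtime tag whose alignment is at least
// the payload's; otherwise the tag (if present) is emitted after the payload,
// which is the later `compare(.lt, ...)` branch in this hunk.
fn unionTagFirst(tag_size: u64, tag_align: u64, payload_align: u64) bool {
    return tag_size > 0 and tag_align >= payload_align;
}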
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -580,28 +581,28 @@ pub fn generateSymbol( if (un.tag != .none) { const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBits(mod)) { + if (!field_ty.hasRuntimeBits(pt)) { try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow; if (padding > 0) { try code.appendNTimes(0, padding); } } } else { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) { - switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -618,22 +619,24 @@ pub fn generateSymbol( fn lowerPtr( bin_file: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, ptr_val: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, prev_offset: u64, ) CodeGenError!Result { - const zcu = bin_file.comp.module.?; + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info, offset), - .anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info, offset), - .int => try generateSymbol(bin_file, src_loc, try zcu.intValue(Type.usize, offset), code, debug_output, reloc_info), + .decl => |decl| try lowerDeclRef(bin_file, pt, src_loc, decl, code, debug_output, reloc_info, offset), + .anon_decl => |ad| try lowerAnonDeclRef(bin_file, pt, src_loc, ad, code, debug_output, reloc_info, offset), + .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, debug_output, reloc_info), .eu_payload => |eu_ptr| try lowerPtr( bin_file, + pt, src_loc, eu_ptr, code, @@ -641,11 +644,12 @@ fn lowerPtr( reloc_info, offset + errUnionPayloadOffset( Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu), - zcu, + pt, ), ), .opt_payload => |opt_ptr| try lowerPtr( bin_file, + pt, src_loc, opt_ptr, code, @@ -666,12 +670,12 @@ fn lowerPtr( }; }, .Struct, .Union => 
switch (base_ty.containerLayout(zcu)) { - .auto => base_ty.structFieldOffset(@intCast(field.index), zcu), + .auto => base_ty.structFieldOffset(@intCast(field.index), pt), .@"extern", .@"packed" => unreachable, }, else => unreachable, }; - return lowerPtr(bin_file, src_loc, field.base, code, debug_output, reloc_info, offset + field_off); + return lowerPtr(bin_file, pt, src_loc, field.base, code, debug_output, reloc_info, offset + field_off); }, .arr_elem, .comptime_field, .comptime_alloc => unreachable, }; @@ -683,7 +687,8 @@ const RelocInfo = struct { fn lowerAnonDeclRef( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -691,22 +696,21 @@ fn lowerAnonDeclRef( offset: u64, ) CodeGenError!Result { _ = debug_output; - const zcu = lf.comp.module.?; - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; const target = lf.comp.root_mod.resolved_target.result; const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8); const decl_val = anon_decl.val; const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); - log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(zcu)}); - const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn; - if (!is_fn_body and !decl_ty.hasRuntimeBits(zcu)) { + log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(pt)}); + const is_fn_body = decl_ty.zigTypeTag(pt.zcu) == .Fn; + if (!is_fn_body and !decl_ty.hasRuntimeBits(pt)) { try code.appendNTimes(0xaa, ptr_width_bytes); return Result.ok; } const decl_align = ip.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try lf.lowerAnonDecl(decl_val, decl_align, src_loc); + const res = try lf.lowerAnonDecl(pt, decl_val, decl_align, src_loc); switch (res) { .ok => {}, .fail => |em| return .{ .fail = em }, @@ -730,7 +734,8 @@ fn lowerAnonDeclRef( fn lowerDeclRef( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, decl_index: InternPool.DeclIndex, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -739,14 +744,14 @@ fn lowerDeclRef( ) CodeGenError!Result { _ = src_loc; _ = debug_output; - const zcu = lf.comp.module.?; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const target = namespace.fileScope(zcu).mod.resolved_target.result; const ptr_width = target.ptrBitWidth(); const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn; - if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(zcu)) { + if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(pt)) { try code.appendNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } @@ -814,7 +819,7 @@ pub const GenResult = union(enum) { fn fail( gpa: Allocator, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, comptime format: []const u8, args: anytype, ) Allocator.Error!GenResult { @@ -825,14 +830,15 @@ pub const GenResult = union(enum) { fn genDeclRef( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, val: Value, ptr_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { - const zcu = lf.comp.module.?; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty = val.typeOf(zcu); - log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu, null)}); + log.debug("genDeclRef: val = {}", .{val.fmtValue(pt, null)}); const ptr_decl = zcu.declPtr(ptr_decl_index); const namespace = zcu.namespacePtr(ptr_decl.src_namespace); @@ -848,7 +854,7 @@ fn genDeclRef( }; 
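// genDeclRef below fills references to zero-bit values with the 0xaa
// "undefined memory" pattern sized to the pointer width; a standalone sketch
// of how that immediate is built:
fn undefPtrImmediate(ptr_bytes: u4) u64 {
    var imm: u64 = 0;
    var i: u4 = 0;
    while (i < ptr_bytes) : (i += 1) imm = (imm << 8) | 0xaa;
    return imm;
}
// undefPtrImmediate(2) == 0xaaaa, matching the explicit switch in the hunk.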
const decl = zcu.declPtr(decl_index); - if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const imm: u64 = switch (ptr_bytes) { 1 => 0xaa, 2 => 0xaaaa, @@ -865,12 +871,12 @@ fn genDeclRef( // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? if (ty.castPtrToFn(zcu)) |fn_ty| { if (zcu.typeToFunc(fn_ty).?.is_generic) { - return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? }); + return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? }); } } else if (ty.zigTypeTag(zcu) == .Pointer) { const elem_ty = ty.elemType2(zcu); - if (!elem_ty.hasRuntimeBits(zcu)) { - return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? }); + if (!elem_ty.hasRuntimeBits(pt)) { + return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? }); } } @@ -931,15 +937,15 @@ fn genDeclRef( fn genUnnamedConst( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { - const zcu = lf.comp.module.?; const gpa = lf.comp.gpa; - log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu, null)}); + log.debug("genUnnamedConst: val = {}", .{val.fmtValue(pt, null)}); - const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| { + const local_sym_index = lf.lowerUnnamedConst(pt, val, owner_decl_index) catch |err| { return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)}); }; switch (lf.tag) { @@ -970,15 +976,16 @@ fn genUnnamedConst( pub fn genTypedValue( lf: *link.File, - src_loc: Module.LazySrcLoc, + pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { - const zcu = lf.comp.module.?; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty = val.typeOf(zcu); - log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu, null)}); + log.debug("genTypedValue: val = {}", .{val.fmtValue(pt, null)}); if (val.isUndef(zcu)) return GenResult.mcv(.undef); @@ -990,7 +997,7 @@ pub fn genTypedValue( if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| return genDeclRef(lf, src_loc, val, decl), + .decl => |decl| return genDeclRef(lf, pt, src_loc, val, decl), else => {}, }, else => {}, @@ -1007,7 +1014,7 @@ pub fn genTypedValue( .none => {}, else => switch (ip.indexToKey(val.toIntern())) { .int => { - return GenResult.mcv(.{ .immediate = val.toUnsignedInt(zcu) }); + return GenResult.mcv(.{ .immediate = val.toUnsignedInt(pt) }); }, else => {}, }, @@ -1017,8 +1024,8 @@ pub fn genTypedValue( const info = ty.intInfo(zcu); if (info.bits <= ptr_bits) { const unsigned: u64 = switch (info.signedness) { - .signed => @bitCast(val.toSignedInt(zcu)), - .unsigned => val.toUnsignedInt(zcu), + .signed => @bitCast(val.toSignedInt(pt)), + .unsigned => val.toUnsignedInt(pt), }; return GenResult.mcv(.{ .immediate = unsigned }); } @@ -1030,11 +1037,12 @@ pub fn genTypedValue( if (ty.isPtrLikeOptional(zcu)) { return genTypedValue( lf, + pt, src_loc, val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }), owner_decl_index, ); - } else if (ty.abiSize(zcu) == 1) { + } else if (ty.abiSize(pt) == 1) { return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) }); } }, @@ -1042,6 +1050,7 @@ pub fn genTypedValue( const 
enum_tag = ip.indexToKey(val.toIntern()).enum_tag; return genTypedValue( lf, + pt, src_loc, Value.fromInterned(enum_tag.int), owner_decl_index, @@ -1055,14 +1064,15 @@ pub fn genTypedValue( .ErrorUnion => { const err_type = ty.errorUnionSet(zcu); const payload_type = ty.errorUnionPayload(zcu); - if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { // We use the error type directly as the type. - const err_int_ty = try zcu.errorIntType(); + const err_int_ty = try pt.errorIntType(); switch (ip.indexToKey(val.toIntern()).error_union.val) { .err_name => |err_name| return genTypedValue( lf, + pt, src_loc, - Value.fromInterned(try zcu.intern(.{ .err = .{ + Value.fromInterned(try pt.intern(.{ .err = .{ .ty = err_type.toIntern(), .name = err_name, } })), @@ -1070,8 +1080,9 @@ pub fn genTypedValue( ), .payload => return genTypedValue( lf, + pt, src_loc, - try zcu.intValue(err_int_ty, 0), + try pt.intValue(err_int_ty, 0), owner_decl_index, ), } @@ -1090,26 +1101,26 @@ pub fn genTypedValue( else => {}, } - return genUnnamedConst(lf, src_loc, val, owner_decl_index); + return genUnnamedConst(lf, pt, src_loc, val, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { +pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; + const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return 0; } else { - return payload_align.forward(Type.anyerror.abiSize(mod)); + return payload_align.forward(Type.anyerror.abiSize(pt)); } } -pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return error_align.forward(payload_ty.abiSize(mod)); +pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0; + const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + return error_align.forward(payload_ty.abiSize(pt)); } else { return 0; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2fd3d2b164d4..2fa8a98cbb64 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -333,15 +333,15 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(ref); if (gop.found_existing) return gop.value_ptr.*; - const zcu = f.object.dg.zcu; - const val = (try f.air.value(ref, zcu)).?; + const pt = f.object.dg.pt; + const val = (try f.air.value(ref, pt)).?; const ty = f.typeOf(ref); - const result: CValue = if (lowersToArray(ty, zcu)) result: { + const result: CValue = if (lowersToArray(ty, pt)) result: { const writer = f.object.codeHeaderWriter(); const decl_c_value = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = 
CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)), }); const gpa = f.object.dg.gpa; try f.allocs.put(gpa, decl_c_value.new_local, false); @@ -358,7 +358,7 @@ pub const Function = struct { } fn wantSafety(f: *Function) bool { - return switch (f.object.dg.zcu.optimizeMode()) { + return switch (f.object.dg.pt.zcu.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; @@ -379,7 +379,7 @@ pub const Function = struct { fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue { return f.allocAlignedLocal(inst, .{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)), }); } @@ -500,7 +500,8 @@ pub const Function = struct { fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 { const gpa = f.object.dg.gpa; - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const gop = try f.lazy_fns.getOrPut(gpa, key); @@ -539,13 +540,11 @@ pub const Function = struct { } fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { - const zcu = f.object.dg.zcu; - return f.air.typeOf(inst, &zcu.intern_pool); + return f.air.typeOf(inst, &f.object.dg.pt.zcu.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { - const zcu = f.object.dg.zcu; - return f.air.typeOfIndex(inst, &zcu.intern_pool); + return f.air.typeOfIndex(inst, &f.object.dg.pt.zcu.intern_pool); } fn copyCValue(f: *Function, ctype: CType, dst: CValue, src: CValue) !void { @@ -608,7 +607,7 @@ pub const Object = struct { /// This data is available both when outputting .c code and when outputting an .h file. pub const DeclGen = struct { gpa: mem.Allocator, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, pass: Pass, is_naked_fn: bool, @@ -634,7 +633,7 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const decl_index = dg.pass.decl; const decl = zcu.declPtr(decl_index); const src_loc = decl.navSrcLoc(zcu); @@ -648,7 +647,8 @@ pub const DeclGen = struct { anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ctype_pool = &dg.ctype_pool; const decl_val = Value.fromInterned(anon_decl.val); @@ -656,7 +656,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const ptr_ty = Type.fromInterned(anon_decl.orig_ty); - if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) { + if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -696,7 +696,7 @@ pub const DeclGen = struct { // alignment. If there is already an entry, keep the greater alignment. 
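// The bookkeeping below, reduced to a standalone helper: record an anon decl's
// required alignment, keeping the larger value when an entry already exists.
// (Assumed simplification: a u32 key stands in for the interned decl value.)
const std = @import("std");

fn noteAlignment(map: *std.AutoHashMap(u32, u64), decl: u32, explicit: u64) !void {
    const gop = try map.getOrPut(decl);
    gop.value_ptr.* = if (gop.found_existing) @max(gop.value_ptr.*, explicit) else explicit;
}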
const explicit_alignment = ptr_type.flags.alignment; if (explicit_alignment != .none) { - const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu); + const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt); if (explicit_alignment.order(abi_alignment).compare(.gt)) { const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val); aligned_gop.value_ptr.* = if (aligned_gop.found_existing) @@ -713,15 +713,16 @@ pub const DeclGen = struct { decl_index: InternPool.DeclIndex, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ctype_pool = &dg.ctype_pool; const decl = zcu.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const decl_ty = decl.typeOf(zcu); - const ptr_ty = try decl.declPtrType(zcu); - if (!decl_ty.isFnOrHasRuntimeBits(zcu)) { + const ptr_ty = try decl.declPtrType(pt); + if (!decl_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -756,12 +757,13 @@ pub const DeclGen = struct { derivation: Value.PointerDeriveStep, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; switch (derivation) { .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete); - const addr_val = try zcu.intValue(Type.usize, int.addr); + const addr_val = try pt.intValue(Type.usize, int.addr); try writer.writeByte('('); try dg.renderCType(writer, ptr_ctype); try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)}); @@ -777,12 +779,12 @@ pub const DeclGen = struct { }, .field_ptr => |field| { - const parent_ptr_ty = try field.parent.ptrType(zcu); + const parent_ptr_ty = try field.parent.ptrType(pt); // Ensure complete type definition is available before accessing fields. _ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete); - switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) { + switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) { .begin => { const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete); try writer.writeByte('('); @@ -801,7 +803,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderCType(writer, ptr_ctype); try writer.writeByte(')'); - const offset_val = try zcu.intValue(Type.usize, byte_offset); + const offset_val = try pt.intValue(Type.usize, byte_offset); try writer.writeAll("((char *)"); try dg.renderPointer(writer, field.parent.*, location); try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)}); @@ -809,7 +811,7 @@ pub const DeclGen = struct { } }, - .elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) { + .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) { // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer. 
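// For reference, the byte-offset branches of renderPointer print C of this
// shape: `(dest_ctype)((char *)parent_ptr + offset)`. A standalone sketch of
// that emission (writer is any std.io writer; operand strings are illustrative):
fn printByteOffsetPtr(writer: anytype, dest_cast: []const u8, parent: []const u8, offset: u64) !void {
    try writer.print("({s})((char *){s} + {d})", .{ dest_cast, parent, offset });
}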
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); try writer.writeByte('('); @@ -817,11 +819,11 @@ pub const DeclGen = struct { try writer.writeByte(')'); try dg.renderPointer(writer, elem.parent.*, location); } else { - const index_val = try zcu.intValue(Type.usize, elem.elem_idx); + const index_val = try pt.intValue(Type.usize, elem.elem_idx); // We want to do pointer arithmetic on a pointer to the element type. // We might have a pointer-to-array. In this case, we must cast first. const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); - const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete); + const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(pt), .complete); if (result_ctype.eql(parent_ctype)) { // The pointer already has an appropriate type - just do the arithmetic. try writer.writeByte('('); @@ -846,7 +848,7 @@ pub const DeclGen = struct { if (oac.byte_offset == 0) { try dg.renderPointer(writer, oac.parent.*, location); } else { - const offset_val = try zcu.intValue(Type.usize, oac.byte_offset); + const offset_val = try pt.intValue(Type.usize, oac.byte_offset); try writer.writeAll("((char *)"); try dg.renderPointer(writer, oac.parent.*, location); try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)}); @@ -856,8 +858,7 @@ pub const DeclGen = struct { } fn renderErrorName(dg: *DeclGen, writer: anytype, err_name: InternPool.NullTerminatedString) !void { - const zcu = dg.zcu; - const ip = &zcu.intern_pool; + const ip = &dg.pt.zcu.intern_pool; try writer.print("zig_error_{}", .{fmtIdent(err_name.toSlice(ip))}); } @@ -867,7 +868,8 @@ pub const DeclGen = struct { val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; const ctype_pool = &dg.ctype_pool; @@ -927,7 +929,7 @@ pub const DeclGen = struct { try writer.writeAll("(("); try dg.renderCType(writer, ctype); try writer.print("){x})", .{try dg.fmtIntLiteral( - try zcu.intValue(Type.usize, val.toUnsignedInt(zcu)), + try pt.intValue(Type.usize, val.toUnsignedInt(pt)), .Other, )}); }, @@ -974,10 +976,10 @@ pub const DeclGen = struct { .enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location), .float => { const bits = ty.floatBits(target.*); - const f128_val = val.toFloat(f128, zcu); + const f128_val = val.toFloat(f128, pt); // All unsigned ints matching float types are pre-allocated. 
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable; + const repr_ty = pt.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -988,10 +990,10 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))), - 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))), - 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))), - 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))), + 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))), + 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))), + 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))), + 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))), 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), else => unreachable, } @@ -1002,10 +1004,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, pt)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, pt)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, pt)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, pt)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1045,10 +1047,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}), - 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}), - 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}), - 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}), + 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}), + 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}), + 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}), + 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}), 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), else => unreachable, }; @@ -1056,7 +1058,7 @@ pub const DeclGen = struct { empty = false; } try writer.print("{x}", .{try dg.fmtIntLiteral( - try zcu.intValue_big(repr_ty, repr_val_big.toConst()), + try pt.intValue_big(repr_ty, repr_val_big.toConst()), location, )}); if (!empty) try writer.writeByte(')'); @@ -1084,7 +1086,7 @@ pub const DeclGen = struct { .ptr => { var arena = std.heap.ArenaAllocator.init(zcu.gpa); defer arena.deinit(); - const derivation = try val.pointerDerivation(arena.allocator(), zcu); + const derivation = try val.pointerDerivation(arena.allocator(), pt); try dg.renderPointer(writer, derivation, location); }, .opt => |opt| switch (ctype.info(ctype_pool)) { @@ -1167,15 +1169,15 @@ pub const DeclGen = struct { try literal.start(); var index: usize = 0; while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(zcu, index); + const elem_val = try val.elemValue(pt, index); const elem_val_u8: u8 = if (elem_val.isUndef(zcu)) undefPattern(u8) else - @intCast(elem_val.toUnsignedInt(zcu)); + @intCast(elem_val.toUnsignedInt(pt)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8: u8 = @intCast(s.toUnsignedInt(zcu)); + const s_u8: u8 = @intCast(s.toUnsignedInt(pt)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1184,7 +1186,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(zcu, index); + const elem_val = try val.elemValue(pt, index); try dg.renderValue(writer, elem_val, initializer_type); } if (ai.sentinel) |s| { @@ -1207,13 +1209,13 @@ pub const DeclGen = struct { const comptime_val = tuple.values.get(ip)[field_index]; if (comptime_val != .none) continue; const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeByte(','); const field_val = Value.fromInterned( switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1242,12 +1244,12 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = 
.{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1262,14 +1264,14 @@ pub const DeclGen = struct { const int_info = ty.intInfo(zcu); const bits = Type.smallestUnsignedBits(int_info.bits - 1); - const bit_offset_ty = try zcu.intType(.unsigned, bits); + const bit_offset_ty = try pt.intType(.unsigned, bits); var bit_offset: u64 = 0; var eff_num_fields: usize = 0; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; eff_num_fields += 1; } @@ -1277,7 +1279,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderUndefValue(writer, ty, location); try writer.writeByte(')'); - } else if (ty.bitSize(zcu) > 64) { + } else if (ty.bitSize(pt) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1290,10 +1292,10 @@ pub const DeclGen = struct { var needs_closing_paren = false; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1307,7 +1309,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); try writer.writeAll(", "); - try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument); + try dg.renderValue(writer, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument); try writer.writeByte(')'); } else { try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); @@ -1316,7 +1318,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); needs_closing_paren = true; eff_index += 1; } @@ -1326,7 +1328,7 @@ pub const DeclGen = struct { var empty = true; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1334,7 +1336,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1345,12 +1347,12 @@ pub const DeclGen = struct { if (bit_offset != 0) { try dg.renderValue(writer, Value.fromInterned(field_val), .Other); try writer.writeAll(" << "); - try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument); + try dg.renderValue(writer, try 
pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument); } else { try dg.renderValue(writer, Value.fromInterned(field_val), .Other); } - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); empty = false; } try writer.writeByte(')'); @@ -1363,7 +1365,7 @@ pub const DeclGen = struct { .un => |un| { const loaded_union = ip.loadUnionType(ty.toIntern()); if (un.tag == .none) { - const backing_ty = try ty.unionBackingType(zcu); + const backing_ty = try ty.unionBackingType(pt); switch (loaded_union.getLayout(ip)) { .@"packed" => { if (!location.isInitializer()) { @@ -1378,7 +1380,7 @@ pub const DeclGen = struct { return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{}); } - const ptr_ty = try zcu.singleConstPtrType(ty); + const ptr_ty = try pt.singleConstPtrType(ty); try writer.writeAll("*(("); try dg.renderType(writer, ptr_ty); try writer.writeAll(")("); @@ -1400,7 +1402,7 @@ pub const DeclGen = struct { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index]; if (loaded_union.getLayout(ip) == .@"packed") { - if (field_ty.hasRuntimeBits(zcu)) { + if (field_ty.hasRuntimeBits(pt)) { if (field_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try dg.renderCType(writer, ctype); @@ -1431,7 +1433,7 @@ pub const DeclGen = struct { ), .payload => { try writer.writeByte('{'); - if (field_ty.hasRuntimeBits(zcu)) { + if (field_ty.hasRuntimeBits(pt)) { try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))}); try dg.renderValue( writer, @@ -1443,7 +1445,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(zcu)) continue; + if (!inner_field_ty.hasRuntimeBits(pt)) continue; try dg.renderUndefValue(writer, inner_field_ty, initializer_type); break; } @@ -1464,7 +1466,8 @@ pub const DeclGen = struct { ty: Type, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; const ctype_pool = &dg.ctype_pool; @@ -1490,7 +1493,7 @@ pub const DeclGen = struct { => { const bits = ty.floatBits(target.*); // All unsigned ints matching float types are pre-allocated. 
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable; + const repr_ty = dg.pt.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -1515,14 +1518,14 @@ pub const DeclGen = struct { .error_set_type, .inferred_error_set_type, => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), location), + try dg.fmtIntLiteral(try pt.undefValue(ty), location), }), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => { try writer.writeAll("(("); try dg.renderCType(writer, ctype); return writer.print("){x})", .{ - try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other), + try dg.fmtIntLiteral(try pt.undefValue(Type.usize), .Other), }); }, .Slice => { @@ -1536,7 +1539,7 @@ pub const DeclGen = struct { const ptr_ty = ty.slicePtrFieldType(zcu); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other), + try dg.fmtIntLiteral(try dg.pt.undefValue(Type.usize), .Other), }); }, }, @@ -1591,7 +1594,7 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1600,7 +1603,7 @@ pub const DeclGen = struct { return writer.writeByte('}'); }, .@"packed" => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other), + try dg.fmtIntLiteral(try pt.undefValue(ty), .Other), }), } }, @@ -1616,7 +1619,7 @@ pub const DeclGen = struct { for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1654,7 +1657,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(zcu)) continue; + if (!inner_field_ty.hasRuntimeBits(pt)) continue; try dg.renderUndefValue( writer, inner_field_ty, @@ -1670,7 +1673,7 @@ pub const DeclGen = struct { if (has_tag) try writer.writeByte('}'); }, .@"packed" => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other), + try dg.fmtIntLiteral(try pt.undefValue(ty), .Other), }), } }, @@ -1775,7 +1778,7 @@ pub const DeclGen = struct { }, }, ) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const fn_ty = fn_val.typeOf(zcu); @@ -1856,7 +1859,7 @@ pub const DeclGen = struct { fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType { defer std.debug.assert(dg.scratch.items.len == 0); - return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.zcu, dg.mod, kind); + return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.pt, dg.mod, kind); } fn byteSize(dg: *DeclGen, ctype: CType) u64 { @@ -1879,8 +1882,8 @@ pub const DeclGen = struct { } fn renderCType(dg: *DeclGen, w: anytype, ctype: CType) error{OutOfMemory}!void { - _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, 
.suffix, .{}); + _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1904,18 +1907,18 @@ pub const DeclGen = struct { } }; fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool { - const zcu = dg.zcu; - const dest_bits = dest_ty.bitSize(zcu); - const dest_int_info = dest_ty.intInfo(zcu); + const pt = dg.pt; + const dest_bits = dest_ty.bitSize(pt); + const dest_int_info = dest_ty.intInfo(pt.zcu); - const src_is_ptr = src_ty.isPtrAtRuntime(zcu); + const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(zcu); - const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; + const src_bits = src_eff_ty.bitSize(pt); + const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1944,8 +1947,9 @@ pub const DeclGen = struct { src_ty: Type, location: ValueRenderLocation, ) !void { - const zcu = dg.zcu; - const dest_bits = dest_ty.bitSize(zcu); + const pt = dg.pt; + const zcu = pt.zcu; + const dest_bits = dest_ty.bitSize(pt); const dest_int_info = dest_ty.intInfo(zcu); const src_is_ptr = src_ty.isPtrAtRuntime(zcu); @@ -1954,7 +1958,7 @@ pub const DeclGen = struct { .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(zcu); + const src_bits = src_eff_ty.bitSize(pt); const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or @@ -2035,7 +2039,7 @@ pub const DeclGen = struct { qualifiers, CType.AlignAs.fromAlignment(.{ .@"align" = alignment, - .abi = ty.abiAlignment(dg.zcu), + .abi = ty.abiAlignment(dg.pt), }), ); } @@ -2048,6 +2052,7 @@ pub const DeclGen = struct { qualifiers: CQualifiers, alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { + const zcu = dg.pt.zcu; switch (alignas.abiOrder()) { .lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}), .eq => {}, @@ -2055,10 +2060,10 @@ pub const DeclGen = struct { } try w.print("{}", .{ - try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, qualifiers), + try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, qualifiers), }); try dg.writeName(w, name); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, .{}); } fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -2162,7 +2167,7 @@ pub const DeclGen = struct { decl_index: InternPool.DeclIndex, variable: InternPool.Key.Variable, ) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const decl = zcu.declPtr(decl_index); const fwd = dg.fwdDeclWriter(); try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); @@ -2180,7 +2185,7 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(decl_index); @@ -2236,15 +2241,15 @@ pub const DeclGen = struct { .bits => {}, } - 
const zcu = dg.zcu; - const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{ + const pt = dg.pt; + const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @as(u16, @intCast(ty.bitSize(zcu))), + .bits = @as(u16, @intCast(ty.bitSize(pt))), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); try writer.print(", {}", .{try dg.fmtIntLiteral( - try zcu.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits), + try pt.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits), .FunctionArgument, )}); } @@ -2254,7 +2259,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const kind = loc.toCTypeKind(); const ty = val.typeOf(zcu); return std.fmt.Formatter(formatIntLiteral){ .data = .{ @@ -2616,7 +2621,8 @@ pub fn genGlobalAsm(zcu: *Zcu, writer: anytype) !void { } pub fn genErrDecls(o: *Object) !void { - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const writer = o.writer(); @@ -2628,7 +2634,7 @@ pub fn genErrDecls(o: *Object) !void { for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| { const name = name_nts.toSlice(ip); max_name_len = @max(name.len, max_name_len); - const err_val = try zcu.intern(.{ .err = .{ + const err_val = try pt.intern(.{ .err = .{ .ty = .anyerror_type, .name = name_nts, } }); @@ -2649,12 +2655,12 @@ pub fn genErrDecls(o: *Object) !void { @memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice); const identifier = name_buf[0 .. name_prefix.len + name_slice.len]; - const name_ty = try zcu.arrayType(.{ + const name_ty = try pt.arrayType(.{ .len = name_slice.len, .child = .u8_type, .sentinel = .zero_u8, }); - const name_val = try zcu.intern(.{ .aggregate = .{ + const name_val = try pt.intern(.{ .aggregate = .{ .ty = name_ty.toIntern(), .storage = .{ .bytes = name.toString() }, } }); @@ -2673,7 +2679,7 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll(";\n"); } - const name_array_ty = try zcu.arrayType(.{ + const name_array_ty = try pt.arrayType(.{ .len = zcu.global_error_set.count(), .child = .slice_const_u8_sentinel_0_type, }); @@ -2693,14 +2699,15 @@ pub fn genErrDecls(o: *Object) !void { if (value != 0) try writer.writeByte(','); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), - try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, name.len), .StaticInitializer), + try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, name.len), .StaticInitializer), }); } try writer.writeAll("};\n"); } pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void { - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ctype_pool = &o.dg.ctype_pool; const w = o.writer(); @@ -2721,20 +2728,20 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn for (0..tag_names.len) |tag_index| { const tag_name = tag_names.get(ip)[tag_index]; const tag_name_len = tag_name.length(ip); - const tag_val = try zcu.enumValueFieldIndex(enum_ty, @intCast(tag_index)); + const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index)); - const name_ty = try zcu.arrayType(.{ + const name_ty = try pt.arrayType(.{ .len = tag_name_len, .child = .u8_type, .sentinel = .zero_u8, }); - const name_val = try zcu.intern(.{ .aggregate = .{ + const name_val = try pt.intern(.{ .aggregate = 
.{ .ty = name_ty.toIntern(), .storage = .{ .bytes = tag_name.toString() }, } }); try w.print(" case {}: {{\n static ", .{ - try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, zcu), .Other), + try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, pt), .Other), }); try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete); try w.writeAll(" = "); @@ -2743,7 +2750,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn try o.dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ fmtIdent("name"), - try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name_len), .Other), + try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, tag_name_len), .Other), }); try w.writeAll(" }\n"); @@ -2788,7 +2795,7 @@ pub fn genFunc(f: *Function) !void { defer tracy.end(); const o = &f.object; - const zcu = o.dg.zcu; + const zcu = o.dg.pt.zcu; const gpa = o.dg.gpa; const decl_index = o.dg.pass.decl; const decl = zcu.declPtr(decl_index); @@ -2879,12 +2886,13 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const decl_index = o.dg.pass.decl; const decl = zcu.declPtr(decl_index); const decl_ty = decl.typeOf(zcu); - if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; + if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; if (decl.val.getExternFunc(zcu)) |_| { const fwd = o.dg.fwdDeclWriter(); try fwd.writeAll("zig_extern "); @@ -2928,7 +2936,7 @@ pub fn genDeclValue( alignment: Alignment, @"linksection": InternPool.OptionalNullTerminatedString, ) !void { - const zcu = o.dg.zcu; + const zcu = o.dg.pt.zcu; const ty = val.typeOf(zcu); const fwd = o.dg.fwdDeclWriter(); @@ -2946,7 +2954,7 @@ pub fn genDeclValue( } pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const fwd = dg.fwdDeclWriter(); @@ -3088,7 +3096,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { - const zcu = f.object.dg.zcu; + const zcu = f.object.dg.pt.zcu; const ip = &zcu.intern_pool; const air_tags = f.air.instructions.items(.tag); const air_datas = f.air.instructions.items(.data); @@ -3388,10 +3396,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3414,13 +3422,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt); const ptr = try 
f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3449,10 +3458,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3475,14 +3484,15 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(zcu); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3505,10 +3515,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3531,40 +3541,40 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), + .abi = elem_ty.abiAlignment(pt), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); - const gpa = f.object.dg.zcu.gpa; - try f.allocs.put(gpa, local.new_local, true); + try f.allocs.put(zcu.gpa, local.new_local, true); return .{ .local_ref = local.new_local }; } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), + .abi = elem_ty.abiAlignment(pt), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); - const gpa = f.object.dg.zcu.gpa; - try f.allocs.put(gpa, 
local.new_local, true); + try f.allocs.put(zcu.gpa, local.new_local, true); return .{ .local_ref = local.new_local }; } @@ -3593,7 +3603,8 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = f.typeOf(ty_op.operand); @@ -3601,7 +3612,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_info = ptr_scalar_ty.ptrInfo(zcu); const src_ty = Type.fromInterned(ptr_info.child); - if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3611,10 +3622,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) else true; - const is_array = lowersToArray(src_ty, zcu); + const is_array = lowersToArray(src_ty, pt); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3634,12 +3645,12 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) { const host_bits: u16 = ptr_info.packed_offset.host_size * 8; - const host_ty = try zcu.intType(.unsigned, host_bits); + const host_ty = try pt.intType(.unsigned, host_bits); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const field_ty = try zcu.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu)))); + const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt)))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3650,9 +3661,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; if (cant_cast) { - if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); @@ -3680,7 +3691,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const writer = f.object.writer(); const op_inst = un_op.toIndex(); @@ -3695,11 +3707,11 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; - const is_array = lowersToArray(ret_ty, zcu); + 
const is_array = lowersToArray(ret_ty, pt); const ret_val = if (is_array) ret_val: { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(f.object.dg.zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -3733,7 +3745,8 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -3760,7 +3773,8 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -3809,13 +3823,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, operand, .FunctionArgument); try v.elem(f, writer); try writer.print(", {x})", .{ - try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(zcu, scalar_ty)), + try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(pt, scalar_ty)), }); }, .signed => { const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const shift_val = try zcu.intValue(Type.u8, c_bits - dest_bits); + const shift_val = try pt.intValue(Type.u8, c_bits - dest_bits); try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); @@ -3860,7 +3874,8 @@ fn airIntFromBool(f: *Function, inst: Air.Inst.Index) !CValue { } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; // *a = b; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -3871,7 +3886,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |v| v.isUndefDeep(zcu) else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndefDeep(zcu) else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3887,10 +3902,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) else true; - const is_array = lowersToArray(Type.fromInterned(ptr_info.child), zcu); + const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt); const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); @@ -3901,7 +3916,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { if (need_memcpy) { // For this memcpy to safely work we need the rhs to have the same // underlying type as the lhs (i.e. they must both be arrays of the same underlying type). 
- assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.zcu)); + assert(src_ty.eql(Type.fromInterned(ptr_info.child), zcu)); // If the source is a constant, writeCValue will emit a brace initialization // so work around this by initializing into new local. @@ -3932,12 +3947,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try v.end(f, inst, writer); } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) { const host_bits = ptr_info.packed_offset.host_size * 8; - const host_ty = try zcu.intType(.unsigned, host_bits); + const host_ty = try pt.intType(.unsigned, host_bits); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const src_bits = src_ty.bitSize(zcu); + const src_bits = src_ty.bitSize(pt); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -3950,7 +3965,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); - const mask_val = try zcu.intValue_big(host_ty, mask.toConst()); + const mask_val = try pt.intValue_big(host_ty, mask.toConst()); const v = try Vectorize.start(f, inst, writer, ptr_ty); const a = try Assignment.start(f, writer, src_scalar_ctype); @@ -3967,9 +3982,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; if (cant_cast) { - if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -4013,7 +4028,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4051,7 +4067,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(zcu); @@ -4084,11 +4101,12 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); const 
scalar_ty = operand_ty.scalarType(zcu); - if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat()) + if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); @@ -4122,11 +4140,12 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(zcu); - const scalar_bits = scalar_ty.bitSize(zcu); + const scalar_bits = scalar_ty.bitSize(pt); if (scalar_ty.isInt(zcu) and scalar_bits > 64) return airCmpBuiltinCall( f, @@ -4170,12 +4189,13 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); - const operand_bits = operand_ty.bitSize(zcu); + const operand_bits = operand_ty.bitSize(pt); if (operand_ty.isAbiInt(zcu) and operand_bits > 64) return airCmpBuiltinCall( f, @@ -4256,7 +4276,8 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4267,7 +4288,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); const elem_ty = inst_scalar_ty.elemType2(zcu); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs); const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); const local = try f.allocLocal(inst, inst_ty); @@ -4299,13 +4320,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); - if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat()) + if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, .none); const lhs = try f.resolveInst(bin_op.lhs); @@ -4339,7 +4361,8 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4374,7 +4397,8 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; // Not even allowed to call panic in a naked 
function. if (f.object.dg.is_naked_fn) return .none; @@ -4398,7 +4422,7 @@ fn airCall( if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = arg_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -4445,7 +4469,7 @@ fn airCall( } else { const local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), }); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -4456,7 +4480,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = (try f.air.value(pl_op.operand, zcu)) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known; break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| func.owner_decl, @@ -4499,7 +4523,7 @@ fn airCall( try writer.writeAll(");\n"); const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, zcu)) + if (result_local == .none or !lowersToArray(ret_ty, pt)) break :result result_local; const array_local = try f.allocLocal(inst, ret_ty); @@ -4533,7 +4557,8 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func); @@ -4545,10 +4570,11 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (try f.air.value(pl_op.operand, zcu)) |v| v.isUndefDeep(zcu) else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndefDeep(zcu) else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4564,7 +4590,8 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { } fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const liveness_block = f.liveness.getBlock(inst); const block_id: usize = f.next_block_index; @@ -4572,7 +4599,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst)) + const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4611,7 +4638,8 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; 
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]); @@ -4627,13 +4655,14 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const err_union = try f.resolveInst(operand); const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { try writer.writeAll("if ("); @@ -4725,7 +4754,8 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const target = &f.object.dg.mod.resolved_target.result; const ctype_pool = &f.object.dg.ctype_pool; const writer = f.object.writer(); @@ -4771,7 +4801,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal try writer.writeAll(", sizeof("); try f.renderType( writer, - if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty, + if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty, ); try writer.writeAll("));\n"); @@ -4805,7 +4835,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); - const info_ty = try zcu.intType(dest_info.signedness, bits); + const info_ty = try pt.intType(dest_info.signedness, bits); if (wrap_ctype) |ctype| try f.object.dg.renderCTypeForBuiltinFnName(writer, ctype) else @@ -4935,7 +4965,8 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4979,16 +5010,16 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { for (items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - const item_value = try f.air.value(item, zcu); - if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{ - try f.fmtIntLiteral(try zcu.intValue(lowered_condition_ty, item_int)), + const item_value = try f.air.value(item, pt); + if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{ + try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)), }) else { if (condition_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, (try f.air.value(item, zcu)).?, .Other); + try f.object.dg.renderValue(writer, (try f.air.value(item, pt)).?, .Other); } try writer.writeByte(':'); } @@ -5026,13 +5057,14 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool { - const target = &f.object.dg.mod.resolved_target.result; + const dg = f.object.dg; + const target = 
&dg.mod.resolved_target.result; return switch (constraint[0]) { '{' => true, 'i', 'r' => false, 'I' => !target.cpu.arch.isArmOrThumb(), else => switch (value) { - .constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) { + .constant => |val| switch (dg.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => false, else => true, @@ -5045,7 +5077,8 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; @@ -5060,10 +5093,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: { + const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: { const inst_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(inst_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)), }); if (f.wantSafety()) { try f.writeCValue(writer, inst_local, .Other); @@ -5096,7 +5129,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("register "); const output_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(output_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)), }); try f.allocs.put(gpa, output_local.new_local, false); try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete); @@ -5131,7 +5164,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (is_reg) try writer.writeAll("register "); const input_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(input_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)), }); try f.allocs.put(gpa, input_local.new_local, false); try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete); @@ -5314,7 +5347,8 @@ fn airIsNull( operator: std.math.CompareOperator, is_ptr: bool, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -5369,7 +5403,8 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -5404,7 +5439,8 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); 
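Every hunk in this file makes the same mechanical substitution: code that previously reached compiler state through `dg.zcu` (or `f.object.dg.zcu`) now holds a `Zcu.PerThread` handle as `dg.pt`, routes everything that may intern or allocate (`intValue`, `intValue_big`, `intern`, `intType`, `arrayType`, `undefValue`, and the layout queries `abiAlignment`/`abiSize`/`bitSize`/`hasRuntimeBits*`) through `pt`, and derives `const zcu = pt.zcu;` only for the remaining read-only lookups (`intInfo`, `isPtrAtRuntime`, `childType`, the intern pool). A minimal before/after sketch of that shape, assuming the surrounding file's imports (`Type`, `Value`, `Zcu`, `DeclGen`, `assert`); the helper name `lowerExample` is illustrative and not part of the patch:

    // Before: one *Zcu handle served both read-only queries and interning.
    fn lowerExampleOld(dg: *DeclGen, ty: Type) !Value {
        const zcu = dg.zcu;
        assert(ty.isAbiInt(zcu));
        return zcu.intValue(Type.usize, ty.abiSize(zcu));
    }

    // After: the per-thread handle owns anything that can mutate the
    // intern pool; zcu is re-derived from it for pure queries.
    fn lowerExampleNew(dg: *DeclGen, ty: Type) !Value {
        const pt = dg.pt;
        const zcu = pt.zcu; // read-only queries still take the Zcu
        assert(ty.isAbiInt(zcu));
        return pt.intValue(Type.usize, ty.abiSize(pt));
    }

The hunk below extends the same treatment to free functions: `fieldLocation` now takes `pt: Zcu.PerThread` in place of `zcu: *Zcu` and recovers `zcu` locally.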
@@ -5458,21 +5494,22 @@ fn fieldLocation( container_ptr_ty: Type, field_ptr_ty: Type, field_index: u32, - zcu: *Zcu, + pt: Zcu.PerThread, ) union(enum) { begin: void, field: CValue, byte_offset: u64, } { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const container_ty = Type.fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child); switch (ip.indexToKey(container_ty.toIntern())) { .struct_type => { const loaded_struct = ip.loadStructType(container_ty.toIntern()); return switch (loaded_struct.layout) { - .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) + .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] } else .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -5480,16 +5517,16 @@ fn fieldLocation( else .{ .field = field_index } }, .@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0) - .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) + + .{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) + container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) } else .begin, }; }, - .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) + .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) - .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) } + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) + .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) } else .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| .{ .identifier = field_name.toSlice(ip) } @@ -5500,8 +5537,8 @@ fn fieldLocation( switch (loaded_union.getLayout(ip)) { .auto, .@"extern" => { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) - return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu)) + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) + return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt)) .{ .field = .{ .identifier = "payload" } } else .begin; @@ -5546,7 +5583,8 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; @@ -5564,10 +5602,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, container_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) { + switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, 
u8_ptr_ty); @@ -5580,14 +5618,14 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); try f.writeCValue(writer, field_ptr_val, .Other); try writer.print(" - {})", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)), + try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)), }); }, } @@ -5603,7 +5641,8 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const container_ty = container_ptr_ty.childType(zcu); const field_ptr_ty = f.typeOfIndex(inst); @@ -5617,21 +5656,21 @@ fn fieldPtr( try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) { + switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) { .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), .field => |field| { try writer.writeByte('&'); try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); try f.writeCValue(writer, container_ptr_val, .Other); try writer.print(" + {})", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)), + try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)), }); }, } @@ -5641,13 +5680,14 @@ fn fieldPtr( } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } @@ -5671,15 +5711,15 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .@"packed" => { const int_info = struct_ty.intInfo(zcu); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index); + const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index); const field_int_signedness = if (inst_ty.isAbiInt(zcu)) inst_ty.intInfo(zcu).signedness else .unsigned; - const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu)))); + const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt)))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5690,7 +5730,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: 
implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); @@ -5702,12 +5742,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } try f.writeCValue(writer, struct_byval, .Other); if (bit_offset > 0) try writer.print(", {})", .{ - try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)), + try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)), }); if (cant_cast) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local; + if (inst_ty.eql(field_int_ty, zcu)) return temp_local; const local = try f.allocLocal(inst, inst_ty); if (local.new_local != temp_local.new_local) { @@ -5783,7 +5823,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { /// *(E!T) -> E /// Note that the result is never a pointer. fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5797,7 +5838,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = error_union_ty.errorUnionPayload(zcu); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. 
return local; } @@ -5806,11 +5847,11 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits(zcu)) + if (!payload_ty.hasRuntimeBits(pt)) try f.writeCValue(writer, operand, .Other) else if (error_ty.errorSetIsEmpty(zcu)) try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(try zcu.errorIntType(), 0)), + try f.fmtIntLiteral(try pt.intValue(try pt.errorIntType(), 0)), }) else if (operand_is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) @@ -5821,7 +5862,8 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5831,7 +5873,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) { + if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5896,12 +5938,13 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_ty = inst_ty.errorUnionSet(zcu); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5935,7 +5978,8 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5944,12 +5988,12 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const error_union_ty = operand_ty.childType(zcu); const payload_ty = error_union_ty.errorUnionPayload(zcu); - const err_int_ty = try zcu.errorIntType(); - const no_err = try zcu.intValue(err_int_ty, 0); + const err_int_ty = try pt.errorIntType(); + const no_err = try pt.intValue(err_int_ty, 0); try reap(f, inst, &.{ty_op.operand}); // First, set the non-error value. 
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete)); try f.writeCValueDeref(writer, operand); try a.assign(f, writer); @@ -5994,13 +6038,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_ty = inst_ty.errorUnionSet(zcu); try reap(f, inst, &.{ty_op.operand}); @@ -6020,14 +6065,15 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, try zcu.intValue(try zcu.errorIntType(), 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(try pt.errorIntType(), 0), .Other); try a.end(f, writer); } return local; } fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const writer = f.object.writer(); @@ -6042,9 +6088,9 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const a = try Assignment.start(f, writer, CType.bool); try f.writeCValue(writer, local, .Other); try a.assign(f, writer); - const err_int_ty = try zcu.errorIntType(); + const err_int_ty = try pt.errorIntType(); if (!error_ty.errorSetIsEmpty(zcu)) - if (payload_ty.hasRuntimeBits(zcu)) + if (payload_ty.hasRuntimeBits(pt)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -6052,17 +6098,18 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other); try a.end(f, writer); return local; } fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6096,7 +6143,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand_child_ctype.info(ctype_pool) == .array) { try writer.writeByte('&'); try f.writeCValueDeref(writer, operand); - try writer.print("[{}]", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))}); + try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); } try a.end(f, writer); @@ -6106,7 +6153,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) 
!CValue { try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try a.assign(f, writer); try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, array_ty.arrayLen(zcu))), + try f.fmtIntLiteral(try pt.intValue(Type.usize, array_ty.arrayLen(zcu))), }); try a.end(f, writer); } @@ -6115,7 +6162,8 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -6165,7 +6213,8 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try f.resolveInst(un_op); @@ -6194,7 +6243,8 @@ fn airUnBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const operand = try f.resolveInst(operand_ref); try reap(f, inst, &.{operand_ref}); @@ -6237,7 +6287,8 @@ fn airBinBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); @@ -6292,7 +6343,8 @@ fn airCmpBuiltinCall( operation: enum { cmp, operator }, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); @@ -6333,7 +6385,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print("{s}{}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(try zcu.intValue(Type.i32, 0)), + try f.fmtIntLiteral(try pt.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6342,7 +6394,8 @@ fn airCmpBuiltinCall( } fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); @@ -6358,7 +6411,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6448,7 +6501,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.typeOfIndex(inst); @@ -6461,10 +6515,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, ty, operand); try reap(f, 
inst, &.{ pl_op.operand, extra.operand }); - const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8)); + const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8)); const is_float = ty.isRuntimeFloat(); const is_128 = repr_bits == 128; - const repr_ty = if (is_float) zcu.intType(.unsigned, repr_bits) catch unreachable else ty; + const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty; const local = try f.allocLocal(inst, inst_ty); try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); @@ -6503,7 +6557,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const atomic_load = f.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); @@ -6511,7 +6566,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ty = ptr_ty.childType(zcu); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6539,7 +6594,8 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = f.typeOf(bin_op.lhs); const ty = ptr_ty.childType(zcu); @@ -6551,7 +6607,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6574,7 +6630,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa } fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; if (ptr_ty.isSlice(zcu)) { try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" }); } else { @@ -6583,14 +6640,15 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo } fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ty = f.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(zcu); - const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |val| val.isUndefDeep(zcu) else false; + const elem_abi_size = elem_ty.abiSize(pt); + const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6628,7 +6686,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned 
int[20]') is not assignable - const elem_ptr_ty = try zcu.ptrType(.{ + const elem_ptr_ty = try pt.ptrType(.{ .child = elem_ty.toIntern(), .flags = .{ .size = .C, @@ -6640,7 +6698,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, 0), .Initializer); + try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6705,7 +6763,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); @@ -6733,10 +6792,11 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_ty: Type) !void { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; switch (dest_ty.ptrSize(zcu)) { .One => try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))), + try f.fmtIntLiteral(try pt.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))), }), .Many, .C => unreachable, .Slice => try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }), @@ -6744,14 +6804,15 @@ fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_t } fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const union_ptr = try f.resolveInst(bin_op.lhs); const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const union_ty = f.typeOf(bin_op.lhs).childType(zcu); - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety(zcu).?; @@ -6765,14 +6826,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const union_ty = f.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const inst_ty = f.typeOfIndex(inst); @@ -6787,7 +6848,8 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = f.typeOfIndex(inst); @@ -6824,7 +6886,8 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = 
f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -6879,7 +6942,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { } fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -6895,11 +6958,11 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { for (0..extra.mask_len) |index| { try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, index), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - const mask_elem = (try mask.elemValue(zcu, index)).toSignedInt(zcu); - const src_val = try zcu.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); + const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt); + const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); @@ -6911,7 +6974,8 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { } fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const reduce = f.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const scalar_ty = f.typeOfIndex(inst); @@ -6920,7 +6984,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.typeOf(reduce.operand); const writer = f.object.writer(); - const use_operator = scalar_ty.bitSize(zcu) <= 64; + const use_operator = scalar_ty.bitSize(pt) <= 64; const op: union(enum) { const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; builtin: Func, @@ -6971,37 +7035,37 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, switch (reduce.operation) { .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.false, - .Int => try zcu.intValue(scalar_ty, 0), + .Int => try pt.intValue(scalar_ty, 0), else => unreachable, }, .And => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.true, .Int => switch (scalar_ty.intInfo(zcu).signedness) { - .unsigned => try scalar_ty.maxIntScalar(zcu, scalar_ty), - .signed => try zcu.intValue(scalar_ty, -1), + .unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty), + .signed => try pt.intValue(scalar_ty, -1), }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try zcu.intValue(scalar_ty, 0), - .Float => try zcu.floatValue(scalar_ty, 0.0), + .Int => try pt.intValue(scalar_ty, 0), + .Float => try pt.floatValue(scalar_ty, 0.0), else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try zcu.intValue(scalar_ty, 1), - .Float => try zcu.floatValue(scalar_ty, 1.0), + .Int => try pt.intValue(scalar_ty, 1), + .Float => try pt.floatValue(scalar_ty, 1.0), else => unreachable, }, .Min => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.true, - .Int => try scalar_ty.maxIntScalar(zcu, scalar_ty), - .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)), + .Int => try scalar_ty.maxIntScalar(pt, scalar_ty), + .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.false, - .Int => try 
scalar_ty.minIntScalar(zcu, scalar_ty), - .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)), + .Int => try scalar_ty.minIntScalar(pt, scalar_ty), + .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, }, .Initializer); @@ -7046,7 +7110,8 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const inst_ty = f.typeOfIndex(inst); @@ -7096,7 +7161,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var field_it = loaded_struct.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -7113,7 +7178,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = "); const int_info = inst_ty.intInfo(zcu); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset: u64 = 0; @@ -7121,7 +7186,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (0..elements.len) |field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) { try writer.writeAll("zig_or_"); @@ -7134,7 +7199,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (resolved_elements, 0..) |element, field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeAll(", "); // TODO: Skip this entire shift if val is 0? 
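For the packed-struct branch of airAggregateInit in the surrounding hunks, the emitted C ORs each runtime field into the backing integer at an accumulated bit offset, advancing by `field_ty.bitSize(pt)` per field. A small self-contained sketch of that layout math, with field widths and values invented for illustration (the builtin names in the comment are only approximate renderings of what `renderTypeForBuiltinFnName` produces):

    const std = @import("std");

    pub fn main() void {
        // Hypothetical packed struct { a: u4, b: u7, c: u5 } flattened into a
        // u16 backing integer, lowest bit offset first -- the same order the
        // loop above visits runtime fields.
        const fields = [_]struct { bits: u8, value: u16 }{
            .{ .bits = 4, .value = 0xA },
            .{ .bits = 7, .value = 0x55 },
            .{ .bits = 5, .value = 0x1F },
        };
        var acc: u16 = 0;
        var bit_offset: u8 = 0;
        for (fields) |f| {
            // Roughly zig_or_u16(acc, zig_shl_u16(value, bit_offset)) in the
            // generated C; the shift is skipped when bit_offset == 0.
            acc |= f.value << @as(u4, @intCast(bit_offset));
            bit_offset += f.bits; // `bit_offset += field_ty.bitSize(pt)` above
        }
        std.debug.print("packed: 0x{X:0>4}\n", .{acc}); // prints 0xFD5A
    }

Comptime-known and zero-bit fields are skipped entirely (`structFieldIsComptime` / `hasRuntimeBitsIgnoreComptime(pt)` above), so only runtime fields contribute to the accumulated offset.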
@@ -7160,13 +7225,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.print(", {}", .{ - try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)), + try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)), }); try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); empty = false; } try writer.writeAll(";\n"); @@ -7176,7 +7241,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| @@ -7194,7 +7259,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -7211,15 +7277,15 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload); const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: { - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name, zcu).?; - const tag_val = try zcu.enumValueFieldIndex(tag_ty, field_index); + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const a = try Assignment.start(f, writer, try f.ctypeFromType(tag_ty, .complete)); try f.writeCValueMember(writer, local, .{ .identifier = "tag" }); try a.assign(f, writer); - try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, zcu))}); + try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, pt))}); try a.end(f, writer); } break :field .{ .payload_identifier = field_name.toSlice(ip) }; @@ -7234,7 +7300,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const prefetch = f.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; const ptr_ty = f.typeOf(prefetch.ptr); @@ -7291,7 +7358,8 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { } fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; @@ -7326,7 +7394,8 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { } fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const 
decl_index = f.object.dg.pass.decl; const decl = zcu.declPtr(decl_index); @@ -7699,7 +7768,8 @@ fn formatIntLiteral( options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - const zcu = data.dg.zcu; + const pt = data.dg.pt; + const zcu = pt.zcu; const target = &data.dg.mod.resolved_target.result; const ctype_pool = &data.dg.ctype_pool; @@ -7732,7 +7802,7 @@ fn formatIntLiteral( }; undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, zcu); + } else data.val.toBigInt(&int_buf, pt); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8); @@ -7866,7 +7936,7 @@ fn formatIntLiteral( .int_info = c_limb_int_info, .kind = data.kind, .ctype = c_limb_ctype, - .val = try zcu.intValue_big(Type.comptime_int, c_limb_mut.toConst()), + .val = try pt.intValue_big(Type.comptime_int, c_limb_mut.toConst()), }, fmt, options, writer); } } @@ -7940,17 +8010,18 @@ const Vectorize = struct { index: CValue = .none, pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; return if (ty.zigTypeTag(zcu) == .Vector) index: { const local = try f.allocLocal(inst, Type.usize); try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); - try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, ty.vectorLen(zcu)))}); + try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, ty.vectorLen(zcu)))}); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; @@ -7974,10 +8045,10 @@ const Vectorize = struct { } }; -fn lowersToArray(ty: Type, zcu: *Zcu) bool { - return switch (ty.zigTypeTag(zcu)) { +fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool { + return switch (ty.zigTypeTag(pt.zcu)) { .Array, .Vector => return true, - else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null, + else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null, }; } diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index 0a0d84f06105..6d98aaafcb12 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -1339,11 +1339,11 @@ pub const Pool = struct { allocator: std.mem.Allocator, scratch: *std.ArrayListUnmanaged(u32), ty: Type, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, kind: Kind, ) !CType { - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; switch (ty.toIntern()) { .u0_type, .i0_type, @@ -1400,7 +1400,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.loadEnumType(ip_index).tag_ty), - zcu, + pt, mod, kind, ), @@ -1409,7 +1409,7 @@ pub const Pool = struct { .adhoc_inferred_error_set_type, => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = zcu.errorSetBits(), + .bits = pt.zcu.errorSetBits(), }, mod, kind), .manyptr_u8_type, => return 
pool.getPointer(allocator, .{ @@ -1492,13 +1492,13 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ptr_info.child), - zcu, + pt, mod, .forward, ), .alignas = AlignAs.fromAlignment(.{ .@"align" = ptr_info.flags.alignment, - .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu), + .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt), }), }; break :elem_ctype if (elem.alignas.abiOrder().compare(.gte)) @@ -1535,7 +1535,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.slicePtrType(ip_index)), - zcu, + pt, mod, kind, ), @@ -1560,7 +1560,7 @@ pub const Pool = struct { allocator, scratch, elem_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1574,7 +1574,7 @@ pub const Pool = struct { .{ .name = .{ .index = .array }, .ctype = array_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1586,7 +1586,7 @@ pub const Pool = struct { allocator, scratch, elem_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1600,7 +1600,7 @@ pub const Pool = struct { .{ .name = .{ .index = .array }, .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1611,7 +1611,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(payload_type), - zcu, + pt, mod, kind.noParameter(), ); @@ -1635,7 +1635,7 @@ pub const Pool = struct { .name = .{ .index = .payload }, .ctype = payload_ctype, .alignas = AlignAs.fromAbiAlignment( - Type.fromInterned(payload_type).abiAlignment(zcu), + Type.fromInterned(payload_type).abiAlignment(pt), ), }, }; @@ -1643,7 +1643,7 @@ pub const Pool = struct { }, .anyframe_type => unreachable, .error_union_type => |error_union_info| { - const error_set_bits = zcu.errorSetBits(); + const error_set_bits = pt.zcu.errorSetBits(); const error_set_ctype = try pool.fromIntInfo(allocator, .{ .signedness = .unsigned, .bits = error_set_bits, @@ -1654,7 +1654,7 @@ pub const Pool = struct { allocator, scratch, payload_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1671,7 +1671,7 @@ pub const Pool = struct { .{ .name = .{ .index = .payload }, .ctype = payload_ctype, - .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1685,7 +1685,7 @@ pub const Pool = struct { .tag = .@"struct", .name = .{ .owner_decl = loaded_struct.decl.unwrap().? 
}, }); - if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl else CType.void; @@ -1706,7 +1706,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1718,7 +1718,7 @@ pub const Pool = struct { String.fromUnnamed(@intCast(field_index)); const field_alignas = AlignAs.fromAlignment(.{ .@"align" = loaded_struct.fieldAlign(ip, field_index), - .abi = field_type.abiAlignment(zcu), + .abi = field_type.abiAlignment(pt), }); pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ .name = field_name.index, @@ -1745,7 +1745,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(loaded_struct.backingIntType(ip).*), - zcu, + pt, mod, kind, ), @@ -1766,7 +1766,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1780,7 +1780,7 @@ pub const Pool = struct { .name = field_name.index, .ctype = field_ctype.index, .flags = .{ .alignas = AlignAs.fromAbiAlignment( - field_type.abiAlignment(zcu), + field_type.abiAlignment(pt), ) }, }); } @@ -1806,7 +1806,7 @@ pub const Pool = struct { extra_index, ); } - const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward); + const fwd_decl = try pool.fromType(allocator, scratch, ty, pt, mod, .forward); try pool.ensureUnusedCapacity(allocator, 1); const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ .fwd_decl = fwd_decl.index, @@ -1824,7 +1824,7 @@ pub const Pool = struct { .tag = if (has_tag) .@"struct" else .@"union", .name = .{ .owner_decl = loaded_union.decl }, }); - if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl else CType.void; @@ -1847,7 +1847,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1858,7 +1858,7 @@ pub const Pool = struct { ); const field_alignas = AlignAs.fromAlignment(.{ .@"align" = loaded_union.fieldAlign(ip, field_index), - .abi = field_type.abiAlignment(zcu), + .abi = field_type.abiAlignment(pt), }); pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ .name = field_name.index, @@ -1895,7 +1895,7 @@ pub const Pool = struct { allocator, scratch, tag_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1903,7 +1903,7 @@ pub const Pool = struct { struct_fields[struct_fields_len] = .{ .name = .{ .index = .tag }, .ctype = tag_ctype, - .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)), }; struct_fields_len += 1; } @@ -1951,7 +1951,7 @@ pub const Pool = struct { }, .@"packed" => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = @intCast(ty.bitSize(zcu)), + .bits = @intCast(ty.bitSize(pt)), }, mod, kind), } }, @@ -1960,7 +1960,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.loadEnumType(ip_index).tag_ty), - zcu, + pt, mod, kind, ), @@ -1975,7 +1975,7 @@ pub const Pool = struct { allocator, scratch, return_type, - zcu, + pt, mod, kind.asParameter(), ) else CType.void; @@ -1987,7 +1987,7 @@ pub const Pool = struct { allocator, scratch, param_type, - zcu, + pt, mod, kind.asParameter(), ); @@ -2011,7 +2011,7 @@ pub const Pool = struct { .inferred_error_set_type, => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = zcu.errorSetBits(), + .bits = pt.zcu.errorSetBits(), }, mod, 
kind), .undef, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6efef20f22ba..ca574070bf73 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -15,8 +15,6 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); @@ -810,7 +808,7 @@ pub const Object = struct { gpa: Allocator, builder: Builder, - module: *Module, + pt: Zcu.PerThread, debug_compile_unit: Builder.Metadata, @@ -820,7 +818,7 @@ pub const Object = struct { debug_enums: std.ArrayListUnmanaged(Builder.Metadata), debug_globals: std.ArrayListUnmanaged(Builder.Metadata), - debug_file_map: std.AutoHashMapUnmanaged(*const Module.File, Builder.Metadata), + debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata), debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata), debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata), @@ -992,7 +990,10 @@ pub const Object = struct { obj.* = .{ .gpa = gpa, .builder = builder, - .module = comp.module.?, + .pt = .{ + .zcu = comp.module.?, + .tid = .main, + }, .debug_compile_unit = debug_compile_unit, .debug_enums_fwd_ref = debug_enums_fwd_ref, .debug_globals_fwd_ref = debug_globals_fwd_ref, @@ -1033,7 +1034,8 @@ pub const Object = struct { // If o.error_name_table is null, then it was not referenced by any instructions. if (o.error_name_table == .none) return; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const error_name_list = mod.global_error_set.keys(); const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); @@ -1072,7 +1074,7 @@ pub const Object = struct { table_variable_index.setMutability(.constant, &o.builder); table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); table_variable_index.setAlignment( - slice_ty.abiAlignment(mod).toLlvm(), + slice_ty.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -1083,8 +1085,7 @@ pub const Object = struct { // If there is no such function in the module, it means the source code does not need it. 
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return; const llvm_fn = o.builder.getGlobal(name) orelse return; - const mod = o.module; - const errors_len = mod.global_error_set.count(); + const errors_len = o.pt.zcu.global_error_set.count(); var wip = try Builder.WipFunction.init(&o.builder, .{ .function = llvm_fn.ptrConst(&o.builder).kind.function, @@ -1106,10 +1107,8 @@ pub const Object = struct { } fn genModuleLevelAssembly(object: *Object) !void { - const mod = object.module; - const writer = object.builder.setModuleAsm(); - for (mod.global_assembly.values()) |assembly| { + for (object.pt.zcu.global_assembly.values()) |assembly| { try writer.print("{s}\n", .{assembly}); } try object.builder.finishModuleAsm(); @@ -1131,6 +1130,9 @@ pub const Object = struct { }; pub fn emit(self: *Object, options: EmitOptions) !void { + const zcu = self.pt.zcu; + const comp = zcu.comp; + { try self.genErrorNameTable(); try self.genCmpLtErrorsLenFunction(); @@ -1143,8 +1145,8 @@ pub const Object = struct { const namespace_index = self.debug_unresolved_namespace_scopes.keys()[i]; const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i]; - const namespace = self.module.namespacePtr(namespace_index); - const debug_type = try self.lowerDebugType(namespace.getType(self.module)); + const namespace = zcu.namespacePtr(namespace_index); + const debug_type = try self.lowerDebugType(namespace.getType(zcu)); self.builder.debugForwardReferenceSetType(fwd_ref, debug_type); } @@ -1206,12 +1208,12 @@ pub const Object = struct { try file.writeAll(ptr[0..(bitcode.len * 4)]); } - if (!build_options.have_llvm or !self.module.comp.config.use_lib_llvm) { + if (!build_options.have_llvm or !comp.config.use_lib_llvm) { log.err("emitting without libllvm not implemented", .{}); return error.FailedToEmit; } - initializeLLVMTarget(self.module.comp.root_mod.resolved_target.result.cpu.arch); + initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch); const context: *llvm.Context = llvm.Context.create(); errdefer context.dispose(); @@ -1247,8 +1249,8 @@ pub const Object = struct { @panic("Invalid LLVM triple"); } - const optimize_mode = self.module.comp.root_mod.optimize_mode; - const pic = self.module.comp.root_mod.pic; + const optimize_mode = comp.root_mod.optimize_mode; + const pic = comp.root_mod.pic; const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug) .None @@ -1257,12 +1259,12 @@ pub const Object = struct { const reloc_mode: llvm.RelocMode = if (pic) .PIC - else if (self.module.comp.config.link_mode == .dynamic) + else if (comp.config.link_mode == .dynamic) llvm.RelocMode.DynamicNoPIC else .Static; - const code_model: llvm.CodeModel = switch (self.module.comp.root_mod.code_model) { + const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) { .default => .Default, .tiny => .Tiny, .small => .Small, @@ -1277,24 +1279,24 @@ pub const Object = struct { var target_machine = llvm.TargetMachine.create( target, target_triple_sentinel, - if (self.module.comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, - self.module.comp.root_mod.resolved_target.llvm_cpu_features.?, + if (comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, + comp.root_mod.resolved_target.llvm_cpu_features.?, opt_level, reloc_mode, code_model, - self.module.comp.function_sections, - self.module.comp.data_sections, + comp.function_sections, + comp.data_sections, float_abi, - if 
(target_util.llvmMachineAbi(self.module.comp.root_mod.resolved_target.result)) |s| s.ptr else null, + if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null, ); errdefer target_machine.dispose(); if (pic) module.setModulePICLevel(); - if (self.module.comp.config.pie) module.setModulePIELevel(); + if (comp.config.pie) module.setModulePIELevel(); if (code_model != .Default) module.setModuleCodeModel(code_model); - if (self.module.comp.llvm_opt_bisect_limit >= 0) { - context.setOptBisectLimit(self.module.comp.llvm_opt_bisect_limit); + if (comp.llvm_opt_bisect_limit >= 0) { + context.setOptBisectLimit(comp.llvm_opt_bisect_limit); } // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. @@ -1352,11 +1354,13 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - zcu: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { + assert(std.meta.eql(pt, o.pt)); + const zcu = pt.zcu; const comp = zcu.comp; const func = zcu.funcInfo(func_index); const decl_index = func.owner_decl; @@ -1437,7 +1441,7 @@ pub const Object = struct { var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const ret_ptr: Builder.Value = if (sret) param: { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1478,8 +1482,8 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param = wip.arg(llvm_arg_i); - if (isByRef(param_ty, zcu)) { - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = param.typeOfWip(&wip); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); @@ -1495,12 +1499,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1510,12 +1514,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1528,11 +1532,11 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, 
arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1556,7 +1560,7 @@ pub const Object = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); const ptr_param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1573,7 +1577,7 @@ pub const Object = struct { const field_types = it.types_buffer[0..it.types_len]; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(zcu).toLlvm(); + const param_alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target); const llvm_ty = try o.builder.structType(.normal, field_types); for (0..field_types.len) |field_i| { @@ -1585,7 +1589,7 @@ pub const Object = struct { _ = try wip.store(.normal, param, field_ptr, alignment); } - const is_by_ref = isByRef(param_ty, zcu); + const is_by_ref = isByRef(param_ty, pt); args.appendAssumeCapacity(if (is_by_ref) arg_ptr else @@ -1603,11 +1607,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1618,11 +1622,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1700,8 +1704,9 @@ pub const Object = struct { try fg.wip.finish(); } - pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void { - const decl = module.declPtr(decl_index); + pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + assert(std.meta.eql(pt, self.pt)); + const decl = pt.zcu.declPtr(decl_index); var dg: DeclGen = .{ .object = self, .decl = decl, @@ -1711,7 +1716,7 @@ pub const Object = struct { dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); + try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, @@ -1721,10 +1726,12 @@ pub const Object = struct { pub fn updateExports( self: *Object, - zcu: *Zcu, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, 
export_indices: []const u32, ) link.File.UpdateExportsError!void { + assert(std.meta.eql(pt, self.pt)); + const zcu = pt.zcu; const decl_index = switch (exported) { .decl_index => |i| i, .value => |val| return updateExportedValue(self, zcu, val, export_indices), @@ -1748,7 +1755,7 @@ pub const Object = struct { fn updateExportedValue( o: *Object, - mod: *Module, + mod: *Zcu, exported_value: InternPool.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1783,7 +1790,7 @@ pub const Object = struct { fn updateExportedGlobal( o: *Object, - mod: *Module, + mod: *Zcu, global_index: Builder.Global.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1879,7 +1886,7 @@ pub const Object = struct { global.delete(&self.builder); } - fn getDebugFile(o: *Object, file: *const Module.File) Allocator.Error!Builder.Metadata { + fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata { const gpa = o.gpa; const gop = try o.debug_file_map.getOrPut(gpa, file); errdefer assert(o.debug_file_map.remove(file)); @@ -1909,7 +1916,8 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; if (o.debug_type_map.get(ty)) |debug_type| return debug_type; @@ -1931,7 +1939,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const builder_name = try o.builder.metadataString(name); - const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types + const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types const debug_int_type = switch (info.signedness) { .signed => try o.builder.debugSignedType(builder_name, debug_bits), .unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits), @@ -1941,9 +1949,9 @@ pub const Object = struct { }, .Enum => { const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; @@ -1961,7 +1969,7 @@ pub const Object = struct { for (enum_type.names.get(ip), 0..) 
|field_name_ip, i| { var bigint_space: Value.BigIntSpace = undefined; const bigint = if (enum_type.values.len != 0) - Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu) + Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt) else std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); @@ -1986,8 +1994,8 @@ pub const Object = struct { scope, owner_decl.typeSrcLine(zcu) + 1, // Line try o.lowerDebugType(int_ty), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(enumerators), ); @@ -2027,10 +2035,10 @@ pub const Object = struct { ptr_info.flags.is_const or ptr_info.flags.is_volatile or ptr_info.flags.size == .Many or ptr_info.flags.size == .C or - !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) { - const bland_ptr_ty = try zcu.ptrType(.{ - .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + const bland_ptr_ty = try pt.ptrType(.{ + .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) .anyopaque_type else ptr_info.child, @@ -2060,10 +2068,10 @@ pub const Object = struct { defer gpa.free(name); const line = 0; - const ptr_size = ptr_ty.abiSize(zcu); - const ptr_align = ptr_ty.abiAlignment(zcu); - const len_size = len_ty.abiSize(zcu); - const len_align = len_ty.abiAlignment(zcu); + const ptr_size = ptr_ty.abiSize(pt); + const ptr_align = ptr_ty.abiAlignment(pt); + const len_size = len_ty.abiSize(pt); + const len_align = len_ty.abiAlignment(pt); const len_offset = len_align.forward(ptr_size); @@ -2095,8 +2103,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope line, .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_ptr_type, debug_len_type, @@ -2124,7 +2132,7 @@ pub const Object = struct { 0, // Line debug_elem_ty, target.ptrBitWidth(), - (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8, + (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2149,7 +2157,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), @@ -2171,8 +2179,8 @@ pub const Object = struct { .none, // Scope 0, // Line try o.lowerDebugType(ty.childType(zcu)), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2214,8 +2222,8 @@ pub const Object = struct { .none, // Scope 0, // Line debug_elem_type, - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2231,7 +2239,7 @@ pub const Object = struct { const name = try 
o.allocTypeName(ty); defer gpa.free(name); const child_ty = ty.optionalChild(zcu); - if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_bool_type = try o.builder.debugBoolType( try o.builder.metadataString(name), 8, @@ -2258,10 +2266,10 @@ pub const Object = struct { } const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(zcu); - const payload_align = child_ty.abiAlignment(zcu); - const non_null_size = non_null_ty.abiSize(zcu); - const non_null_align = non_null_ty.abiAlignment(zcu); + const payload_size = child_ty.abiSize(pt); + const payload_align = child_ty.abiAlignment(pt); + const non_null_size = non_null_ty.abiSize(pt); + const non_null_align = non_null_ty.abiAlignment(pt); const non_null_offset = non_null_align.forward(payload_size); const debug_data_type = try o.builder.debugMemberType( @@ -2292,8 +2300,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_data_type, debug_some_type, @@ -2310,7 +2318,7 @@ pub const Object = struct { }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // TODO: Maybe remove? const debug_error_union_type = try o.lowerDebugType(Type.anyerror); try o.debug_type_map.put(gpa, ty, debug_error_union_type); @@ -2320,10 +2328,10 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const error_size = Type.anyerror.abiSize(zcu); - const error_align = Type.anyerror.abiAlignment(zcu); - const payload_size = payload_ty.abiSize(zcu); - const payload_align = payload_ty.abiAlignment(zcu); + const error_size = Type.anyerror.abiSize(pt); + const error_align = Type.anyerror.abiAlignment(pt); + const payload_size = payload_ty.abiSize(pt); + const payload_align = payload_ty.abiAlignment(pt); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -2371,8 +2379,8 @@ pub const Object = struct { o.debug_compile_unit, // Sope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&fields), ); @@ -2399,8 +2407,8 @@ pub const Object = struct { const info = Type.fromInterned(backing_int_ty).intInfo(zcu); const builder_name = try o.builder.metadataString(name); const debug_int_type = switch (info.signedness) { - .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8), - .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8), + .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8), + .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8), }; try o.debug_type_map.put(gpa, ty, debug_int_type); return debug_int_type; @@ -2420,10 +2428,10 @@ pub const Object = struct { const debug_fwd_ref = try o.builder.debugForwardReference(); for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); - const field_align = Type.fromInterned(field_ty).abiAlignment(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); const field_offset = field_align.forward(offset); offset = field_offset + field_size; @@ -2451,8 +2459,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2479,7 +2487,7 @@ pub const Object = struct { else => {}, } - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const owner_decl_index = ty.getOwnerDecl(zcu); const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_struct_type); @@ -2502,14 +2510,14 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - const field_size = field_ty.abiSize(zcu); - const field_align = zcu.structFieldAlignment( + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + const field_size = field_ty.abiSize(pt); + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_offset = ty.structFieldOffset(field_index, zcu); + const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); @@ -2532,8 +2540,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2553,7 +2561,7 @@ pub const Object = struct { const union_type = ip.loadUnionType(ty.toIntern()); if (!union_type.haveFieldTypes(ip) or - !ty.hasRuntimeBitsIgnoreComptime(zcu) or + !ty.hasRuntimeBitsIgnoreComptime(pt) or !union_type.haveLayout(ip)) { const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); @@ -2561,7 +2569,7 @@ pub const Object = struct { return debug_union_type; } - const layout = zcu.getUnionLayout(union_type); + const layout = pt.getUnionLayout(union_type); const debug_fwd_ref = try o.builder.debugForwardReference(); @@ -2575,8 +2583,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple( &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))}, ), @@ -2603,12 +2611,12 @@ pub const Object = struct { for (0..tag_type.names.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue; + if 
(!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) { .@"packed" => .none, - .auto, .@"extern" => zcu.unionFieldNormalAlignment(union_type, @intCast(field_index)), + .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)), }; const field_name = tag_type.names.get(ip)[field_index]; @@ -2637,8 +2645,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2696,8 +2704,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&full_fields), ); @@ -2718,13 +2726,13 @@ pub const Object = struct { try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len); // Return type goes first. - if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) { - const sret = firstParamSRet(fn_info, zcu, target); + if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) { + const sret = firstParamSRet(fn_info, pt, target); const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty)); if (sret) { - const ptr_ty = try zcu.singleMutPtrType(Type.fromInterned(fn_info.return_type)); + const ptr_ty = try pt.singleMutPtrType(Type.fromInterned(fn_info.return_type)); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } } else { @@ -2732,18 +2740,18 @@ pub const Object = struct { } if (Type.fromInterned(fn_info.return_type).isError(zcu) and - o.module.comp.config.any_error_tracing) + zcu.comp.config.any_error_tracing) { - const ptr_ty = try zcu.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } for (0..fn_info.param_types.len) |i| { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]); - if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; - if (isByRef(param_ty, zcu)) { - const ptr_ty = try zcu.singleMutPtrType(param_ty); + if (isByRef(param_ty, pt)) { + const ptr_ty = try pt.singleMutPtrType(param_ty); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } else { debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty)); @@ -2770,7 +2778,7 @@ pub const Object = struct { } fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(namespace_index); const file_scope = namespace.fileScope(zcu); if (namespace.parent == .none) return try o.getDebugFile(file_scope); @@ -2783,7 +2791,7 @@ pub const Object = struct { } fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const 
file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); return o.builder.debugStructType( @@ -2799,7 +2807,7 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.module; + const zcu = o.pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; @@ -2807,13 +2815,13 @@ pub const Object = struct { const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = zcu }).?; + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = zcu }).?; + const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); // Sema should have ensured that StackTrace was analyzed. @@ -2824,7 +2832,7 @@ pub const Object = struct { fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 { var buffer = std.ArrayList(u8).init(o.gpa); errdefer buffer.deinit(); - try ty.print(buffer.writer(), o.module); + try ty.print(buffer.writer(), o.pt); return buffer.toOwnedSliceSentinel(0); } @@ -2835,7 +2843,8 @@ pub const Object = struct { o: *Object, decl_index: InternPool.DeclIndex, ) Allocator.Error!Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = o.gpa; const decl = zcu.declPtr(decl_index); @@ -2848,7 +2857,7 @@ pub const Object = struct { assert(decl.has_tv); const fn_info = zcu.typeToFunc(zig_fn_type).?; const target = owner_mod.resolved_target.result; - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const is_extern = decl.isExtern(zcu); const function_index = try o.builder.addFunction( @@ -2929,14 +2938,14 @@ pub const Object = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, zcu)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu); + const alignment = param_ty.abiAlignment(pt); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -2964,7 +2973,7 @@ pub const Object = struct { attributes: *Builder.FunctionAttributes.Wip, owner_mod: *Package.Module, ) Allocator.Error!void { - const comp = o.module.comp; + const comp = o.pt.zcu.comp; if (!owner_mod.red_zone) { try 
attributes.addFnAttr(.noredzone, &o.builder); @@ -3039,7 +3048,7 @@ pub const Object = struct { } errdefer assert(o.anon_decl_map.remove(decl_val)); - const mod = o.module; + const mod = o.pt.zcu; const decl_ty = mod.intern_pool.typeOf(decl_val); const variable_index = try o.builder.addVariable( @@ -3065,7 +3074,7 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3100,11 +3109,12 @@ pub const Object = struct { } fn errorIntType(o: *Object) Allocator.Error!Builder.Type { - return o.builder.intType(o.module.errorSetBits()); + return o.builder.intType(o.pt.zcu.errorSetBits()); } fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (t.toIntern()) { @@ -3230,7 +3240,7 @@ pub const Object = struct { ), .opt_type => |child_ty| { // Must stay in sync with `opt_payload` logic in `lowerPtr`. - if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8; + if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8; const payload_ty = try o.lowerType(Type.fromInterned(child_ty)); if (t.optionalReprIsPayload(mod)) return payload_ty; @@ -3238,8 +3248,8 @@ pub const Object = struct { comptime assert(optional_layout_version == 3); var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined }; var fields_len: usize = 2; - const offset = Type.fromInterned(child_ty).abiSize(mod) + 1; - const abi_size = t.abiSize(mod); + const offset = Type.fromInterned(child_ty).abiSize(pt) + 1; + const abi_size = t.abiSize(pt); const padding_len = abi_size - offset; if (padding_len > 0) { fields[2] = try o.builder.arrayType(padding_len, .i8); @@ -3252,16 +3262,16 @@ pub const Object = struct { // Must stay in sync with `codegen.errUnionPayloadOffset`. // See logic in `lowerPtr`. 
const error_type = try o.errorIntType(); - if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod)) + if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt)) return error_type; const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type)); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try o.pt.errorIntType(); - const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); - const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(mod); - const error_size = err_int_ty.abiSize(mod); + const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt); + const error_size = err_int_ty.abiSize(pt); var fields: [3]Builder.Type = undefined; var fields_len: usize = 2; @@ -3317,12 +3327,12 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_ty_align = field_ty.abiAlignment(mod); + const field_ty_align = field_ty.abiAlignment(pt); if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed"; big_align = big_align.max(field_align); const prev_offset = offset; @@ -3334,7 +3344,7 @@ pub const Object = struct { try o.builder.arrayType(padding_len, .i8), ); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. @@ -3353,7 +3363,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(field_ty)); - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -3386,7 +3396,7 @@ pub const Object = struct { var offset: u64 = 0; var big_align: InternPool.Alignment = .none; - const struct_size = t.abiSize(mod); + const struct_size = t.abiSize(pt); for ( anon_struct_type.types.get(ip), @@ -3395,7 +3405,7 @@ pub const Object = struct { ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -3405,7 +3415,7 @@ pub const Object = struct { o.gpa, try o.builder.arrayType(padding_len, .i8), ); - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) { + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. 
@@ -3423,7 +3433,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty))); - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -3440,10 +3450,10 @@ pub const Object = struct { if (o.type_map.get(t.toIntern())) |value| return value; const union_obj = ip.loadUnionType(t.toIntern()); - const layout = mod.getUnionLayout(union_obj); + const layout = pt.getUnionLayout(union_obj); if (union_obj.flagsPtr(ip).layout == .@"packed") { - const int_ty = try o.builder.intType(@intCast(t.bitSize(mod))); + const int_ty = try o.builder.intType(@intCast(t.bitSize(pt))); try o.type_map.put(o.gpa, t.toIntern(), int_ty); return int_ty; } @@ -3552,18 +3562,20 @@ pub const Object = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !mod.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), - else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt), + else => elem_ty.hasRuntimeBitsIgnoreComptime(pt), }; return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8; } fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const ret_ty = try lowerFnRetTy(o, fn_info); @@ -3571,14 +3583,14 @@ pub const Object = struct { var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_params.deinit(o.gpa); - if (firstParamSRet(fn_info, mod, target)) { + if (firstParamSRet(fn_info, pt, target)) { try llvm_params.append(o.gpa, .ptr); } if (Type.fromInterned(fn_info.return_type).isError(mod) and mod.comp.config.any_error_tracing) { - const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); try llvm_params.append(o.gpa, try o.lowerType(ptr_ty)); } @@ -3595,7 +3607,7 @@ pub const Object = struct { .abi_sized_int => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); try llvm_params.append(o.gpa, try o.builder.intType( - @intCast(param_ty.abiSize(mod) * 8), + @intCast(param_ty.abiSize(pt) * 8), )); }, .slice => { @@ -3633,7 +3645,8 @@ pub const Object = struct { } fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3666,15 +3679,15 @@ pub const Object = struct { var running_int = try o.builder.intConst(llvm_int_ty, 0); var running_bits: u16 = 0; for (struct_type.field_types.get(ip), 0..) 
|field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits); - const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(mod, field_index)).toIntern()); + const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern()); const shifted = try o.builder.binConst(.shl, field_val, shift_rhs); running_int = try o.builder.binConst(.xor, running_int, shifted); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); running_bits += ty_bit_size; } return running_int; @@ -3683,7 +3696,7 @@ pub const Object = struct { else => unreachable, }, .un => |un| { - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -3701,7 +3714,7 @@ pub const Object = struct { } const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(llvm_int_ty, 0); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0); return o.lowerValueToInt(llvm_int_ty, un.val); }, .simple_value => |simple_value| switch (simple_value) { @@ -3715,7 +3728,7 @@ pub const Object = struct { .opt => {}, // pointer like optional expected else => unreachable, } - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8); var stack = std.heap.stackFallback(32, o.gpa); @@ -3729,12 +3742,7 @@ pub const Object = struct { defer allocator.free(limbs); @memset(limbs, 0); - val.writeToPackedMemory( - ty, - mod, - std.mem.sliceAsBytes(limbs)[0..bytes], - 0, - ) catch unreachable; + val.writeToPackedMemory(ty, pt, std.mem.sliceAsBytes(limbs)[0..bytes], 0) catch unreachable; if (builtin.target.cpu.arch.endian() == .little) { if (target.cpu.arch.endian() == .big) @@ -3752,7 +3760,8 @@ pub const Object = struct { } fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3811,7 +3820,7 @@ pub const Object = struct { }, .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_space, mod); + const bigint = val.toBigInt(&bigint_space, pt); return lowerBigInt(o, ty, bigint); }, .err => |err| { @@ -3821,24 +3830,24 @@ pub const Object = struct { }, .error_union => |error_union| { const err_val = switch (error_union.val) { - .err_name => |err_name| try mod.intern(.{ .err = .{ + .err_name => |err_name| try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, } }), - .payload => (try mod.intValue(try mod.errorIntType(), 0)).toIntern(), + .payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(), }; - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const payload_type = ty.errorUnionPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { // We use the error type directly as the type. 
return o.lowerValue(err_val); } - const payload_align = payload_type.abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = payload_type.abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); const llvm_error_value = try o.lowerValue(err_val); const llvm_payload_value = try o.lowerValue(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .err_name => try pt.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }); @@ -3869,16 +3878,16 @@ pub const Object = struct { .enum_tag => |enum_tag| o.lowerValue(enum_tag.int), .float => switch (ty.floatBits(target)) { 16 => if (backendSupportsF16(target)) - try o.builder.halfConst(val.toFloat(f16, mod)) + try o.builder.halfConst(val.toFloat(f16, pt)) else - try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))), - 32 => try o.builder.floatConst(val.toFloat(f32, mod)), - 64 => try o.builder.doubleConst(val.toFloat(f64, mod)), + try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))), + 32 => try o.builder.floatConst(val.toFloat(f32, pt)), + 64 => try o.builder.doubleConst(val.toFloat(f64, pt)), 80 => if (backendSupportsF80(target)) - try o.builder.x86_fp80Const(val.toFloat(f80, mod)) + try o.builder.x86_fp80Const(val.toFloat(f80, pt)) else - try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))), - 128 => try o.builder.fp128Const(val.toFloat(f128, mod)), + try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))), + 128 => try o.builder.fp128Const(val.toFloat(f128, pt)), else => unreachable, }, .ptr => try o.lowerPtr(arg_val, 0), @@ -3891,7 +3900,7 @@ pub const Object = struct { const payload_ty = ty.optionalChild(mod); const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none)); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return non_null_bit; } const llvm_ty = try o.lowerType(ty); @@ -3909,7 +3918,7 @@ pub const Object = struct { var fields: [3]Builder.Type = undefined; var vals: [3]Builder.Constant = undefined; vals[0] = try o.lowerValue(switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .none => try pt.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }); vals[1] = non_null_bit; @@ -4058,9 +4067,9 @@ pub const Object = struct { 0.., ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -4076,13 +4085,13 @@ pub const Object = struct { } vals[llvm_index] = - try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + try o.lowerValue((try val.fieldValue(pt, field_index)).toIntern()); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -4109,7 +4118,7 @@ pub const Object = struct { if (struct_type.layout == .@"packed") { comptime 
assert(Type.packed_struct_layout_version == 2); - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4138,7 +4147,7 @@ pub const Object = struct { var field_it = struct_type.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, @@ -4158,20 +4167,20 @@ pub const Object = struct { llvm_index += 1; } - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } vals[llvm_index] = try o.lowerValue( - (try val.fieldValue(mod, field_index)).toIntern(), + (try val.fieldValue(pt, field_index)).toIntern(), ); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -4195,7 +4204,7 @@ pub const Object = struct { }, .un => |un| { const union_ty = try o.lowerType(ty); - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -4206,8 +4215,8 @@ pub const Object = struct { const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (container_layout == .@"packed") { - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0); - const bits = ty.bitSize(mod); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4219,7 +4228,7 @@ pub const Object = struct { // must pointer cast to the expected type before accessing the union. 
need_unnamed = layout.most_aligned_field != field_index; - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8)); } @@ -4228,7 +4237,7 @@ pub const Object = struct { if (payload_ty != union_ty.structFields(&o.builder)[ @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)) ]) need_unnamed = true; - const field_size = field_ty.abiSize(mod); + const field_size = field_ty.abiSize(pt); if (field_size == layout.payload_size) break :p payload; const padding_len = layout.payload_size - field_size; const padding_ty = try o.builder.arrayType(padding_len, .i8); @@ -4239,7 +4248,7 @@ pub const Object = struct { } else p: { assert(layout.tag_size == 0); if (container_layout == .@"packed") { - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4286,7 +4295,7 @@ pub const Object = struct { ty: Type, bigint: std.math.big.int.Const, ) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint); } @@ -4295,7 +4304,8 @@ pub const Object = struct { ptr_val: InternPool.Index, prev_offset: u64, ) Error!Builder.Constant { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { @@ -4320,7 +4330,7 @@ pub const Object = struct { eu_ptr, offset + @import("../codegen.zig").errUnionPayloadOffset( Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu), - zcu, + pt, ), ), .opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset), @@ -4336,7 +4346,7 @@ pub const Object = struct { }; }, .Struct, .Union => switch (agg_ty.containerLayout(zcu)) { - .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu), + .auto => agg_ty.structFieldOffset(@intCast(field.index), pt), .@"extern", .@"packed" => unreachable, }, else => unreachable, @@ -4353,7 +4363,8 @@ pub const Object = struct { o: *Object, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, ) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const decl_val = anon_decl.val; const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); @@ -4370,14 +4381,14 @@ pub const Object = struct { const ptr_ty = Type.fromInterned(anon_decl.orig_ty); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); if (is_fn_body) @panic("TODO"); const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target); - const alignment = ptr_ty.ptrAlignment(mod); + const alignment = ptr_ty.ptrAlignment(pt); const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( @@ -4389,7 +4400,8 @@ pub const Object = struct { } fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; // In the case of something like: // fn foo() void {} @@ -4408,10 +4420,10 @@ pub const Object = struct { } const 
decl_ty = decl.typeOf(mod); - const ptr_ty = try decl.declPtrType(mod); + const ptr_ty = try decl.declPtrType(pt); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) { return o.lowerPtrToVoid(ptr_ty); @@ -4431,7 +4443,7 @@ pub const Object = struct { } fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation @@ -4459,20 +4471,21 @@ pub const Object = struct { /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return .none; - return o.builder.intType(@intCast(ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(ty.abiSize(pt) * 8)); }, .Bool => return .i8, else => return .none, }; const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8)); } else { return .none; } @@ -4486,7 +4499,8 @@ pub const Object = struct { fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) Allocator.Error!void { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; if (param_ty.isPtrAtRuntime(mod)) { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { @@ -4507,7 +4521,7 @@ pub const Object = struct { const elem_align = if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1"); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1"); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder); } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) { .signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder), @@ -4540,7 +4554,7 @@ pub const Object = struct { const name = try o.builder.strtabString(lt_errors_fn_name); if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; - const zcu = o.module; + const zcu = o.pt.zcu; const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal), @@ -4559,7 +4573,8 @@ pub const Object = struct { } fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); @@ -4618,7 +4633,7 @@ pub const Object = struct { const return_block = try wip.block(1, "Name"); const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, return_block, &wip); @@ 
-4636,13 +4651,13 @@ pub const Object = struct { pub const DeclGen = struct { object: *Object, - decl: *Module.Decl, + decl: *Zcu.Decl, decl_index: InternPool.DeclIndex, - err_msg: ?*Module.ErrorMsg, + err_msg: ?*Zcu.ErrorMsg, fn ownerModule(dg: DeclGen) *Package.Module { const o = dg.object; - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(dg.decl.src_namespace); const file_scope = namespace.fileScope(zcu); return file_scope.mod; @@ -4653,15 +4668,15 @@ pub const DeclGen = struct { assert(dg.err_msg == null); const o = dg.object; const gpa = o.gpa; - const mod = o.module; - const src_loc = dg.decl.navSrcLoc(mod); - dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); + const src_loc = dg.decl.navSrcLoc(o.pt.zcu); + dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } fn genDecl(dg: *DeclGen) !void { const o = dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = dg.decl; const decl_index = dg.decl_index; @@ -4672,7 +4687,7 @@ pub const DeclGen = struct { } else { const variable_index = try o.resolveGlobalDecl(decl_index); variable_index.setAlignment( - decl.getAlignment(zcu).toLlvm(), + decl.getAlignment(pt).toLlvm(), &o.builder, ); if (decl.@"linksection".toSlice(ip)) |section| @@ -4833,23 +4848,21 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; - const o = self.dg.object; - const mod = o.module; - const llvm_val = try self.resolveValue((try self.air.value(inst, mod)).?); + const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?); gop.value_ptr.* = llvm_val.toValue(); return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; - const ty = val.typeOf(mod); + const pt = o.pt; + const ty = val.typeOf(pt.zcu); const llvm_val = try o.lowerValue(val.toIntern()); - if (!isByRef(ty, mod)) return llvm_val; + if (!isByRef(ty, pt)) return llvm_val; // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
- const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const variable_index = try o.builder.addVariable( .empty, llvm_val.typeOf(&o.builder), @@ -4859,7 +4872,7 @@ pub const FuncGen = struct { variable_index.setLinkage(.private, &o.builder); variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); - variable_index.setAlignment(ty.abiAlignment(mod).toLlvm(), &o.builder); + variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder); return o.builder.convConst( variable_index.toConst(&o.builder), try o.builder.ptrType(toLlvmAddressSpace(.generic, target)), @@ -4868,10 +4881,10 @@ pub const FuncGen = struct { fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; if (o.null_opt_usize == .no_init) { - o.null_opt_usize = try self.resolveValue(Value.fromInterned(try mod.intern(.{ .opt = .{ - .ty = try mod.intern(.{ .opt_type = .usize_type }), + o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{ + .ty = try pt.intern(.{ .opt_type = .usize_type }), .val = .none, } }))); } @@ -4880,7 +4893,7 @@ pub const FuncGen = struct { fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { @@ -5145,7 +5158,8 @@ pub const FuncGen = struct { if (maybe_inline_func) |inline_func| { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const func = zcu.funcInfo(inline_func); const decl_index = func.owner_decl; @@ -5161,7 +5175,7 @@ pub const FuncGen = struct { const fqn = try decl.fullyQualifiedName(zcu); - const fn_ty = try zcu.funcType(.{ + const fn_ty = try pt.funcType(.{ .param_types = &.{}, .return_type = .void_type, }); @@ -5228,7 +5242,8 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { @@ -5240,7 +5255,7 @@ pub const FuncGen = struct { const return_type = Type.fromInterned(fn_info.return_type); const llvm_fn = try self.resolveInst(pl_op.operand); const target = mod.getTarget(); - const sret = firstParamSRet(fn_info, mod, target); + const sret = firstParamSRet(fn_info, pt, target); var llvm_args = std.ArrayList(Builder.Value).init(self.gpa); defer llvm_args.deinit(); @@ -5258,14 +5273,13 @@ pub const FuncGen = struct { const llvm_ret_ty = try o.lowerType(return_type); try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder); - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment); try llvm_args.append(ret_ptr); break :blk ret_ptr; }; - const err_return_tracing = return_type.isError(mod) and - o.module.comp.config.any_error_tracing; + const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing; if (err_return_tracing) { assert(self.err_ret_trace != .none); try llvm_args.append(self.err_ret_trace); @@ -5279,8 +5293,8 @@ pub const FuncGen = struct { const param_ty = 
self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try o.lowerType(param_ty); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { @@ -5291,10 +5305,10 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { try llvm_args.append(llvm_arg); } else { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = llvm_arg.typeOfWip(&self.wip); const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); @@ -5306,10 +5320,10 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = try o.lowerType(param_ty); const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, ""); _ = try self.wip.store(.normal, loaded, arg_ptr, alignment); } else { @@ -5321,16 +5335,16 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8)); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. 
- const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); @@ -5349,9 +5363,9 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); - const is_by_ref = isByRef(param_ty, mod); + const is_by_ref = isByRef(param_ty, pt); const arg_ptr = if (is_by_ref) llvm_arg else ptr: { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); break :ptr ptr; @@ -5377,8 +5391,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5395,8 +5409,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5418,7 +5432,7 @@ pub const FuncGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, mod)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, @@ -5426,7 +5440,7 @@ pub const FuncGen = struct { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -5460,7 +5474,7 @@ pub const FuncGen = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); }, }; @@ -5485,17 +5499,17 @@ pub const FuncGen = struct { return .none; } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) { return .none; } const llvm_ret_ty = try o.lowerType(return_type); if (ret_ptr) |rp| { - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { return rp; } else 
{ // our by-ref status disagrees with sret so we must load. - const return_alignment = return_type.abiAlignment(mod).toLlvm(); + const return_alignment = return_type.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, ""); } } @@ -5506,19 +5520,19 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(abi_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); - return if (isByRef(return_type, mod)) + return if (isByRef(return_type, pt)) rp else try self.wip.load(.normal, llvm_ret_ty, rp, alignment, ""); } - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(llvm_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); return rp; @@ -5527,9 +5541,9 @@ pub const FuncGen = struct { } } - fn buildSimplePanic(fg: *FuncGen, panic_id: Module.PanicId) !void { + fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod); @@ -5567,15 +5581,16 @@ pub const FuncGen = struct { fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(un_op); if (self.ret_ptr != .none) { - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef and safety) undef: { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -5585,10 +5600,10 @@ pub const FuncGen = struct { // https://github.com/ziglang/zig/issues/15337 break :undef; } - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( self.ret_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), try o.builder.intValue(.i8, 0xaa), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -5615,7 +5630,7 @@ pub const FuncGen = struct { return .none; } const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be 
function pointers coerced @@ -5629,13 +5644,13 @@ pub const FuncGen = struct { const abi_ret_ty = try lowerFnRetTy(o, fn_info); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; + const alignment = ret_ty.abiAlignment(pt).toLlvm(); if (val_is_undef and safety) { const llvm_ret_ty = operand.typeOfWip(&self.wip); const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( rp, alignment, @@ -5651,7 +5666,7 @@ pub const FuncGen = struct { return .none; } - if (isByRef(ret_ty, mod)) { + if (isByRef(ret_ty, pt)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); @@ -5672,12 +5687,13 @@ pub const FuncGen = struct { fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced @@ -5694,7 +5710,7 @@ pub const FuncGen = struct { } const ptr = try self.resolveInst(un_op); const abi_ret_ty = try lowerFnRetTy(o, fn_info); - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const alignment = ret_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); return .none; } @@ -5711,17 +5727,17 @@ pub const FuncGen = struct { fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = ty_op.ty.toType(); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const mod = o.module; - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5737,15 +5753,15 @@ pub const FuncGen = struct { fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try 
self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5802,21 +5818,22 @@ pub const FuncGen = struct { rhs: Builder.Value, ) Allocator.Error!Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or operand_ty.optionalReprIsPayload(mod)) { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. - const is_by_ref = isByRef(scalar_ty, mod); + const is_by_ref = isByRef(scalar_ty, pt); const opt_llvm_ty = try o.lowerType(scalar_ty); const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref); @@ -5908,7 +5925,8 @@ pub const FuncGen = struct { body: []const Air.Inst.Index, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst_ty = self.typeOfIndex(inst); if (inst_ty.isNoReturn(mod)) { @@ -5916,7 +5934,7 @@ pub const FuncGen = struct { return .none; } - const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; defer if (have_block_result) breaks.list.deinit(self.gpa); @@ -5940,7 +5958,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) { + if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) { break :ty .ptr; } break :ty raw_llvm_ty; @@ -5958,13 +5976,13 @@ pub const FuncGen = struct { fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
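[Editorial note: the mechanical pattern repeated across these hunks is that functions now open with `const pt = o.pt; const mod = pt.zcu;`, and queries that depend on type layout (`abiAlignment`, `abiSize`, `bitSize`, `hasRuntimeBitsIgnoreComptime`, `isByRef`, ...) take `pt`, while purely structural queries (`childType`, `zigTypeTag`, `optionalChild`, ...) keep taking the `Zcu`. Presumably the layout queries can intern new values and so need the per-thread handle. A minimal, self-contained sketch of that split; `Zcu`, `PerThread`, `Ty`, and the query names here are illustrative stand-ins, not the real compiler API:

    const std = @import("std");

    const Ty = enum { byte, word, slice };

    /// Toy model of the split: `Zcu` owns shared state and answers
    /// structural questions; `Zcu.PerThread` wraps it and answers layout
    /// questions, which take the per-thread handle.
    const Zcu = struct {
        pointer_size: u8,

        pub const PerThread = struct {
            zcu: *Zcu,

            /// Layout query: goes through `pt`, like `ty.abiAlignment(pt)`.
            pub fn abiAlignment(pt: PerThread, ty: Ty) u8 {
                return switch (ty) {
                    .byte => 1,
                    .word, .slice => pt.zcu.pointer_size,
                };
            }
        };

        /// Structural query: still takes the bare `Zcu`, like `ty.childType(mod)`.
        pub fn isSlice(zcu: *const Zcu, ty: Ty) bool {
            _ = zcu;
            return ty == .slice;
        }
    };

    test "layout through pt, structure through zcu" {
        var zcu: Zcu = .{ .pointer_size = 8 };
        const pt: Zcu.PerThread = .{ .zcu = &zcu };
        try std.testing.expect(zcu.isSlice(.slice));
        try std.testing.expectEqual(@as(u8, 8), pt.abiAlignment(.word));
    }
]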
const operand_ty = self.typeOf(branch.operand); - const mod = o.module; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -5998,7 +6016,7 @@ pub const FuncGen = struct { fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union = try self.resolveInst(pl_op.operand); @@ -6006,14 +6024,14 @@ pub const FuncGen = struct { const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); const err_union_ty = self.typeOf(pl_op.operand); const payload_ty = self.typeOfIndex(inst); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); @@ -6033,9 +6051,10 @@ pub const FuncGen = struct { is_unused: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); const error_type = try o.errorIntType(); @@ -6048,8 +6067,8 @@ pub const FuncGen = struct { else err_union; } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + const err_field_index = try errUnionErrorOffset(payload_ty, pt); + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_field_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to this load @@ -6077,13 +6096,13 @@ pub const FuncGen = struct { } if (is_unused) return .none; if (!payload_has_bits) return if (operand_is_ptr) err_union else .none; - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); if (operand_is_ptr) { return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - } else if (isByRef(err_union_ty, mod)) { + } else if (isByRef(err_union_ty, pt)) { const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -6161,7 +6180,7 @@ pub const FuncGen = struct { fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = 
self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); @@ -6185,7 +6204,8 @@ pub const FuncGen = struct { fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); @@ -6193,7 +6213,7 @@ pub const FuncGen = struct { const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod)); const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, ""); const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{ try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0), @@ -6203,7 +6223,8 @@ pub const FuncGen = struct { fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const workaround_operand = try self.resolveInst(ty_op.operand); @@ -6213,7 +6234,7 @@ pub const FuncGen = struct { const operand = o: { // Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381. - const bit_size = operand_scalar_ty.bitSize(mod); + const bit_size = operand_scalar_ty.bitSize(pt); for ([_]u8{ 8, 16, 32, 64, 128 }) |b| { if (bit_size < b) { break :o try self.wip.cast( @@ -6241,7 +6262,7 @@ pub const FuncGen = struct { "", ); - const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt))); const rt_int_ty = try o.builder.intType(rt_int_bits); var extended = try self.wip.conv( if (is_signed_int) .signed else .unsigned, @@ -6287,7 +6308,8 @@ pub const FuncGen = struct { _ = fast; const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6309,7 +6331,7 @@ pub const FuncGen = struct { ); } - const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt))); const ret_ty = try o.builder.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -6348,19 +6370,20 @@ pub const FuncGen = struct { fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { const len = try fg.wip.extractValue(ptr, &.{1}, ""); const elem_ty = ty.childType(mod); - const abi_size = 
elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); if (abi_size == 1) return len; const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size); return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, ""); @@ -6368,7 +6391,7 @@ pub const FuncGen = struct { .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size); }, .Many, .C => unreachable, @@ -6383,7 +6406,7 @@ pub const FuncGen = struct { fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); @@ -6394,7 +6417,8 @@ pub const FuncGen = struct { fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); @@ -6404,11 +6428,11 @@ pub const FuncGen = struct { const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6417,7 +6441,7 @@ pub const FuncGen = struct { fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); @@ -6431,7 +6455,8 @@ pub const FuncGen = struct { fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -6440,15 +6465,15 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try o.lowerType(array_ty); const elem_ty = array_ty.childType(mod); - if (isByRef(array_ty, mod)) { + if (isByRef(array_ty, pt)) { const indices: [2]Builder.Value = .{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs, }; - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); if (canElideLoad(self, body_tail)) return elem_ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal); } else { const elem_ptr = @@ -6463,7 +6488,8 @@ pub const FuncGen = struct { fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + 
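[Editorial note: `sliceOrArrayLenInBytes` above reduces to one piece of arithmetic: element count times element ABI size, with a fast path when the element size is 1 (which is why the hunk keeps the early `return len`). A small sketch of that rule under those assumptions; the function and parameter names are illustrative:

    const std = @import("std");

    /// Byte length of a slice: element count times element ABI size.
    /// The multiply corresponds to the `mul nuw` emitted above; a valid
    /// slice cannot overflow usize when measured in bytes.
    fn sliceLenInBytes(len: usize, elem_abi_size: usize) usize {
        if (elem_abi_size == 1) return len; // fast path kept by the patch
        return len * elem_abi_size;
    }

    test "slice length in bytes" {
        try std.testing.expectEqual(@as(usize, 12), sliceLenInBytes(3, 4));
        try std.testing.expectEqual(@as(usize, 3), sliceLenInBytes(3, 1));
    }
]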
const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -6477,9 +6503,9 @@ pub const FuncGen = struct { &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } else &.{rhs}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6488,12 +6514,13 @@ pub const FuncGen = struct { fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -6530,7 +6557,8 @@ pub const FuncGen = struct { fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -6538,27 +6566,27 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; - if (!isByRef(struct_ty, mod)) { - assert(!isByRef(field_ty, mod)); + if (!isByRef(struct_ty, pt)) { + assert(!isByRef(field_ty, pt)); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { .@"packed" => { const struct_type = mod.typeToStruct(struct_ty).?; - const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index); + const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index); const containing_int = struct_llvm_val; const shift_amt = try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6575,12 +6603,12 @@ pub const 
FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6599,12 +6627,12 @@ pub const FuncGen = struct { const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; const field_ptr = try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const alignment = struct_ty.structFieldAlign(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const alignment = struct_ty.structFieldAlign(field_index, pt); + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = alignment }, }); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6617,12 +6645,12 @@ pub const FuncGen = struct { }, .Union => { const union_llvm_ty = try o.lowerType(struct_ty); - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const field_ptr = try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, ""); const payload_alignment = layout.payload_align.toLlvm(); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal); } else { @@ -6635,14 +6663,15 @@ pub const FuncGen = struct { fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try self.resolveInst(extra.field_ptr); const parent_ty = ty_pl.ty.toType().childType(mod); - const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); if (field_offset == 0) return field_ptr; const res_ty = try o.lowerType(ty_pl.ty.toType()); @@ -6696,7 +6725,7 @@ pub const FuncGen = struct { fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -6743,9 +6772,9 @@ pub const FuncGen = struct { try o.lowerDebugType(operand_ty), ); - const zcu = o.module; + const pt = o.pt; const owner_mod = self.dg.ownerModule(); - if (isByRef(operand_ty, zcu)) { + if (isByRef(operand_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -6759,7 +6788,7 @@ pub const FuncGen = 
struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(zcu).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, operand, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -6830,7 +6859,8 @@ pub const FuncGen = struct { // This stores whether we need to add an elementtype attribute and // if so, the element type itself. const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count); - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); var llvm_ret_i: usize = 0; @@ -6930,13 +6960,13 @@ pub const FuncGen = struct { const arg_llvm_value = try self.resolveInst(input); const arg_ty = self.typeOf(input); - const is_by_ref = isByRef(arg_ty, mod); + const is_by_ref = isByRef(arg_ty, pt); if (is_by_ref) { if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_llvm_ty = try o.lowerType(arg_ty); const load_inst = try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, ""); @@ -6948,7 +6978,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment); llvm_param_values[llvm_param_i] = arg_ptr; @@ -7000,7 +7030,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = llvm_rw_val; llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip); } else { - const alignment = rw_ty.abiAlignment(mod).toLlvm(); + const alignment = rw_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, ""); llvm_param_values[llvm_param_i] = loaded; llvm_param_types[llvm_param_i] = llvm_elem_ty; @@ -7161,7 +7191,7 @@ pub const FuncGen = struct { const output_ptr = try self.resolveInst(output); const output_ptr_ty = self.typeOf(output); - const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, output_value, output_ptr, alignment); } else { ret_val = output_value; @@ -7179,7 +7209,8 @@ pub const FuncGen = struct { cond: Builder.IntegerCondition, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7204,7 +7235,7 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else @@ -7212,7 +7243,7 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), ""); } - const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); + const is_by_ref = 
operand_is_ptr or isByRef(optional_ty, pt); return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref); } @@ -7223,7 +7254,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7241,7 +7273,7 @@ pub const FuncGen = struct { return val.toValue(); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "") else @@ -7249,9 +7281,9 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, zero, ""); } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); + const err_field_index = try errUnionErrorOffset(payload_ty, pt); - const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: { + const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, ""); @@ -7262,12 +7294,13 @@ pub const FuncGen = struct { fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. return operand; @@ -7283,13 +7316,14 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = try o.builder.intValue(.i8, 1); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. _ = try self.wip.store(.normal, non_null_bit, operand, .default); return operand; @@ -7314,13 +7348,14 @@ pub const FuncGen = struct { fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. 
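[Editorial note: the optional-handling hunks above branch on two representations. When `optionalReprIsPayload` holds (e.g. an optional pointer), the null state reuses the payload bits and no separate flag exists; otherwise the lowered optional carries an explicit i8 non-null byte, which is why the null checks compare against `intValue(.i8, 0)`. A self-contained sketch of the distinction; the `reprIsPayload` helper is hypothetical, and the size relations asserted in the test hold on current Zig:

    const std = @import("std");

    /// Models the pointer case of `optionalReprIsPayload`: for an optional
    /// pointer, the all-zero payload encodes null, so no flag byte is needed.
    fn reprIsPayload(comptime T: type) bool {
        return @typeInfo(T) == .Pointer;
    }

    test "optional pointers need no separate flag" {
        try std.testing.expect(reprIsPayload(*u32));
        try std.testing.expect(!reprIsPayload(u32));
        // ?*u32 is pointer-sized: the null state reuses the payload bits.
        try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
        // ?u32 must carry the non-null flag out of band.
        try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
    }
]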
@@ -7328,7 +7363,7 @@ pub const FuncGen = struct { } const opt_llvm_ty = try o.lowerType(optional_ty); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -7338,7 +7373,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -7347,17 +7383,17 @@ pub const FuncGen = struct { const result_ty = self.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return if (operand_is_ptr) operand else .none; } - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); if (operand_is_ptr) { return self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - } else if (isByRef(err_union_ty, mod)) { - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); + } else if (isByRef(err_union_ty, pt)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - if (isByRef(payload_ty, mod)) { + if (isByRef(payload_ty, pt)) { if (self.canElideLoad(body_tail)) return payload_ptr; return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } @@ -7373,7 +7409,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -7388,14 +7425,14 @@ pub const FuncGen = struct { } const payload_ty = err_union_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!operand_is_ptr) return operand; return self.wip.load(.normal, error_type, operand, .default, ""); } - const offset = try errUnionErrorOffset(payload_ty, mod); + const offset = try errUnionErrorOffset(payload_ty, pt); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); return self.wip.load(.normal, error_type, err_field_ptr, .default, ""); @@ -7406,22 +7443,23 @@ pub const FuncGen = struct { fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); const non_error_val = try o.builder.intValue(try o.errorIntType(), 0); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { _ = try self.wip.store(.normal, non_error_val, operand, .default); return operand; } const err_union_llvm_ty = try o.lowerType(err_union_ty); { - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); - const error_offset = try errUnionErrorOffset(payload_ty, mod); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); + const error_offset = try errUnionErrorOffset(payload_ty, pt); // First set the non-error value. const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, ""); _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment); @@ -7429,7 +7467,7 @@ pub const FuncGen = struct { // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return .none; - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, ""); } @@ -7446,19 +7484,21 @@ pub const FuncGen = struct { fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; + const mod = pt.zcu; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_ty = ty_pl.ty.toType(); const field_index = ty_pl.payload; - const mod = o.module; const struct_llvm_ty = try o.lowerType(struct_ty); const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; assert(self.err_ret_trace != .none); const field_ptr = try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, ""); - const field_alignment = struct_ty.structFieldAlign(field_index, mod); + const field_alignment = struct_ty.structFieldAlign(field_index, pt); const field_ty = struct_ty.structFieldType(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_alignment }, }); @@ -7490,29 +7530,30 @@ pub const FuncGen = struct { fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = try o.builder.intValue(.i8, 1); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) return operand; const llvm_optional_ty = try o.lowerType(optional_ty); - if (isByRef(optional_ty, mod)) { + if (isByRef(optional_ty, pt)) { const directReturn = self.isNextRet(body_tail); const optional_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = optional_ty.abiAlignment(mod).toLlvm(); + const alignment = optional_ty.abiAlignment(pt).toLlvm(); const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment); break :brk optional_ptr; }; const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try 
pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, ""); _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); @@ -7523,36 +7564,36 @@ pub const FuncGen = struct { fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return operand; } const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0); const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); return result_ptr; } @@ -7564,33 +7605,34 @@ pub const FuncGen = struct { fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand; const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try 
self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, operand, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; _ = payload_ptr_ty; @@ -7624,7 +7666,8 @@ pub const FuncGen = struct { fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -7636,7 +7679,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); - const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm(); const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, ""); const new_vector = try self.wip.insertElement(loaded, operand, index, ""); @@ -7646,7 +7689,7 @@ pub const FuncGen = struct { fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7666,7 +7709,7 @@ pub const FuncGen = struct { fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7696,7 +7739,7 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7714,7 +7757,7 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try fg.resolveInst(bin_op.lhs); @@ -7762,7 +7805,7 @@ pub const FuncGen = struct { fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7782,7 +7825,7 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = 
self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7803,7 +7846,7 @@ pub const FuncGen = struct { fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7823,7 +7866,7 @@ pub const FuncGen = struct { fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7844,7 +7887,7 @@ pub const FuncGen = struct { fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7873,7 +7916,7 @@ pub const FuncGen = struct { fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7889,7 +7932,7 @@ pub const FuncGen = struct { fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7921,7 +7964,7 @@ pub const FuncGen = struct { fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7939,7 +7982,7 @@ pub const FuncGen = struct { fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7956,7 +7999,7 @@ pub const FuncGen = struct { fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7992,7 +8035,7 @@ pub const FuncGen = struct { fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8014,7 +8057,7 @@ pub const FuncGen = struct { fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8042,7 +8085,8 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8065,8 +8109,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(inst_ty, 0).?; const overflow_index = o.llvmFieldIndex(inst_ty, 1).?; - if (isByRef(inst_ty, mod)) { - const result_alignment = inst_ty.abiAlignment(mod).toLlvm(); + if (isByRef(inst_ty, pt)) { + const result_alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, ""); @@ -8135,7 +8179,7 @@ pub const FuncGen = struct { return o.builder.addFunction( try o.builder.fnType(return_type, param_types, .normal), fn_name, - toLlvmAddressSpace(.generic, o.module.getTarget()), + toLlvmAddressSpace(.generic, o.pt.zcu.getTarget()), ); } @@ -8149,8 +8193,8 @@ pub const FuncGen = struct { params: [2]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const target = o.module.getTarget(); + const mod = o.pt.zcu; + const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const scalar_llvm_ty = try o.lowerType(scalar_ty); @@ -8255,7 +8299,7 @@ pub const FuncGen = struct { params: [params_len]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const llvm_ty = try o.lowerType(ty); @@ -8396,7 +8440,8 @@ pub const FuncGen = struct { fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8422,8 +8467,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(dest_ty, 0).?; const overflow_index = o.llvmFieldIndex(dest_ty, 1).?; - if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod).toLlvm(); + if (isByRef(dest_ty, pt)) { + const result_alignment = dest_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); @@ -8466,7 +8511,7 @@ pub const FuncGen = struct { fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8497,7 +8542,8 @@ pub const FuncGen = struct { fn airShlSat(self: *FuncGen, 
inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8505,7 +8551,7 @@ pub const FuncGen = struct { const lhs_ty = self.typeOf(bin_op.lhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const lhs_bits = lhs_scalar_ty.bitSize(mod); + const lhs_bits = lhs_scalar_ty.bitSize(pt); const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); @@ -8539,7 +8585,7 @@ pub const FuncGen = struct { fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8558,7 +8604,7 @@ pub const FuncGen = struct { fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8580,7 +8626,7 @@ pub const FuncGen = struct { fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dest_ty = self.typeOfIndex(inst); const dest_llvm_ty = try o.lowerType(dest_ty); @@ -8604,7 +8650,7 @@ pub const FuncGen = struct { fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8638,7 +8684,7 @@ pub const FuncGen = struct { fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8696,9 +8742,10 @@ pub const FuncGen = struct { fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const operand_is_ref = isByRef(operand_ty, mod); - const result_is_ref = isByRef(inst_ty, mod); + const pt = o.pt; + const mod = pt.zcu; + const operand_is_ref = isByRef(operand_ty, pt); + const result_is_ref = isByRef(inst_ty, pt); const llvm_dest_ty = try o.lowerType(inst_ty); if (operand_is_ref and result_is_ref) { @@ -8721,9 +8768,9 @@ pub const FuncGen = struct { if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { _ = try self.wip.store(.normal, operand, array_ptr, alignment); } else { @@ -8748,11 +8795,11 @@ pub const FuncGen = struct { const llvm_vector_ty = try o.lowerType(inst_ty); if 
(!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - const alignment = elem_ty.abiAlignment(mod).toLlvm(); + const alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_vector_ty, operand, alignment, ""); } else { // If the ABI size of the element type is not evenly divisible by size in bits; @@ -8777,24 +8824,25 @@ pub const FuncGen = struct { } if (operand_is_ref) { - const alignment = operand_ty.abiAlignment(mod).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_dest_ty, operand, alignment, ""); } if (result_is_ref) { - const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return result_ptr; } if (llvm_dest_ty.isStruct(&o.builder) or - ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and operand_ty.bitSize(mod) != inst_ty.bitSize(mod))) + ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and + operand_ty.bitSize(pt) != inst_ty.bitSize(pt))) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values or vectors with padding bits. // Therefore, we store operand to alloca, then load for result. 
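[Editorial note: the `bitcast_ok` condition above gates the direct load/store reinterpretation on the element type having no padding bits: its bit size must equal its ABI size in bits, otherwise the code falls back to the element-wise path. A tiny sketch of that predicate; the function name is illustrative:

    const std = @import("std");

    /// Mirrors `elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8`: a direct
    /// memory reinterpretation is only sound when no padding bits exist.
    fn bitcastOk(comptime Elem: type) bool {
        return @bitSizeOf(Elem) == @sizeOf(Elem) * 8;
    }

    test "padding bits force the element-wise path" {
        try std.testing.expect(bitcastOk(u32));
        // u24 occupies 4 ABI bytes but only 24 value bits, so reinterpreting
        // an array of them as a vector through one store would be wrong.
        try std.testing.expect(!bitcastOk(u24));
    }
]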
- const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, ""); @@ -8811,7 +8859,8 @@ pub const FuncGen = struct { fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const arg_val = self.args[self.arg_index]; self.arg_index += 1; @@ -8847,7 +8896,7 @@ pub const FuncGen = struct { }; const owner_mod = self.dg.ownerModule(); - if (isByRef(inst_ty, mod)) { + if (isByRef(inst_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -8861,7 +8910,7 @@ pub const FuncGen = struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_val, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -8897,27 +8946,29 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); //const pointee_llvm_ty = try o.lowerType(pointee_type); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(pointee_type, alignment); } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); if (self.ret_ptr != .none) return self.ret_ptr; //const ret_llvm_ty = try o.lowerType(ret_ty); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(ret_ty, alignment); } @@ -8928,7 +8979,7 @@ pub const FuncGen = struct { llvm_ty: Builder.Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const target = self.dg.object.module.getTarget(); + const target = self.dg.object.pt.zcu.getTarget(); return buildAllocaInner(&self.wip, llvm_ty, alignment, target); } @@ -8939,18 +8990,19 @@ pub const FuncGen = struct { alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { const o = self.dg.object; - return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.module), .i8), alignment); + return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = 
self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -8964,10 +9016,10 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. - const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt)); _ = try self.wip.callMemSet( dest_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -8992,7 +9044,7 @@ pub const FuncGen = struct { /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { @@ -9008,7 +9060,8 @@ pub const FuncGen = struct { fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); @@ -9016,7 +9069,7 @@ pub const FuncGen = struct { const ptr = try fg.resolveInst(ty_op.operand); elide: { - if (!isByRef(Type.fromInterned(ptr_info.child), mod)) break :elide; + if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide; if (!canElideLoad(fg, body_tail)) break :elide; return ptr; } @@ -9040,7 +9093,7 @@ pub const FuncGen = struct { _ = inst; const o = self.dg.object; const llvm_usize = try o.lowerType(Type.usize); - if (!target_util.supportsReturnAddress(o.module.getTarget())) { + if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } @@ -9068,7 +9121,8 @@ pub const FuncGen = struct { kind: Builder.Function.Instruction.CmpXchg.Kind, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); @@ -9095,7 +9149,7 @@ pub const FuncGen = struct { self.sync_scope, toLlvmAtomicOrdering(extra.successOrder()), toLlvmAtomicOrdering(extra.failureOrder()), - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), "", ); @@ -9118,7 +9172,8 @@ pub const FuncGen = struct { fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try 
self.resolveInst(pl_op.operand); @@ -9134,7 +9189,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. @@ -9181,19 +9236,20 @@ pub const FuncGen = struct { fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ordering = toLlvmAtomicOrdering(atomic_load.order); const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false); const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - Type.fromInterned(info.child).abiAlignment(mod)).toLlvm(); + Type.fromInterned(info.child).abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9228,11 +9284,12 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); @@ -9252,12 +9309,13 @@ pub const FuncGen = struct { fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm(); + const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm(); const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; @@ -9270,7 +9328,7 @@ pub const FuncGen = struct { ptr_ty.isSlice(mod) and std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory); - if (try self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, pt)) |elem_val| { if (elem_val.isUndefDeep(mod)) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9296,7 +9354,7 @@ pub const FuncGen = struct { // repeating byte pattern, for example, `@as(u64, 0)` has a // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. 
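// [Editorial aside, not part of this patch; a sketch of the property relied on.]
// `hasRepeatedByteRepr`, used just below, asks whether every byte of the
// value's memory representation is identical -- as for `@as(u64, 0)` or
// `@as(u32, 0xAAAAAAAA)` -- since only then can the element-wise fill collapse
// into a single memset with that fill byte:
const std = @import("std");

test "a value made of one repeated byte is memset-able" {
    const v: u32 = 0xAAAAAAAA;
    // Every byte of the representation is 0xAA, so memset(ptr, 0xAA, len)
    // would produce the same memory as storing the value element by element.
    for (std.mem.asBytes(&v)) |b| try std.testing.expectEqual(@as(u8, 0xAA), b);
}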
- if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { + if (try elem_val.hasRepeatedByteRepr(elem_ty, pt)) |byte_val| { const fill_byte = try o.builder.intValue(.i8, byte_val); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { @@ -9309,7 +9367,7 @@ pub const FuncGen = struct { } const value = try self.resolveInst(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(mod); + const elem_abi_size = elem_ty.abiSize(pt); if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. @@ -9361,9 +9419,9 @@ pub const FuncGen = struct { _ = try self.wip.brCond(end, body_block, end_block); self.wip.cursor = .{ .block = body_block }; - const elem_abi_align = elem_ty.abiAlignment(mod); + const elem_abi_align = elem_ty.abiAlignment(pt); const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm(); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { _ = try self.wip.callMemCpy( it_ptr.toValue(), it_ptr_align, @@ -9405,7 +9463,8 @@ pub const FuncGen = struct { fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const dest_ptr_ty = self.typeOf(bin_op.lhs); @@ -9434,9 +9493,9 @@ pub const FuncGen = struct { self.wip.cursor = .{ .block = memcpy_block }; _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9447,9 +9506,9 @@ pub const FuncGen = struct { _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9458,10 +9517,11 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ty = self.typeOf(bin_op.lhs).childType(mod); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); @@ -9479,13 +9539,13 @@ pub const FuncGen = struct { fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_handle = try self.resolveInst(ty_op.operand); - if (isByRef(un_ty, mod)) { + if (isByRef(un_ty, pt)) { const llvm_un_ty = try o.lowerType(un_ty); if (layout.payload_size == 0) return self.wip.load(.normal, llvm_un_ty, union_handle, .default, ""); @@ -9554,7 +9614,7 @@ pub const FuncGen = struct { fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); var bits = operand_ty.intInfo(mod).bits; @@ -9588,7 +9648,7 @@ pub const FuncGen = struct { fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -9638,7 +9698,8 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. @@ -9678,7 +9739,7 @@ pub const FuncGen = struct { for (0..enum_type.names.len) |field_index| { const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, named_block, &wip); } @@ -9745,7 +9806,8 @@ pub const FuncGen = struct { fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); @@ -9763,11 +9825,11 @@ pub const FuncGen = struct { defer self.gpa.free(values); for (values, 0..) |*val, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { val.* = try o.builder.undefConst(.i32); } else { - const int = elem.toSignedInt(mod); + const int = elem.toSignedInt(pt); const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len); val.* = try o.builder.intConst(.i32, unsigned); } @@ -9854,7 +9916,7 @@ pub const FuncGen = struct { fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; @@ -9964,7 +10026,8 @@ pub const FuncGen = struct { fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); @@ -9986,16 +10049,16 @@ pub const FuncGen = struct { if (mod.typeToPackedStruct(result_ty)) |struct_type| { const backing_int_ty = struct_type.backingIntType(ip).*; assert(backing_int_ty != .none); - const big_bits = Type.fromInterned(backing_int_ty).bitSize(mod); + const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt); const int_ty = try o.builder.intType(@intCast(big_bits)); comptime assert(Type.packed_struct_layout_version == 2); var running_int = try o.builder.intValue(int_ty, 0); var running_bits: u16 = 0; for (elements, struct_type.field_types.get(ip)) |elem, field_ty| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const 
non_int_val = try self.resolveInst(elem); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); const small_int_ty = try o.builder.intType(ty_bit_size); const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") @@ -10013,23 +10076,23 @@ pub const FuncGen = struct { assert(result_ty.containerLayout(mod) != .@"packed"); - if (isByRef(result_ty, mod)) { + if (isByRef(result_ty, pt)) { // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; const field_ptr = try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, ""); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ - .alignment = result_ty.structFieldAlign(i, mod), + .alignment = result_ty.structFieldAlign(i, pt), }, }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); @@ -10039,7 +10102,7 @@ pub const FuncGen = struct { } else { var result = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; @@ -10049,15 +10112,15 @@ pub const FuncGen = struct { } }, .Array => { - assert(isByRef(result_ty, mod)); + assert(isByRef(result_ty, pt)); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); const array_info = result_ty.arrayInfo(mod); - const elem_ptr_ty = try mod.ptrType(.{ + const elem_ptr_ty = try pt.ptrType(.{ .child = array_info.elem_type.toIntern(), }); @@ -10084,21 +10147,22 @@ pub const FuncGen = struct { fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try o.lowerType(union_ty); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(union_ty).?; if (union_obj.getLayout(ip) == .@"packed") { - const big_bits = union_ty.bitSize(mod); + const big_bits = union_ty.bitSize(pt); const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const non_int_val = try self.resolveInst(extra.init); - const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(mod))); 
+ const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else @@ -10110,19 +10174,19 @@ pub const FuncGen = struct { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; - const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - break :blk try tag_val.intFromEnum(tag_ty, mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index); + break :blk try tag_val.intFromEnum(tag_ty, pt); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return .none; } - assert(!isByRef(union_ty, mod)); + assert(!isByRef(union_ty, pt)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); return try o.builder.bigIntValue(union_llvm_ty, tag_big_int); } - assert(isByRef(union_ty, mod)); + assert(isByRef(union_ty, pt)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set @@ -10132,14 +10196,14 @@ pub const FuncGen = struct { const llvm_payload = try self.resolveInst(extra.init); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const field_llvm_ty = try o.lowerType(field_ty); - const field_size = field_ty.abiSize(mod); - const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index); + const field_size = field_ty.abiSize(pt); + const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); const llvm_union_ty = t: { const payload_ty = p: { - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.arrayType(padding_len, .i8); } @@ -10169,7 +10233,7 @@ pub const FuncGen = struct { // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_align }, }); @@ -10195,9 +10259,9 @@ pub const FuncGen = struct { const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, ""); const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int); - const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm(); + const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment); } @@ -10223,7 +10287,7 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. 
// See https://bugs.llvm.org/show_bug.cgi?id=21037 - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { @@ -10279,7 +10343,7 @@ pub const FuncGen = struct { fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10289,7 +10353,7 @@ pub const FuncGen = struct { fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10312,7 +10376,7 @@ pub const FuncGen = struct { fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10322,7 +10386,7 @@ pub const FuncGen = struct { fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const table = o.error_name_table; if (table != .none) return table; @@ -10334,7 +10398,7 @@ pub const FuncGen = struct { variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); variable_index.setAlignment( - Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(), + Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -10372,15 +10436,16 @@ pub const FuncGen = struct { can_elide_load: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = opt_ty.optionalChild(mod); - if (isByRef(opt_ty, mod)) { + if (isByRef(opt_ty, pt)) { // We have a pointer and we need to return a pointer to the first field. 
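// [Editorial aside, not part of this patch; layout as implied by this file.]
// By-ref optionals are lowered here with the payload at struct field 0 and
// the non-null flag after it, which is why the `gepStruct(..., 0, "")` below
// yields the payload pointer. At the language level, the flag is also why a
// wrapped non-pointer optional is strictly bigger than its payload:
const std = @import("std");

test "non-pointer optionals carry an extra flag" {
    // ?u32 must store a null flag alongside the u32 (exact padding is
    // ABI-dependent), so it cannot be payload-sized.
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}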
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -10389,7 +10454,7 @@ pub const FuncGen = struct { return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment); } - assert(!isByRef(payload_ty, mod)); + assert(!isByRef(payload_ty, pt)); return fg.wip.extractValue(opt_handle, &.{0}, ""); } @@ -10400,12 +10465,12 @@ pub const FuncGen = struct { non_null_bit: Builder.Value, ) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const optional_llvm_ty = try o.lowerType(optional_ty); const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); - const mod = o.module; - if (isByRef(optional_ty, mod)) { - const payload_alignment = optional_ty.abiAlignment(mod).toLlvm(); + if (isByRef(optional_ty, pt)) { + const payload_alignment = optional_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment); { @@ -10432,7 +10497,8 @@ pub const FuncGen = struct { field_index: u32, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { @@ -10452,7 +10518,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); + const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); if (byte_offset == 0) return struct_ptr; const usize_ty = try o.lowerType(Type.usize); const llvm_index = try o.builder.intValue(usize_ty, byte_offset); @@ -10470,14 +10536,14 @@ pub const FuncGen = struct { // the struct. const llvm_index = try o.builder.intValue( try o.lowerType(Type.usize), - @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), + @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)), ); return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr; const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const union_llvm_ty = try o.lowerType(struct_ty); @@ -10500,9 +10566,10 @@ pub const FuncGen = struct { // => so load the byte aligned value and trunc the unwanted bits. 
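// [Editorial aside, not part of this patch; a sketch of the size mismatch.]
// `loadTruncate` exists because oddly sized integers live in whole ABI-sized
// slots: the load fetches the full slot (shifted first on big-endian targets,
// as the code below shows) and then truncates to the real bit width:
const std = @import("std");

test "odd-width integers occupy a wider ABI slot" {
    // A u17 value has 17 bits but is stored in a 4-byte slot, so a plain
    // ABI-sized load picks up bits that must be truncated away.
    try std.testing.expect(@sizeOf(u17) * 8 > @bitSizeOf(u17));
}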
const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_llvm_ty = try o.lowerType(payload_ty); - const abi_size = payload_ty.abiSize(mod); + const abi_size = payload_ty.abiSize(pt); // llvm bug workarounds: const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4; @@ -10522,7 +10589,7 @@ pub const FuncGen = struct { const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big) try fg.wip.bin(.lshr, loaded, try o.builder.intValue( load_llvm_ty, - (payload_ty.abiSize(mod) - (std.math.divCeil(u64, payload_ty.bitSize(mod), 8) catch unreachable)) * 8, + (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8, ), "") else loaded; @@ -10546,11 +10613,11 @@ pub const FuncGen = struct { access_kind: Builder.MemoryAccessKind, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; //const pointee_llvm_ty = try o.lowerType(pointee_type); - const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm(); + const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm(); const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align); - const size_bytes = pointee_type.abiSize(mod); + const size_bytes = pointee_type.abiSize(pt); _ = try fg.wip.callMemCpy( result_ptr, result_align, @@ -10567,15 +10634,16 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - elem_ty.abiAlignment(mod)).toLlvm(); + elem_ty.abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10591,7 +10659,7 @@ pub const FuncGen = struct { } if (info.packed_offset.host_size == 0) { - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind); } return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment); @@ -10601,13 +10669,13 @@ pub const FuncGen = struct { const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); - if (isByRef(elem_ty, mod)) { - const result_align = elem_ty.abiAlignment(mod).toLlvm(); + if (isByRef(elem_ty, pt)) { + const result_align = elem_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align); const same_size_int = try o.builder.intType(@intCast(elem_bits)); @@ -10639,13 +10707,14 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !void { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = 
ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { return; } - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10669,7 +10738,7 @@ pub const FuncGen = struct { assert(ordering == .none); const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store @@ -10704,7 +10773,7 @@ pub const FuncGen = struct { _ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment); return; } - if (!isByRef(elem_ty, mod)) { + if (!isByRef(elem_ty, pt)) { _ = try self.wip.storeAtomic( access_kind, elem, @@ -10720,8 +10789,8 @@ pub const FuncGen = struct { ptr, ptr_alignment, elem, - elem_ty.abiAlignment(mod).toLlvm(), - try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)), + elem_ty.abiAlignment(pt).toLlvm(), + try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)), access_kind, ); } @@ -10747,12 +10816,13 @@ pub const FuncGen = struct { a5: Builder.Value, ) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const llvm_usize = try o.lowerType(Type.usize); - const usize_alignment = Type.usize.abiAlignment(mod).toLlvm(); + const usize_alignment = Type.usize.abiAlignment(pt).toLlvm(); const array_llvm_ty = try o.builder.arrayType(6, llvm_usize); const array_ptr = if (fg.valgrind_client_request_array == .none) a: { @@ -10813,13 +10883,13 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -10990,12 +11060,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ }; } -fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { - if (isByRef(ty, zcu)) { +fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool { + if (isByRef(ty, pt)) { return true; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11005,38 +11075,38 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { } } -fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool { +fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool { const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false; + if 
(!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false; return switch (fn_info.cc) { - .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type), + .Unspecified, .Inline => returnTypeByRef(pt, target, return_type), .C => switch (target.cpu.arch) { .mips, .mipsel => false, - .x86 => isByRef(return_type, zcu), + .x86 => isByRef(return_type, pt), .x86_64 => switch (target.os.tag) { - .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - else => firstParamSRetSystemV(return_type, zcu, target), + .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory, + else => firstParamSRetSystemV(return_type, pt, target), }, - .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect, - .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) { + .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect, + .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => true, .i32_array => |size| size != 1, .byval => false, }, - .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory, + .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory, else => false, // TODO investigate C ABI for other architectures }, - .SysV => firstParamSRetSystemV(return_type, zcu, target), - .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - .Stdcall => !isScalar(zcu, return_type), + .SysV => firstParamSRetSystemV(return_type, pt, target), + .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory, + .Stdcall => !isScalar(pt.zcu, return_type), else => false, }; } -fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret); +fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool { + const class = x86_64_abi.classifySystemV(ty, pt, target, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -11046,9 +11116,10 @@ fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
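// [Editorial aside, not part of this patch; a sketch of what the
// classification above means.] When `firstParamSRet` returns true, the
// function is lowered to return void and instead writes its result through a
// hidden "sret" pointer passed as the first LLVM parameter. For example, on
// x86_64 System V an aggregate larger than two eightbytes classifies as
// `.memory`, so a function like the one below is returned via that hidden
// pointer -- transparently to Zig code:
const std = @import("std");

fn makeBig() [4]u64 {
    // 32 bytes: too large for the two return registers under System V.
    return .{ 1, 2, 3, 4 };
}

test "large aggregates are returned via a hidden pointer, transparently" {
    try std.testing.expectEqual(@as(u64, 4), makeBig()[3]);
}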
@@ -11058,12 +11129,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu switch (fn_info.cc) { .Unspecified, .Inline, - => return if (returnTypeByRef(mod, target, return_type)) .void else o.lowerType(return_type), + => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type), .C => { switch (target.cpu.arch) { .mips, .mipsel => return o.lowerType(return_type), - .x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type), + .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), else => return lowerSystemVFnRetTy(o, fn_info), @@ -11072,36 +11143,36 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu if (isScalar(mod, return_type)) { return o.lowerType(return_type); } - const classes = wasm_c_abi.classifyType(return_type, mod); + const classes = wasm_c_abi.classifyType(return_type, pt); if (classes[0] == .indirect or classes[0] == .none) { return .void; } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(return_type, mod); - return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8)); + const scalar_type = wasm_c_abi.scalarType(return_type, pt); + return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(return_type, mod)) { + switch (aarch64_c_abi.classifyType(return_type, pt)) { .memory => return .void, .float_array => return o.lowerType(return_type), .byval => return o.lowerType(return_type), - .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))), + .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))), .double_integer => return o.builder.arrayType(2, .i64), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(return_type, mod, .ret)) { + switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => return .void, .i32_array => |len| return if (len == 1) .i32 else .void, .byval => return o.lowerType(return_type), } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(return_type, mod)) { + switch (riscv_c_abi.classifyType(return_type, pt)) { .memory => return .void, .integer => { - return o.builder.intType(@intCast(return_type.bitSize(mod))); + return o.builder.intType(@intCast(return_type.bitSize(pt))); }, .double_integer => { return o.builder.structType(.normal, &.{ .i64, .i64 }); @@ -11112,7 +11183,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu var types: [8]Builder.Type = undefined; for (0..return_type.structFieldCount(mod)) |field_index| { const field_ty = return_type.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; types[types_len] = try o.lowerType(field_ty); types_len += 1; } @@ -11132,14 +11203,14 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; const return_type = Type.fromInterned(fn_info.return_type); - switch (x86_64_abi.classifyWindows(return_type, mod)) { + switch (x86_64_abi.classifyWindows(return_type, pt)) { .integer => { - if (isScalar(mod, return_type)) { + if (isScalar(pt.zcu, return_type)) { return o.lowerType(return_type); } 
else { - return o.builder.intType(@intCast(return_type.abiSize(mod) * 8)); + return o.builder.intType(@intCast(return_type.abiSize(pt) * 8)); } }, .win_i128 => return o.builder.vectorType(.normal, 2, .i64), @@ -11150,14 +11221,15 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err } fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const return_type = Type.fromInterned(fn_info.return_type); if (isScalar(mod, return_type)) { return o.lowerType(return_type); } const target = mod.getTarget(); - const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret); + const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret); if (classes[0] == .memory) return .void; var types_index: u32 = 0; var types_buffer: [8]Builder.Type = undefined; @@ -11249,8 +11321,7 @@ const ParamTypeIterator = struct { pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering { if (it.zig_index >= it.fn_info.param_types.len) return null; - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; const ty = it.fn_info.param_types.get(ip)[it.zig_index]; it.byval_attr = false; return nextInner(it, Type.fromInterned(ty)); @@ -11258,8 +11329,7 @@ const ParamTypeIterator = struct { /// `airCall` uses this instead of `next` so that it can take into account variadic functions. pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; if (it.zig_index >= it.fn_info.param_types.len) { if (it.zig_index >= args.len) { return null; @@ -11272,10 +11342,11 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; + const pt = it.object.pt; + const zcu = pt.zcu; const target = zcu.getTarget(); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { it.zig_index += 1; return .no_bits; } @@ -11288,11 +11359,11 @@ const ParamTypeIterator = struct { { it.llvm_index += 1; return .slice; - } else if (isByRef(ty, zcu)) { + } else if (isByRef(ty, pt)) { return .byref; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11320,7 +11391,7 @@ const ParamTypeIterator = struct { if (isScalar(zcu, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, zcu); + const classes = wasm_c_abi.classifyType(ty, pt); if (classes[0] == .indirect) { return .byref; } @@ -11329,7 +11400,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, zcu)) { + switch (aarch64_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -11344,7 +11415,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, zcu, .arg)) { + switch (arm_c_abi.classifyType(ty, pt, .arg)) { .memory => { it.byval_attr = true; return 
.byref; @@ -11359,7 +11430,7 @@ const ParamTypeIterator = struct { it.llvm_index += 1; if (ty.toIntern() == .f16_type and !std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16; - switch (riscv_c_abi.classifyType(ty, zcu)) { + switch (riscv_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -11368,7 +11439,7 @@ const ParamTypeIterator = struct { it.types_len = 0; for (0..ty.structFieldCount(zcu)) |field_index| { const field_ty = ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; it.types_buffer[it.types_len] = try it.object.lowerType(field_ty); it.types_len += 1; } @@ -11406,10 +11477,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - const zcu = it.object.module; - switch (x86_64_abi.classifyWindows(ty, zcu)) { + const pt = it.object.pt; + switch (x86_64_abi.classifyWindows(ty, pt)) { .integer => { - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11439,17 +11510,17 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; - const target = zcu.getTarget(); - const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg); + const pt = it.object.pt; + const ip = &pt.zcu.intern_pool; + const target = pt.zcu.getTarget(); + const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11550,7 +11621,7 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - mod: *Module, + mod: *Zcu, ty: Type, ) ?std.builtin.Signedness { const target = mod.getTarget(); @@ -11598,13 +11669,13 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type, mod: *Module) bool { +fn isByRef(ty: Type, pt: Zcu.PerThread) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. 
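// [Editorial aside, not part of this patch.] With `max_fields_byval` set to 0
// just below, any struct or tuple with even one runtime field takes the byref
// path; only zero-bit aggregates stay by-value. "Zero-bit" here means no
// runtime representation at all, as in:
const std = @import("std");

test "field-less structs have no runtime bits" {
    // An empty struct occupies zero bytes, so there is nothing to pass
    // behind a pointer in the first place.
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(struct {}));
}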
const max_fields_byval = 0; - const ip = &mod.intern_pool; + const ip = &pt.zcu.intern_pool; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11627,17 +11698,17 @@ fn isByRef(ty: Type, mod: *Module) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(mod), + .Array, .Frame => return ty.hasRuntimeBits(pt), .Struct => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(Type.fromInterned(field_ty), mod)) return true; + if (isByRef(Type.fromInterned(field_ty), pt)) return true; } return false; }, @@ -11655,27 +11726,27 @@ fn isByRef(ty: Type, mod: *Module) bool { count += 1; if (count > max_fields_byval) return true; const field_ty = Type.fromInterned(field_types[field_index]); - if (isByRef(field_ty, mod)) return true; + if (isByRef(field_ty, pt)) return true; } return false; }, - .Union => switch (ty.containerLayout(mod)) { + .Union => switch (ty.containerLayout(pt.zcu)) { .@"packed" => return false, - else => return ty.hasRuntimeBits(mod), + else => return ty.hasRuntimeBits(pt), }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.errorUnionPayload(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } return true; }, .Optional => { - const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.optionalChild(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } - if (ty.optionalReprIsPayload(mod)) { + if (ty.optionalReprIsPayload(pt.zcu)) { return false; } return true; @@ -11683,7 +11754,7 @@ fn isByRef(ty: Type, mod: *Module) bool { } } -fn isScalar(mod: *Module, ty: Type) bool { +fn isScalar(mod: *Zcu, ty: Type) bool { return switch (ty.zigTypeTag(mod)) { .Void, .Bool, @@ -11774,7 +11845,7 @@ const lt_errors_fn_name = "__zig_lt_errors_len"; /// Without this workaround, LLVM crashes with "unknown codeview register H1" /// https://github.com/llvm/llvm-project/issues/56484 fn needDbgVarWorkaround(o: *Object) bool { - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); if (target.os.tag == .windows and target.cpu.arch == .aarch64) { return true; } @@ -11817,14 +11888,14 @@ fn buildAllocaInner( return wip.conv(.unneeded, alloca, .ptr, ""); } -fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod))); +fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt))); } -fn errUnionErrorOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod))); +fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return 
@intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt))); } /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 2fbe9097d63d..95874a5d65bb 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -6,9 +6,7 @@ const assert = std.debug.assert; const Signedness = std.builtin.Signedness; const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; -const Decl = Module.Decl; +const Decl = Zcu.Decl; const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); @@ -188,12 +186,13 @@ pub const Object = struct { fn genDecl( self: *Object, - zcu: *Zcu, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, air: Air, liveness: Liveness, ) !void { - const gpa = self.gpa; + const zcu = pt.zcu; + const gpa = zcu.gpa; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg; @@ -201,7 +200,7 @@ pub const Object = struct { var decl_gen = DeclGen{ .gpa = gpa, .object = self, - .module = zcu, + .pt = pt, .spv = &self.spv, .decl_index = decl_index, .air = air, @@ -235,34 +234,34 @@ pub const Object = struct { pub fn updateFunc( self: *Object, - mod: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { - const decl_index = mod.funcInfo(func_index).owner_decl; + const decl_index = pt.zcu.funcInfo(func_index).owner_decl; // TODO: Separate types for generating decls and functions? - try self.genDecl(mod, decl_index, air, liveness); + try self.genDecl(pt, decl_index, air, liveness); } pub fn updateDecl( self: *Object, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { - try self.genDecl(mod, decl_index, undefined, undefined); + try self.genDecl(pt, decl_index, undefined, undefined); } /// Fetch or allocate a result id for decl index. This function also marks the decl as alive. /// Note: Function does not actually generate the decl, it just allocates an index. - pub fn resolveDecl(self: *Object, mod: *Module, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index { - const decl = mod.declPtr(decl_index); + pub fn resolveDecl(self: *Object, zcu: *Zcu, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index { + const decl = zcu.declPtr(decl_index); assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false? const entry = try self.decl_link.getOrPut(self.gpa, decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(mod)) + const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(zcu)) .func else switch (decl.@"addrspace") { .generic => .invocation_global, @@ -285,7 +284,7 @@ const DeclGen = struct { object: *Object, /// The Zig module that we are generating decls for. - module: *Module, + pt: Zcu.PerThread, /// The SPIR-V module that instructions should be emitted into. /// This is the same as `self.object.spv`, repeated here for brevity. @@ -333,7 +332,7 @@ const DeclGen = struct { /// If `gen` returned `Error.CodegenFail`, this contains an explanatory message. /// Memory is owned by `module.gpa`. - error_msg: ?*Module.ErrorMsg = null, + error_msg: ?*Zcu.ErrorMsg = null, /// Possible errors the `genDecl` function may return. 
     const Error = error{ CodegenFail, OutOfMemory };
@@ -410,15 +409,15 @@ const DeclGen = struct {
     /// Return the target which we are currently compiling for.
     pub fn getTarget(self: *DeclGen) std.Target {
-        return self.module.getTarget();
+        return self.pt.zcu.getTarget();
     }
 
     pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
         @setCold(true);
-        const mod = self.module;
-        const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod);
+        const zcu = self.pt.zcu;
+        const src_loc = zcu.declPtr(self.decl_index).navSrcLoc(zcu);
         assert(self.error_msg == null);
-        self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
+        self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args);
         return error.CodegenFail;
     }
@@ -439,8 +438,9 @@ const DeclGen = struct {
     /// Fetch the result-id for a previously generated instruction or constant.
     fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
-        const mod = self.module;
-        if (try self.air.value(inst, mod)) |val| {
+        const pt = self.pt;
+        const mod = pt.zcu;
+        if (try self.air.value(inst, pt)) |val| {
             const ty = self.typeOf(inst);
             if (ty.zigTypeTag(mod) == .Fn) {
                 const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
@@ -462,7 +462,7 @@ const DeclGen = struct {
 
     fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef {
         // TODO: This cannot be a function at this point, but it should probably be handled anyway.
-        const mod = self.module;
+        const mod = self.pt.zcu;
         const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
         const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
 
@@ -642,7 +642,7 @@ const DeclGen = struct {
     /// Checks whether the type can be directly translated to SPIR-V vectors
     fn isSpvVector(self: *DeclGen, ty: Type) bool {
-        const mod = self.module;
+        const mod = self.pt.zcu;
         const target = self.getTarget();
         if (ty.zigTypeTag(mod) != .Vector) return false;
 
@@ -668,7 +668,7 @@ const DeclGen = struct {
     }
 
     fn arithmeticTypeInfo(self: *DeclGen, ty: Type) ArithmeticTypeInfo {
-        const mod = self.module;
+        const mod = self.pt.zcu;
         const target = self.getTarget();
         var scalar_ty = ty.scalarType(mod);
         if (scalar_ty.zigTypeTag(mod) == .Enum) {
@@ -744,7 +744,7 @@ const DeclGen = struct {
     /// the value to an unsigned int first for Kernels.
     fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef {
         // TODO: Cache?
-        const mod = self.module;
+        const mod = self.pt.zcu;
         const scalar_ty = ty.scalarType(mod);
         const int_info = scalar_ty.intInfo(mod);
         // Use backing bits so that negatives are sign extended
@@ -824,7 +824,7 @@ const DeclGen = struct {
     /// Construct a vector at runtime.
     /// ty must be a vector type.
     fn constructVector(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef {
-        const mod = self.module;
+        const mod = self.pt.zcu;
         assert(ty.vectorLen(mod) == constituents.len);
 
         // Note: older versions of the Khronos SPIRV-LLVM translator crash on this instruction
@@ -848,7 +848,7 @@ const DeclGen = struct {
     /// Construct a vector at runtime with all lanes set to the same value.
     /// ty must be a vector type.
fn constructVectorSplat(self: *DeclGen, ty: Type, constituent: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const n = ty.vectorLen(mod); const constituents = try self.gpa.alloc(IdRef, n); @@ -886,12 +886,13 @@ const DeclGen = struct { return id; } - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const target = self.getTarget(); const result_ty_id = try self.resolveType(ty, repr); const ip = &mod.intern_pool; - log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) }); + log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt, null) }); if (val.isUndefDeep(mod)) { return self.spv.constUndef(result_ty_id); } @@ -940,16 +941,16 @@ const DeclGen = struct { }, .int => { if (ty.isSignedInt(mod)) { - break :cache try self.constInt(ty, val.toSignedInt(mod), repr); + break :cache try self.constInt(ty, val.toSignedInt(pt), repr); } else { - break :cache try self.constInt(ty, val.toUnsignedInt(mod), repr); + break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr); } }, .float => { const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) { - 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, mod))) }, - 32 => .{ .float32 = val.toFloat(f32, mod) }, - 64 => .{ .float64 = val.toFloat(f64, mod) }, + 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) }, + 32 => .{ .float32 = val.toFloat(f32, pt) }, + 64 => .{ .float64 = val.toFloat(f64, pt) }, 80, 128 => unreachable, // TODO else => unreachable, }; @@ -968,17 +969,17 @@ const DeclGen = struct { .error_union => |error_union| { // TODO: Error unions may be constructed with constant instructions if the payload type // allows it. For now, just generate it here regardless. - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const err_ty = switch (error_union.val) { .err_name => ty.errorUnionSet(mod), .payload => err_int_ty, }; const err_val = switch (error_union.val) { - .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{ + .err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, - } }))), - .payload => try mod.intValue(err_int_ty, 0), + } })), + .payload => try pt.intValue(err_int_ty, 0), }; const payload_ty = ty.errorUnionPayload(mod); const eu_layout = self.errorUnionLayout(payload_ty); @@ -988,7 +989,7 @@ const DeclGen = struct { } const payload_val = Value.fromInterned(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, }); @@ -1007,7 +1008,7 @@ const DeclGen = struct { return try self.constructStruct(ty, &types, &constituents); }, .enum_tag => { - const int_val = try val.intFromEnum(ty, mod); + const int_val = try val.intFromEnum(ty, pt); const int_ty = ty.intTagType(mod); break :cache try self.constant(int_ty, int_val, repr); }, @@ -1026,7 +1027,7 @@ const DeclGen = struct { const payload_ty = ty.optionalChild(mod); const maybe_payload_val = val.optionalValue(mod); - if (!payload_ty.hasRuntimeBits(mod)) { + if (!payload_ty.hasRuntimeBits(pt)) { break :cache try self.constBool(maybe_payload_val != null, .indirect); } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. 
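// Editorial aside, not part of the upstream patch: the mechanical change running
// through these hunks replaces the stored `module: *Zcu` with a `pt: Zcu.PerThread`
// handle and unwraps `pt.zcu` locally; calls that may intern new values now take
// `pt`, while read-only queries keep a plain `*Zcu`. A minimal sketch of that
// pattern follows; all names except `PerThread`, `zcu`, and `tid` are hypothetical.
const std = @import("std");

const Zcu = struct {
    gpa: std.mem.Allocator,

    /// Pairs the compilation unit with the id of the thread doing the work,
    /// so mutating operations know which per-thread state they may touch.
    pub const PerThread = struct {
        zcu: *Zcu,
        tid: Id,

        pub const Id = enum(u8) { main, _ };
    };
};

// A mutating code path takes the full handle...
fn lowerValue(pt: Zcu.PerThread) void {
    // ...and unwraps it once; `zcu` is then used for read-only queries.
    const zcu = pt.zcu;
    _ = zcu;
}

test "per-thread handle sketch" {
    var zcu: Zcu = .{ .gpa = std.testing.allocator };
    lowerValue(.{ .zcu = &zcu, .tid = .main });
}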
@@ -1104,13 +1105,13 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } // TODO: Padding? - const field_val = try val.fieldValue(mod, field_index); + const field_val = try val.fieldValue(pt, field_index); const field_id = try self.constant(field_ty, field_val, .indirect); try types.append(field_ty); @@ -1126,7 +1127,7 @@ const DeclGen = struct { const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?; const union_obj = mod.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.constant(field_ty, Value.fromInterned(un.val), .direct) else null; @@ -1144,10 +1145,10 @@ const DeclGen = struct { fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef { // TODO: Caching?? - const zcu = self.module; + const pt = self.pt; - if (ptr_val.isUndef(zcu)) { - const result_ty = ptr_val.typeOf(zcu); + if (ptr_val.isUndef(pt.zcu)) { + const result_ty = ptr_val.typeOf(pt.zcu); const result_ty_id = try self.resolveType(result_ty, .direct); return self.spv.constUndef(result_ty_id); } @@ -1155,12 +1156,13 @@ const DeclGen = struct { var arena = std.heap.ArenaAllocator.init(self.gpa); defer arena.deinit(); - const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu); + const derivation = try ptr_val.pointerDerivation(arena.allocator(), pt); return self.derivePtr(derivation); } fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef { - const zcu = self.module; + const pt = self.pt; + const zcu = pt.zcu; switch (derivation) { .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { @@ -1172,12 +1174,12 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = result_ty_id, .id_result = result_ptr_id, - .integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct), + .integer_value = try self.constant(Type.usize, try pt.intValue(Type.usize, int.addr), .direct), }); return result_ptr_id; }, .decl_ptr => |decl| { - const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu); + const result_ptr_ty = try zcu.declPtr(decl).declPtrType(pt); return self.constantDeclRef(result_ptr_ty, decl); }, .anon_decl_ptr => |ad| { @@ -1188,18 +1190,18 @@ const DeclGen = struct { .opt_payload_ptr => @panic("TODO"), .field_ptr => |field| { const parent_ptr_id = try self.derivePtr(field.parent.*); - const parent_ptr_ty = try field.parent.ptrType(zcu); + const parent_ptr_ty = try field.parent.ptrType(pt); return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx); }, .elem_ptr => |elem| { const parent_ptr_id = try self.derivePtr(elem.parent.*); - const parent_ptr_ty = try elem.parent.ptrType(zcu); + const parent_ptr_ty = try elem.parent.ptrType(pt); const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct); return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id); }, .offset_and_cast => |oac| { const parent_ptr_id = try self.derivePtr(oac.parent.*); - const parent_ptr_ty = try oac.parent.ptrType(zcu); + const parent_ptr_ty = try 
oac.parent.ptrType(pt);
                disallow: {
                    if (oac.byte_offset != 0) break :disallow;
                    // Allow changing the pointer type child only to restructure arrays.
@@ -1218,8 +1220,8 @@ const DeclGen = struct {
                    return result_ptr_id;
                }
                return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{
-                    parent_ptr_ty.fmt(zcu),
-                    oac.new_ptr_ty.fmt(zcu),
+                    parent_ptr_ty.fmt(pt),
+                    oac.new_ptr_ty.fmt(pt),
                });
            },
        }
@@ -1232,7 +1234,8 @@ const DeclGen = struct {
    ) !IdRef {
        // TODO: Merge this function with constantDeclRef.
 
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const ip = &mod.intern_pool;
        const ty_id = try self.resolveType(ty, .direct);
        const decl_val = anon_decl.val;
@@ -1247,7 +1250,7 @@ const DeclGen = struct {
        }
 
        // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
-        if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+        if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
            // Pointer to nothing - return undefined
            return self.spv.constUndef(ty_id);
        }
@@ -1276,7 +1279,8 @@ const DeclGen = struct {
    }
 
    fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef {
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const ty_id = try self.resolveType(ty, .direct);
        const decl = mod.declPtr(decl_index);
@@ -1290,7 +1294,7 @@ const DeclGen = struct {
            else => {},
        }
 
-        if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+        if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
            // Pointer to nothing - return undefined.
            return self.spv.constUndef(ty_id);
        }
@@ -1331,7 +1335,7 @@ const DeclGen = struct {
    fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 {
        var name = std.ArrayList(u8).init(self.gpa);
        defer name.deinit();
-        try ty.print(name.writer(), self.module);
+        try ty.print(name.writer(), self.pt);
        return try name.toOwnedSlice();
    }
 
@@ -1424,14 +1428,14 @@ const DeclGen = struct {
    }
 
    fn zigScalarOrVectorTypeLike(self: *DeclGen, new_ty: Type, base_ty: Type) !Type {
-        const mod = self.module;
-        const new_scalar_ty = new_ty.scalarType(mod);
-        if (!base_ty.isVector(mod)) {
+        const pt = self.pt;
+        const new_scalar_ty = new_ty.scalarType(pt.zcu);
+        if (!base_ty.isVector(pt.zcu)) {
            return new_scalar_ty;
        }
 
-        return try mod.vectorType(.{
-            .len = base_ty.vectorLen(mod),
+        return try pt.vectorType(.{
+            .len = base_ty.vectorLen(pt.zcu),
            .child = new_scalar_ty.toIntern(),
        });
    }
@@ -1455,7 +1459,7 @@ const DeclGen = struct {
    /// }
    /// If any of the fields' size is 0, it will be omitted.
    fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        const ip = &mod.intern_pool;
        const union_obj = mod.typeToUnion(ty).?;
 
@@ -1506,12 +1510,12 @@ const DeclGen = struct {
    }
 
    fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef {
-        const mod = self.module;
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        const pt = self.pt;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
            // If the return type is an error set or an error union, then we make this
            // anyerror return type instead, so that it can be coerced into a function
            // pointer type which has anyerror as the return type.
- if (ret_ty.isError(mod)) { + if (ret_ty.isError(pt.zcu)) { return self.resolveType(Type.anyerror, .direct); } else { return self.resolveType(Type.void, .direct); @@ -1533,9 +1537,10 @@ const DeclGen = struct { } fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - log.debug("resolveType: ty = {}", .{ty.fmt(mod)}); + log.debug("resolveType: ty = {}", .{ty.fmt(pt)}); const target = self.getTarget(); const section = &self.spv.sections.types_globals_constants; @@ -1607,7 +1612,7 @@ const DeclGen = struct { return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { // The size of the array would be 0, but that is not allowed in SPIR-V. // This path can be reached when the backend is asked to generate a pointer to // an array of some zero-bit type. This should always be an indirect path. @@ -1655,7 +1660,7 @@ const DeclGen = struct { var param_index: usize = 0; for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; param_ty_ids[param_index] = try self.resolveType(param_ty, .direct); param_index += 1; @@ -1713,7 +1718,7 @@ const DeclGen = struct { var member_index: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect); member_index += 1; @@ -1742,7 +1747,7 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } @@ -1761,7 +1766,7 @@ const DeclGen = struct { }, .Optional => { const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. 
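// Editorial aside, not part of the upstream patch: the `.direct`/`.indirect`
// distinction referenced above matters because SPIR-V's OpTypeBool has no
// defined memory layout, so a bool that lives in memory is stored as a sized
// integer and recovered with a compare-against-zero (roughly what this file's
// convertToDirect/convertToIndirect pair does). A toy host-side sketch of the
// two representations; names are hypothetical.
const std = @import("std");

// Indirect (in-memory) form: a sized integer.
fn boolToIndirect(b: bool) u8 {
    return @intFromBool(b);
}

// Direct (in-'register') form: recovered by comparing the stored integer
// against zero.
fn boolToDirect(stored: u8) bool {
    return stored != 0;
}

test "bool representation round-trip" {
    try std.testing.expect(boolToDirect(boolToIndirect(true)));
    try std.testing.expect(!boolToDirect(boolToIndirect(false)));
}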
@@ -1878,14 +1883,14 @@ const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const mod = self.module; + const pt = self.pt; - const error_align = Type.anyerror.abiAlignment(mod); - const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(pt); + const payload_align = payload_ty.abiAlignment(pt); const error_first = error_align.compare(.gt, payload_align); return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt), .error_first = error_first, }; } @@ -1909,9 +1914,10 @@ const DeclGen = struct { }; fn unionLayout(self: *DeclGen, ty: Type) UnionLayout { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - const layout = ty.unionGetLayout(self.module); + const layout = ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(ty).?; var union_layout = UnionLayout{ @@ -1932,7 +1938,7 @@ const DeclGen = struct { const most_aligned_field = layout.most_aligned_field; const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]); union_layout.payload_ty = most_aligned_field_ty; - union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(mod)); + union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt)); } else { union_layout.payload_size = 0; } @@ -1999,7 +2005,7 @@ const DeclGen = struct { } fn materialize(self: Temporary, dg: *DeclGen) !IdResult { - const mod = dg.module; + const mod = dg.pt.zcu; switch (self.value) { .singleton => |id| return id, .exploded_vector => |range| { @@ -2029,12 +2035,12 @@ const DeclGen = struct { /// 'Explode' a temporary into separate elements. This turns a vector /// into a bag of elements. fn explode(self: Temporary, dg: *DeclGen) !IdRange { - const mod = dg.module; + const mod = dg.pt.zcu; // If the value is a scalar, then this is a no-op. if (!self.ty.isVector(mod)) { return switch (self.value) { - .singleton => |id| IdRange{ .base = @intFromEnum(id), .len = 1 }, + .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 }, .exploded_vector => |range| range, }; } @@ -2088,7 +2094,7 @@ const DeclGen = struct { /// only checks the size, but the source-of-truth is implemented /// by `isSpvVector()`. fn fromType(ty: Type, dg: *DeclGen) Vectorization { - const mod = dg.module; + const mod = dg.pt.zcu; if (!ty.isVector(mod)) { return .scalar; } else if (dg.isSpvVector(ty)) { @@ -2164,11 +2170,11 @@ const DeclGen = struct { /// Turns `ty` into the result-type of an individual vector operation. /// `ty` may be a scalar or vector, it doesn't matter. fn operationType(self: Vectorization, dg: *DeclGen, ty: Type) !Type { - const mod = dg.module; - const scalar_ty = ty.scalarType(mod); + const pt = dg.pt; + const scalar_ty = ty.scalarType(pt.zcu); return switch (self) { .scalar, .unrolled => scalar_ty, - .spv_vectorized => |n| try mod.vectorType(.{ + .spv_vectorized => |n| try pt.vectorType(.{ .len = n, .child = scalar_ty.toIntern(), }), @@ -2178,11 +2184,11 @@ const DeclGen = struct { /// Turns `ty` into the result-type of the entire operation. /// `ty` may be a scalar or vector, it doesn't matter. 
fn resultType(self: Vectorization, dg: *DeclGen, ty: Type) !Type {
-            const mod = dg.module;
-            const scalar_ty = ty.scalarType(mod);
+            const pt = dg.pt;
+            const scalar_ty = ty.scalarType(pt.zcu);
            return switch (self) {
                .scalar => scalar_ty,
-                .unrolled, .spv_vectorized => |n| try mod.vectorType(.{
+                .unrolled, .spv_vectorized => |n| try pt.vectorType(.{
                    .len = n,
                    .child = scalar_ty.toIntern(),
                }),
@@ -2193,8 +2199,8 @@ const DeclGen = struct {
        /// this setup, and returns a new type that holds the relevant information on how to access
        /// elements of the input.
        fn prepare(self: Vectorization, dg: *DeclGen, tmp: Temporary) !PreparedOperand {
-            const mod = dg.module;
-            const is_vector = tmp.ty.isVector(mod);
+            const pt = dg.pt;
+            const is_vector = tmp.ty.isVector(pt.zcu);
            const is_spv_vector = dg.isSpvVector(tmp.ty);
            const value: PreparedOperand.Value = switch (tmp.value) {
                .singleton => |id| switch (self) {
@@ -2209,7 +2215,7 @@ const DeclGen = struct {
                    }
 
                    // Broadcast scalar into vector.
-                    const vector_ty = try mod.vectorType(.{
+                    const vector_ty = try pt.vectorType(.{
                        .len = self.components(),
                        .child = tmp.ty.toIntern(),
                    });
@@ -2340,7 +2346,7 @@ const DeclGen = struct {
    /// This function builds an OpSConvert or OpUConvert depending on the
    /// signedness of the types.
    fn buildIntConvert(self: *DeclGen, dst_ty: Type, src: Temporary) !Temporary {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct);
        const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct);
 
@@ -2419,7 +2425,7 @@ const DeclGen = struct {
    }
 
    fn buildSelect(self: *DeclGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        const v = self.vectorization(.{ condition, lhs, rhs });
        const ops = v.operations();
 
@@ -2764,7 +2770,8 @@ const DeclGen = struct {
        lhs: Temporary,
        rhs: Temporary,
    ) !struct { Temporary, Temporary } {
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const target = self.getTarget();
        const ip = &mod.intern_pool;
 
@@ -2814,7 +2821,7 @@ const DeclGen = struct {
        // where T is maybe vectorized.
        const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() };
        const values = [2]InternPool.Index{ .none, .none };
-        const index = try ip.getAnonStructType(mod.gpa, .{
+        const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{
            .types = &types,
            .values = &values,
            .names = &.{},
@@ -2888,7 +2895,7 @@ const DeclGen = struct {
    /// the name of an error in the test executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
        const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
-        const ptr_anyerror_ty = try self.module.ptrType(.{
+        const ptr_anyerror_ty = try self.pt.ptrType(.{
            .child = Type.anyerror.toIntern(),
            .flags = .{ .address_space = .global },
        });
@@ -2940,7 +2947,8 @@ const DeclGen = struct {
    }
 
    fn genDecl(self: *DeclGen) !void {
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const ip = &mod.intern_pool;
        const decl = mod.declPtr(self.decl_index);
        const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index);
@@ -2967,7 +2975,7 @@ const DeclGen = struct {
                try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
                for (fn_info.param_types.get(ip)) |param_ty_index| {
                    const param_ty = Type.fromInterned(param_ty_index);
-                    if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                    if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
 
                    const param_type_id = try self.resolveType(param_ty, .direct);
                    const arg_result_id = self.spv.allocId();
@@ -3004,11 +3012,11 @@ const DeclGen = struct {
                // Append the actual code into the functions section.
                try self.spv.addFunction(spv_decl_index, self.func);
 
-                const fqn = try decl.fullyQualifiedName(self.module);
+                const fqn = try decl.fullyQualifiedName(self.pt.zcu);
                try self.spv.debugName(result_id, fqn.toSlice(ip));
 
                // Temporarily generate a test kernel declaration if this is a test function.
-                if (self.module.test_functions.contains(self.decl_index)) {
+                if (self.pt.zcu.test_functions.contains(self.decl_index)) {
                    try self.generateTestEntryPoint(fqn.toSlice(ip), spv_decl_index);
                }
            },
@@ -3033,7 +3041,7 @@ const DeclGen = struct {
                    .storage_class = final_storage_class,
                });
 
-                const fqn = try decl.fullyQualifiedName(self.module);
+                const fqn = try decl.fullyQualifiedName(self.pt.zcu);
                try self.spv.debugName(result_id, fqn.toSlice(ip));
                try self.spv.declareDeclDeps(spv_decl_index, &.{});
            },
@@ -3078,7 +3086,7 @@ const DeclGen = struct {
                try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
                try self.spv.addFunction(spv_decl_index, self.func);
 
-                const fqn = try decl.fullyQualifiedName(self.module);
+                const fqn = try decl.fullyQualifiedName(self.pt.zcu);
                try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)});
 
                try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
@@ -3119,7 +3127,7 @@ const DeclGen = struct {
    /// Convert representation from indirect (in memory) to direct (in 'register')
    /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
    fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        switch (ty.scalarType(mod).zigTypeTag(mod)) {
            .Bool => {
                const false_id = try self.constBool(false, .indirect);
@@ -3145,7 +3153,7 @@ const DeclGen = struct {
 
    /// Convert representation from direct (in 'register') to indirect (in memory)
    /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .Bool => { const result = try self.intFromBool(Temporary.init(ty, operand_id)); @@ -3222,7 +3230,7 @@ const DeclGen = struct { } fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const ip = &mod.intern_pool; if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; @@ -3402,7 +3410,7 @@ const DeclGen = struct { } fn airShift(self: *DeclGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const base = try self.temporary(bin_op.lhs); @@ -3480,7 +3488,7 @@ const DeclGen = struct { /// All other values are returned unmodified (this makes strange integer /// wrapping easier to use in generic operations). fn normalize(self: *DeclGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary { - const mod = self.module; + const mod = self.pt.zcu; const ty = value.ty; switch (info.class) { .integer, .bool, .float => return value, @@ -3721,7 +3729,7 @@ const DeclGen = struct { fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const target = self.getTarget(); - const mod = self.module; + const pt = self.pt; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3758,7 +3766,7 @@ const DeclGen = struct { const result, const overflowed = switch (info.signedness) { .unsigned => blk: { if (maybe_op_ty_bits) |op_ty_bits| { - const op_ty = try mod.intType(.unsigned, op_ty_bits); + const op_ty = try pt.intType(.unsigned, op_ty_bits); const casted_lhs = try self.buildIntConvert(op_ty, lhs); const casted_rhs = try self.buildIntConvert(op_ty, rhs); @@ -3828,7 +3836,7 @@ const DeclGen = struct { ); if (maybe_op_ty_bits) |op_ty_bits| { - const op_ty = try mod.intType(.signed, op_ty_bits); + const op_ty = try pt.intType(.signed, op_ty_bits); // Assume normalized; sign bit is set. We want a sign extend. 
const casted_lhs = try self.buildIntConvert(op_ty, lhs); const casted_rhs = try self.buildIntConvert(op_ty, rhs); @@ -3900,7 +3908,7 @@ const DeclGen = struct { } fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3958,7 +3966,7 @@ const DeclGen = struct { fn airClzCtz(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const mod = self.module; + const mod = self.pt.zcu; const target = self.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.temporary(ty_op.operand); @@ -4007,7 +4015,7 @@ const DeclGen = struct { } fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const operand = try self.resolve(reduce.operand); const operand_ty = self.typeOf(reduce.operand); @@ -4082,7 +4090,8 @@ const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); @@ -4108,14 +4117,14 @@ const DeclGen = struct { const a_len = a_ty.vectorLen(mod); for (components, 0..) |*component, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { // This is explicitly valid for OpVectorShuffle, it indicates undefined. component.* = 0xFFFF_FFFF; continue; } - const index = elem.toSignedInt(mod); + const index = elem.toSignedInt(pt); if (index >= 0) { component.* = @intCast(index); } else { @@ -4140,13 +4149,13 @@ const DeclGen = struct { defer self.gpa.free(components); for (components, 0..) 
|*id, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { id.* = try self.spv.constUndef(scalar_ty_id); continue; } - const index = elem.toSignedInt(mod); + const index = elem.toSignedInt(pt); if (index >= 0) { id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index)); } else { @@ -4220,7 +4229,7 @@ const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const result_ty_id = try self.resolveType(result_ty, .direct); switch (ptr_ty.ptrSize(mod)) { @@ -4276,7 +4285,8 @@ const DeclGen = struct { lhs: Temporary, rhs: Temporary, ) !Temporary { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const scalar_ty = lhs.ty.scalarType(mod); const is_vector = lhs.ty.isVector(mod); @@ -4324,7 +4334,7 @@ const DeclGen = struct { const payload_ty = ty.optionalChild(mod); if (ty.optionalReprIsPayload(mod)) { - assert(payload_ty.hasRuntimeBitsIgnoreComptime(mod)); + assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt)); assert(!payload_ty.isSlice(mod)); return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty)); @@ -4333,12 +4343,12 @@ const DeclGen = struct { const lhs_id = try lhs.materialize(self); const rhs_id = try rhs.materialize(self); - const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, lhs_id, 1) else try self.convertToDirect(Type.bool, lhs_id); - const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, rhs_id, 1) else try self.convertToDirect(Type.bool, rhs_id); @@ -4346,7 +4356,7 @@ const DeclGen = struct { const lhs_valid = Temporary.init(Type.bool, lhs_valid_id); const rhs_valid = Temporary.init(Type.bool, rhs_valid_id); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return try self.cmp(op, lhs_valid, rhs_valid); } @@ -4466,7 +4476,7 @@ const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const src_ty_id = try self.resolveType(src_ty, .direct); const dst_ty_id = try self.resolveType(dst_ty, .direct); @@ -4675,7 +4685,8 @@ const DeclGen = struct { } fn airArrayToSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const array_ptr_ty = self.typeOf(ty_op.operand); const array_ty = array_ptr_ty.childType(mod); @@ -4687,7 +4698,7 @@ const DeclGen = struct { const array_ptr_id = try self.resolve(ty_op.operand); const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct); - const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type. 
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id) else @@ -4719,7 +4730,8 @@ const DeclGen = struct { } fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); @@ -4742,8 +4754,8 @@ const DeclGen = struct { switch (ip.indexToKey(result_ty.toIntern())) { .anon_struct_type => |tuple| { for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; - assert(Type.fromInterned(field_ty).hasRuntimeBits(mod)); + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; + assert(Type.fromInterned(field_ty).hasRuntimeBits(pt)); const id = try self.resolve(element); types[index] = Type.fromInterned(field_ty); @@ -4756,9 +4768,9 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); for (elements, 0..) |element, i| { const field_index = it.next().?; - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - assert(field_ty.hasRuntimeBitsIgnoreComptime(mod)); + assert(field_ty.hasRuntimeBitsIgnoreComptime(pt)); const id = try self.resolve(element); types[index] = field_ty; @@ -4808,13 +4820,14 @@ const DeclGen = struct { } fn sliceOrArrayLen(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; switch (ty.ptrSize(mod)) { .Slice => return self.extractField(Type.usize, operand_id, 1), .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size; return try self.constInt(Type.usize, size, .direct); }, @@ -4823,7 +4836,7 @@ const DeclGen = struct { } fn sliceOrArrayPtr(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; if (ty.isSlice(mod)) { const ptr_ty = ty.slicePtrFieldType(mod); return self.extractField(ptr_ty, operand_id, 0); @@ -4855,7 +4868,7 @@ const DeclGen = struct { } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); @@ -4872,7 +4885,7 @@ const DeclGen = struct { } fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; @@ -4889,7 +4902,7 @@ const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. 
const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod))); @@ -4904,14 +4917,15 @@ const DeclGen = struct { } fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const src_ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = src_ptr_ty.childType(mod); const ptr_id = try self.resolve(bin_op.lhs); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { const dst_ptr_ty = self.typeOfIndex(inst); return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id); } @@ -4921,7 +4935,7 @@ const DeclGen = struct { } fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const array_ty = self.typeOf(bin_op.lhs); const elem_ty = array_ty.childType(mod); @@ -4982,7 +4996,7 @@ const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOfIndex(inst); @@ -4993,7 +5007,7 @@ const DeclGen = struct { } fn airVectorStoreElem(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -5015,7 +5029,7 @@ const DeclGen = struct { } fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ptr_ty = self.typeOf(bin_op.lhs); const un_ty = un_ptr_ty.childType(mod); @@ -5041,7 +5055,7 @@ const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const mod = self.module; + const mod = self.pt.zcu; const layout = self.unionLayout(un_ty); if (layout.tag_size == 0) return null; @@ -5064,7 +5078,8 @@ const DeclGen = struct { // Note: The result here is not cached, because it generates runtime code. 
- const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const union_ty = mod.typeToUnion(ty).?; const tag_ty = Type.fromInterned(union_ty.enum_tag_ty); @@ -5076,9 +5091,9 @@ const DeclGen = struct { const layout = self.unionLayout(ty); const tag_int = if (layout.tag_size != 0) blk: { - const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field); - const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); - break :blk tag_int_val.toUnsignedInt(mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field); + const tag_int_val = try tag_val.intFromEnum(tag_ty, pt); + break :blk tag_int_val.toUnsignedInt(pt); } else 0; if (!layout.has_payload) { @@ -5095,7 +5110,7 @@ const DeclGen = struct { } const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]); - if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function); const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index}); const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function); @@ -5118,7 +5133,8 @@ const DeclGen = struct { } fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -5126,7 +5142,7 @@ const DeclGen = struct { const union_obj = mod.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.resolve(extra.init) else null; @@ -5134,7 +5150,8 @@ const DeclGen = struct { } fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5143,7 +5160,7 @@ const DeclGen = struct { const field_index = struct_field.field_index; const field_ty = object_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout(mod)) { @@ -5178,7 +5195,8 @@ const DeclGen = struct { } fn airFieldParentPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; @@ -5187,7 +5205,7 @@ const DeclGen = struct { const field_ptr = try self.resolve(extra.field_ptr); const field_ptr_int = try self.intFromPtr(field_ptr); - const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); const base_ptr_int = base_ptr_int: { if (field_offset == 0) break :base_ptr_int field_ptr_int; @@ -5218,7 +5236,7 @@ const DeclGen = struct { ) !IdRef { const result_ty_id = try self.resolveType(result_ptr_ty, .direct); - const zcu = self.module; + const zcu = self.pt.zcu; const object_ty = 
object_ptr_ty.childType(zcu); switch (object_ty.zigTypeTag(zcu)) { .Pointer => { @@ -5312,7 +5330,7 @@ const DeclGen = struct { } fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ptr_ty = self.typeOfIndex(inst); assert(ptr_ty.ptrAddressSpace(mod) == .generic); const child_ty = ptr_ty.childType(mod); @@ -5486,9 +5504,10 @@ const DeclGen = struct { // of the block, then a label, and then generate the rest of the current // ir.Block in a different SPIR-V block. - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty = self.typeOfIndex(inst); - const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); const cf = switch (self.control_flow) { .structured => |*cf| cf, @@ -5618,13 +5637,13 @@ const DeclGen = struct { } fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const pt = self.pt; const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const operand_ty = self.typeOf(br.operand); switch (self.control_flow) { .structured => |*cf| { - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const operand_id = try self.resolve(br.operand); const block_result_var_id = cf.block_results.get(br.block_inst).?; try self.store(operand_ty, block_result_var_id, operand_id, .{}); @@ -5635,7 +5654,7 @@ const DeclGen = struct { }, .unstructured => |cf| { const block = cf.blocks.get(br.block_inst).?; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const operand_id = try self.resolve(br.operand); // current_block_label should not be undefined here, lest there // is a br or br_void in the function's body. 
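// Editorial aside, not part of the upstream patch: in the structured
// control-flow path above, a `block` gets a result variable (tracked in
// `cf.block_results`); each `br` stores its operand there and jumps to the
// merge point, where the block's value is reloaded, instead of materializing
// the value at the branch. A rough CPU-side analogue of that lowering
// strategy; names are hypothetical.
const std = @import("std");

fn blockWithResult(cond: bool) u32 {
    // Plays the role of the hoisted variable allocated for the block.
    var block_result: u32 = undefined;
    if (cond) {
        block_result = 1; // `br %block, 1`: store, then jump to the merge label
    } else {
        block_result = 2; // `br %block, 2`
    }
    // Merge label: the block's result is a load of the variable.
    return block_result;
}

test "block result through a variable" {
    try std.testing.expectEqual(@as(u32, 1), blockWithResult(true));
    try std.testing.expectEqual(@as(u32, 2), blockWithResult(false));
}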
@@ -5762,7 +5781,7 @@ const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const elem_ty = self.typeOfIndex(inst); @@ -5773,20 +5792,22 @@ const DeclGen = struct { } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(self.module); + const elem_ty = ptr_ty.childType(mod); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); - try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) }); + try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); } fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const pt = self.pt; + const mod = pt.zcu; const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(operand); - const mod = self.module; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { const decl = mod.declPtr(self.decl_index); const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { @@ -5805,12 +5826,13 @@ const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { const decl = mod.declPtr(self.decl_index); const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { @@ -5832,7 +5854,7 @@ const DeclGen = struct { } fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -5902,7 +5924,7 @@ const DeclGen = struct { } fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand); @@ -5938,7 +5960,7 @@ const DeclGen = struct { } fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOfIndex(inst); const payload_ty = err_union_ty.errorUnionPayload(mod); @@ -5985,7 +6007,8 @@ const DeclGen = struct { } fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const operand_ty = self.typeOf(un_op); @@ -6026,7 +6049,7 @@ const DeclGen = struct { const is_non_null_id = blk: { if (is_pointer) { - 
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod)); const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class); const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1}); @@ -6036,7 +6059,7 @@ const DeclGen = struct { break :blk try self.load(Type.bool, operand_id, .{}); } - break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -6061,7 +6084,7 @@ const DeclGen = struct { } fn airIsErr(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const err_union_ty = self.typeOf(un_op); @@ -6094,13 +6117,14 @@ const DeclGen = struct { } fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; @@ -6110,7 +6134,8 @@ const DeclGen = struct { } fn airUnwrapOptionalPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -6119,7 +6144,7 @@ const DeclGen = struct { const result_ty = self.typeOfIndex(inst); const result_ty_id = try self.resolveType(result_ty, .direct); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // There is no payload, but we still need to return a valid pointer. // We can just return anything here, so just return a pointer to the operand. 
return try self.bitCast(result_ty, operand_ty, operand_id);
@@ -6134,11 +6159,12 @@ const DeclGen = struct {
    }
 
    fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
        const payload_ty = self.typeOf(ty_op.operand);
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
            return try self.constBool(true, .indirect);
        }
 
@@ -6156,7 +6182,8 @@ const DeclGen = struct {
    }
 
    fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
-        const mod = self.module;
+        const pt = self.pt;
+        const mod = pt.zcu;
        const target = self.getTarget();
        const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
        const cond_ty = self.typeOf(pl_op.operand);
@@ -6240,15 +6267,15 @@ const DeclGen = struct {
            const label = case_labels.at(case_i);
 
            for (items) |item| {
-                const value = (try self.air.value(item, mod)) orelse unreachable;
+                const value = (try self.air.value(item, pt)) orelse unreachable;
                const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) {
-                    .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(mod)) else value.toUnsignedInt(mod),
+                    .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt),
                    .Enum => blk: {
                        // TODO: figure out if cond_ty is correct (something with enum literals)
                        break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants
                    },
                    .ErrorSet => value.getErrorInt(mod),
-                    .Pointer => value.toUnsignedInt(mod),
+                    .Pointer => value.toUnsignedInt(pt),
                    else => unreachable,
                };
                const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6328,8 +6355,9 @@ const DeclGen = struct {
    }
 
    fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const pt = self.pt;
+        const mod = pt.zcu;
        const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
-        const mod = self.module;
        const decl = mod.declPtr(self.decl_index);
        const path = decl.getFileScope(mod).sub_file_path;
        try self.func.body.emit(self.spv.gpa, .OpLine, .{
@@ -6340,7 +6368,7 @@ const DeclGen = struct {
    }
 
    fn airDbgInlineBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        const inst_datas = self.air.instructions.items(.data);
        const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
        const decl = mod.funcOwnerDeclPtr(extra.data.func);
@@ -6358,7 +6386,7 @@ const DeclGen = struct {
    }
 
    fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.module;
+        const mod = self.pt.zcu;
        const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
        const extra = self.air.extraData(Air.Asm, ty_pl.payload);
 
@@ -6440,20 +6468,20 @@ const DeclGen = struct {
                // TODO: Translate proper error locations.
assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); - self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); - const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); + const src_loc = mod.declPtr(self.decl_index).navSrcLoc(mod); + self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); + const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len); // Sub-scope to prevent `return error.CodegenFail` from running the errdefers. { - errdefer self.module.gpa.free(notes); + errdefer mod.gpa.free(notes); var i: usize = 0; errdefer for (notes[0..i]) |*note| { - note.deinit(self.module.gpa); + note.deinit(mod.gpa); }; while (i < as.errors.items.len) : (i += 1) { - notes[i] = try Module.ErrorMsg.init(self.module.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); + notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); } } self.error_msg.?.notes = notes; @@ -6489,7 +6517,8 @@ const DeclGen = struct { fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); @@ -6515,7 +6544,7 @@ const DeclGen = struct { // before starting to emit OpFunctionCall instructions. Hence the // temporary params buffer. const arg_ty = self.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const arg_id = try self.resolve(arg); params[n_params] = arg_id; @@ -6533,7 +6562,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) { return null; } @@ -6541,11 +6570,10 @@ const DeclGen = struct { } fn builtin3D(self: *DeclGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef { - const mod = self.module; if (dimension >= 3) { return try self.constInt(result_ty, out_of_range_value, .direct); } - const vec_ty = try mod.vectorType(.{ + const vec_ty = try self.pt.vectorType(.{ .len = 3, .child = result_ty.toIntern(), }); @@ -6591,12 +6619,12 @@ const DeclGen = struct { } fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { - const mod = self.module; + const mod = self.pt.zcu; return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { - const mod = self.module; + const mod = self.pt.zcu; return self.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/crash_report.zig b/src/crash_report.zig index 453d5441d41d..653c7428dc1e 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -76,9 +76,9 @@ fn dumpStatusReport() !void { const stderr = io.getStdErr().writer(); const block: *Sema.Block = anal.block; - const mod = anal.sema.mod; + const zcu = anal.sema.pt.zcu; - const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod); + const file, const src_base_node = 
Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);
 
    try stderr.writeAll("Analyzing ");
    try writeFilePath(file, stderr);
@@ -104,7 +104,7 @@ fn dumpStatusReport() !void {
    while (parent) |curr| {
        fba.reset();
        try stderr.writeAll(" in ");
-        const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, mod);
+        const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu);
        try writeFilePath(cur_block_file, stderr);
        try stderr.writeAll("\n > ");
        print_zir.renderSingleInstruction(
diff --git a/src/link.zig b/src/link.zig
index 298d81d80c43..db19a16d4d3d 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -15,8 +15,6 @@ const Compilation = @import("Compilation.zig");
 const LibCInstallation = std.zig.LibCInstallation;
 const Liveness = @import("Liveness.zig");
 const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const InternPool = @import("InternPool.zig");
 const Type = @import("Type.zig");
 const Value = @import("Value.zig");
@@ -367,14 +365,14 @@ pub const File = struct {
    /// Called from within the CodeGen to lower a local variable instantiation as an unnamed
    /// constant. Returns the symbol index of the lowered constant in the read-only section
    /// of the final binary.
-    pub fn lowerUnnamedConst(base: *File, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
+    pub fn lowerUnnamedConst(base: *File, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
        if (build_options.only_c) @compileError("unreachable");
        switch (base.tag) {
            .spirv => unreachable,
            .c => unreachable,
            .nvptx => unreachable,
            inline else => |t| {
-                return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(val, decl_index);
+                return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(pt, val, decl_index);
            },
        }
    }
@@ -399,13 +397,13 @@ pub const File = struct {
    }
 
    /// May be called before or after updateExports for any given Decl.
-    pub fn updateDecl(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
-        const decl = module.declPtr(decl_index);
+    pub fn updateDecl(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
+        const decl = pt.zcu.declPtr(decl_index);
        assert(decl.has_tv);
        switch (base.tag) {
            inline else => |tag| {
                if (tag != .c and build_options.only_c) unreachable;
-                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(module, decl_index);
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(pt, decl_index);
            },
        }
    }
@@ -413,7 +411,7 @@ pub const File = struct {
 
    /// May be called before or after updateExports for any given Decl.
pub fn updateFunc( base: *File, - module: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -421,12 +419,12 @@ pub const File = struct { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(module, func_index, air, liveness); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(pt, func_index, air, liveness); }, } } - pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void { + pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void { const decl = module.declPtr(decl_index); assert(decl.has_tv); switch (base.tag) { @@ -537,7 +535,7 @@ pub const File = struct { /// Commit pending changes and write headers. Takes into account final output mode /// and `use_lld`, not only `effectiveOutputMode`. /// `arena` has the lifetime of the call to `Compilation.update`. - pub fn flush(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { + pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void { if (build_options.only_c) { assert(base.tag == .c); return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node); @@ -563,27 +561,27 @@ pub const File = struct { const output_mode = comp.config.output_mode; const link_mode = comp.config.link_mode; if (use_lld and output_mode == .Lib and link_mode == .static) { - return base.linkAsArchive(arena, prog_node); + return base.linkAsArchive(arena, tid, prog_node); } switch (base.tag) { inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, prog_node); + return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, tid, prog_node); }, } } /// Commit pending changes and write headers. Works based on `effectiveOutputMode` /// rather than final output mode. - pub fn flushModule(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { + pub fn flushModule(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, prog_node); + return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, tid, prog_node); }, } } - /// Called when a Decl is deleted from the Module. + /// Called when a Decl is deleted from the Zcu. pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void { switch (base.tag) { inline else => |tag| { @@ -604,14 +602,14 @@ pub const File = struct { /// May be called before or after updateDecl for any given Decl. 
pub fn updateExports( base: *File, - module: *Module, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, export_indices: []const u32, ) UpdateExportsError!void { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, export_indices); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(pt, exported, export_indices); }, } } @@ -644,9 +642,10 @@ pub const File = struct { pub fn lowerAnonDecl( base: *File, + pt: Zcu.PerThread, decl_val: InternPool.Index, decl_align: InternPool.Alignment, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !LowerResult { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { @@ -654,7 +653,7 @@ pub const File = struct { .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(decl_val, decl_align, src_loc); + return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(pt, decl_val, decl_align, src_loc); }, } } @@ -689,7 +688,7 @@ pub const File = struct { } } - pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { + pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -704,7 +703,7 @@ pub const File = struct { // If there is no Zig code to compile, then we should skip flushing the output file // because it will not be part of the linker line anyway. const zcu_obj_path: ?[]const u8 = if (opt_zcu != null) blk: { - try base.flushModule(arena, prog_node); + try base.flushModule(arena, tid, prog_node); const dirname = fs.path.dirname(full_out_path_z) orelse "."; break :blk try fs.path.join(arena, &.{ dirname, base.zcu_object_sub_path.? 
}); @@ -896,14 +895,14 @@ pub const File = struct { kind: Kind, ty: Type, - pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Module) LazySymbol { + pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Zcu) LazySymbol { return .{ .kind = kind, .ty = if (decl) |decl_index| mod.declPtr(decl_index).val.toType() else Type.anyerror }; } - pub fn getDecl(self: LazySymbol, mod: *Module) InternPool.OptionalDeclIndex { + pub fn getDecl(self: LazySymbol, mod: *Zcu) InternPool.OptionalDeclIndex { return InternPool.OptionalDeclIndex.init(self.ty.getOwnerDeclOrNull(mod)); } }; diff --git a/src/link/C.zig b/src/link/C.zig index 563604f7e09d..3db5952a4c60 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -186,13 +186,13 @@ pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void { pub fn updateFunc( self: *C, - zcu: *Zcu, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { - const gpa = self.base.comp.gpa; - + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); const decl_index = func.owner_decl; const decl = zcu.declPtr(decl_index); @@ -218,7 +218,7 @@ pub fn updateFunc( .object = .{ .dg = .{ .gpa = gpa, - .zcu = zcu, + .pt = pt, .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, @@ -263,7 +263,7 @@ pub fn updateFunc( gop.value_ptr.code = try self.addString(function.object.code.items); } -fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { +fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void { const gpa = self.base.comp.gpa; const anon_decl = self.anon_decls.keys()[i]; @@ -275,8 +275,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { var object: codegen.Object = .{ .dg = .{ .gpa = gpa, - .zcu = zcu, - .mod = zcu.root_mod, + .pt = pt, + .mod = pt.zcu.root_mod, .error_msg = null, .pass = .{ .anon = anon_decl }, .is_naked_fn = false, @@ -319,12 +319,13 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { }; } -pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.comp.gpa; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const gop = try self.decl_table.getOrPut(gpa, decl_index); errdefer _ = self.decl_table.pop(); @@ -342,7 +343,7 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { var object: codegen.Object = .{ .dg = .{ .gpa = gpa, - .zcu = zcu, + .pt = pt, .mod = file_scope.mod, .error_msg = null, .pass = .{ .decl = decl_index }, @@ -390,8 +391,8 @@ pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclInde _ = decl_index; } -pub fn flush(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { - return self.flushModule(arena, prog_node); +pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { + return self.flushModule(arena, tid, prog_node); } fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { @@ -409,7 +410,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { return defines; } -pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { +pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { _ = arena; // Has the same lifetime as the call to Compilation.update. 
const tracy = trace(@src()); @@ -421,11 +422,12 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo const comp = self.base.comp; const gpa = comp.gpa; const zcu = self.base.comp.module.?; + const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid }; { var i: usize = 0; while (i < self.anon_decls.count()) : (i += 1) { - try updateAnonDecl(self, zcu, i); + try updateAnonDecl(self, pt, i); } } @@ -463,7 +465,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo self.lazy_fwd_decl_buf.clearRetainingCapacity(); self.lazy_code_buf.clearRetainingCapacity(); try f.lazy_ctype_pool.init(gpa); - try self.flushErrDecls(zcu, &f.lazy_ctype_pool); + try self.flushErrDecls(pt, &f.lazy_ctype_pool); // Unlike other backends, the .c code we are emitting has order-dependent decls. // `CType`s, forward decls, and non-functions first. @@ -483,7 +485,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo } for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock( - zcu, + pt, zcu.root_mod, &f, decl_block, @@ -497,7 +499,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; const mod = zcu.namespacePtr(decl.src_namespace).fileScope(zcu).mod; try self.flushDeclBlock( - zcu, + pt, mod, &f, decl_block, @@ -670,7 +672,7 @@ fn flushCTypes( } } -fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void { +fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) FlushDeclError!void { const gpa = self.base.comp.gpa; const fwd_decl = &self.lazy_fwd_decl_buf; @@ -679,8 +681,8 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl var object = codegen.Object{ .dg = .{ .gpa = gpa, - .zcu = zcu, - .mod = zcu.root_mod, + .pt = pt, + .mod = pt.zcu.root_mod, .error_msg = null, .pass = .flush, .is_naked_fn = false, @@ -712,7 +714,7 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl fn flushLazyFn( self: *C, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, ctype_pool: *codegen.CType.Pool, lazy_ctype_pool: *const codegen.CType.Pool, @@ -726,7 +728,7 @@ fn flushLazyFn( var object = codegen.Object{ .dg = .{ .gpa = gpa, - .zcu = zcu, + .pt = pt, .mod = mod, .error_msg = null, .pass = .flush, @@ -761,7 +763,7 @@ fn flushLazyFn( fn flushLazyFns( self: *C, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, f: *Flush, lazy_ctype_pool: *const codegen.CType.Pool, @@ -775,13 +777,13 @@ fn flushLazyFns( const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*); if (gop.found_existing) continue; gop.value_ptr.* = {}; - try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry); + try self.flushLazyFn(pt, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry); } } fn flushDeclBlock( self: *C, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, f: *Flush, decl_block: *const DeclBlock, @@ -790,7 +792,7 @@ fn flushDeclBlock( extern_name: InternPool.OptionalNullTerminatedString, ) FlushDeclError!void { const gpa = self.base.comp.gpa; - try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns); + try self.flushLazyFns(pt, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); // avoid emitting extern decls that are already exported if (extern_name.unwrap()) |name| if (export_names.contains(name)) return; @@ -845,11 
+847,12 @@ pub fn flushEmitH(zcu: *Zcu) !void { pub fn updateExports( self: *C, - zcu: *Zcu, + pt: Zcu.PerThread, exported: Zcu.Exported, export_indices: []const u32, ) !void { - const gpa = self.base.comp.gpa; + const zcu = pt.zcu; + const gpa = zcu.gpa; const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { .decl_index => |decl_index| .{ zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).fileScope(zcu).mod, @@ -869,7 +872,7 @@ pub fn updateExports( fwd_decl.clearRetainingCapacity(); var dg: codegen.DeclGen = .{ .gpa = gpa, - .zcu = zcu, + .pt = pt, .mod = mod, .error_msg = null, .pass = pass, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 55028fc8ad32..7ef5bde6e637 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1120,16 +1120,17 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(mod, func_index, air, liveness); + return llvm_object.updateFunc(pt, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1144,6 +1145,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, + pt, decl.navSrcLoc(mod), func_index, air, @@ -1160,14 +1162,14 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: }, }; - try self.updateDeclCode(decl_index, code, .FUNCTION); + try self.updateDeclCode(pt, decl_index, code, .FUNCTION); // Exports will be updated by `Zcu.processExports` after the update. 
} -pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; +pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { + const mod = pt.zcu; + const gpa = mod.gpa; const decl = mod.declPtr(decl_index); const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); if (!gop.found_existing) { @@ -1179,7 +1181,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); const ty = val.typeOf(mod); - const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) { + const atom_index = switch (try self.lowerConst(pt, sym_name, val, ty.abiAlignment(pt), self.rdata_section_index.?, decl.navSrcLoc(mod))) { .ok => |atom_index| atom_index, .fail => |em| { decl.analysis = .codegen_failure; @@ -1197,7 +1199,15 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult { +fn lowerConst( + self: *Coff, + pt: Zcu.PerThread, + name: []const u8, + val: Value, + required_alignment: InternPool.Alignment, + sect_id: u16, + src_loc: Module.LazySrcLoc, +) !LowerConstResult { const gpa = self.base.comp.gpa; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1208,7 +1218,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int try self.setSymbolName(sym, name); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_id + 1)); - const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); const code = switch (res) { @@ -1235,13 +1245,14 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int pub fn updateDecl( self: *Coff, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) link.File.UpdateDeclError!void { + const mod = pt.zcu; if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); const tracy = trace(@src()); defer tracy.end(); @@ -1270,7 +1281,7 @@ pub fn updateDecl( defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { @@ -1282,19 +1293,20 @@ pub fn updateDecl( }, }; - try self.updateDeclCode(decl_index, code, .NULL); + try self.updateDeclCode(pt, decl_index, code, .NULL); // Exports will be updated by `Zcu.processExports` after the update. 
} fn updateLazySymbolAtom( self: *Coff, + pt: Zcu.PerThread, sym: link.File.LazySymbol, atom_index: Atom.Index, section_index: u16, ) !void { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1302,7 +1314,7 @@ fn updateLazySymbolAtom( const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(mod), + sym.ty.fmt(pt), }); defer gpa.free(name); @@ -1312,6 +1324,7 @@ fn updateLazySymbolAtom( const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, + pt, src, sym, &required_alignment, @@ -1346,7 +1359,7 @@ fn updateLazySymbolAtom( try self.writeAtom(atom_index, code); } -pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index { +pub fn getOrCreateAtomForLazySymbol(self: *Coff, pt: Zcu.PerThread, sym: link.File.LazySymbol) !Atom.Index { const gpa = self.base.comp.gpa; const mod = self.base.comp.module.?; const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(mod)); @@ -1364,7 +1377,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(pt, sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rdata_section_index.?, }); @@ -1410,14 +1423,14 @@ fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 { return index; } -fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void { - const mod = self.base.comp.module.?; +fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void { + const mod = pt.zcu; const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0); + const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -1496,7 +1509,7 @@ pub fn freeDecl(self: *Coff, decl_index: InternPool.DeclIndex) void { pub fn updateExports( self: *Coff, - mod: *Module, + pt: Zcu.PerThread, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1504,6 +1517,7 @@ pub fn updateExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } + const mod = pt.zcu; const ip = &mod.intern_pool; const comp = self.base.comp; const target = comp.root_mod.resolved_target.result; @@ -1542,7 +1556,7 @@ pub fn updateExports( } } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices); const gpa = comp.gpa; @@ -1553,7 +1567,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try 
self.lowerAnonDecl(value, .none, first_exp.src); + const res = try self.lowerAnonDecl(pt, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1696,19 +1710,19 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void { gop.value_ptr.* = current; } -pub fn flush(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; if (use_lld) { - return lld.linkWithLLD(self, arena, prog_node); + return lld.linkWithLLD(self, arena, tid, prog_node); } switch (comp.config.output_mode) { - .Exe, .Obj => return self.flushModule(arena, prog_node), + .Exe, .Obj => return self.flushModule(arena, tid, prog_node), .Lib => return error.TODOImplementWritingLibFiles, } } -pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1723,13 +1737,17 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) const sub_prog_node = prog_node.start("COFF Flush", 0); defer sub_prog_node.end(); - const module = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented; + const pt: Zcu.PerThread = .{ + .zcu = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented, + .tid = tid, + }; if (self.lazy_syms.getPtr(.none)) |metadata| { // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. 
if (metadata.text_state != .unused) self.updateLazySymbolAtom( - link.File.LazySymbol.initDecl(.code, null, module), + pt, + link.File.LazySymbol.initDecl(.code, null, pt.zcu), metadata.text_atom, self.text_section_index.?, ) catch |err| return switch (err) { @@ -1737,7 +1755,8 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) else => |e| e, }; if (metadata.rdata_state != .unused) self.updateLazySymbolAtom( - link.File.LazySymbol.initDecl(.const_data, null, module), + pt, + link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), metadata.rdata_atom, self.rdata_section_index.?, ) catch |err| return switch (err) { @@ -1858,6 +1877,7 @@ pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: l pub fn lowerAnonDecl( self: *Coff, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, @@ -1866,7 +1886,7 @@ pub fn lowerAnonDecl( const mod = self.base.comp.module.?; const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(mod), + .none => ty.abiAlignment(pt), else => explicit_alignment, }; if (self.anon_decls.get(decl_val)) |metadata| { @@ -1881,6 +1901,7 @@ pub fn lowerAnonDecl( @intFromEnum(decl_val), }) catch unreachable; const res = self.lowerConst( + pt, name, val, decl_alignment, diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig index c2620c1fe930..4ec84583673b 100644 --- a/src/link/Coff/lld.zig +++ b/src/link/Coff/lld.zig @@ -15,8 +15,9 @@ const Allocator = mem.Allocator; const Coff = @import("../Coff.zig"); const Compilation = @import("../../Compilation.zig"); +const Zcu = @import("../../Zcu.zig"); -pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) !void { +pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -29,7 +30,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) // If there is no Zig code to compile, then we should skip flushing the output file because it // will not be part of the linker line anyway. const module_obj_path: ?[]const u8 = if (comp.module != null) blk: { - try self.flushModule(arena, prog_node); + try self.flushModule(arena, tid, prog_node); if (fs.path.dirname(full_out_path)) |dirname| { break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? }); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 40cbfd28a89b..80c88666bcf8 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -31,7 +31,7 @@ strtab: StringTable = .{}, /// They will end up in the DWARF debug_line header as two lists: /// * []include_directory /// * []file_names -di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{}, +di_files: std.AutoArrayHashMapUnmanaged(*const Zcu.File, void) = .{}, global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{}, @@ -67,7 +67,7 @@ const DbgLineHeader = struct { /// Decl's inner Atom is assigned an offset within the DWARF section. 
pub const DeclState = struct { dwarf: *Dwarf, - mod: *Module, + pt: Zcu.PerThread, di_atom_decls: *const AtomTable, dbg_line_func: InternPool.Index, dbg_line: std.ArrayList(u8), @@ -113,7 +113,7 @@ pub const DeclState = struct { .type = ty, .offset = undefined, }); - log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) }); + log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.pt) }); try self.abbrev_resolver.putNoClobber(gpa, ty.toIntern(), sym_index); break :blk sym_index; }; @@ -128,16 +128,17 @@ pub const DeclState = struct { fn addDbgInfoType( self: *DeclState, - mod: *Module, + pt: Zcu.PerThread, atom_index: Atom.Index, ty: Type, ) error{OutOfMemory}!void { + const zcu = pt.zcu; const dbg_info_buffer = &self.dbg_info; - const target = mod.getTarget(); + const target = zcu.getTarget(); const target_endian = target.cpu.arch.endian(); - const ip = &mod.intern_pool; + const ip = &zcu.intern_pool; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .NoReturn => unreachable, .Void => { try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type)); @@ -148,12 +149,12 @@ pub const DeclState = struct { // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); }, .Int => { - const info = ty.intInfo(mod); + const info = ty.intInfo(zcu); try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type)); // DW.AT.encoding, DW.FORM.data1 @@ -162,30 +163,30 @@ pub const DeclState = struct { .unsigned => DW.ATE.unsigned, }); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); }, .Optional => { - if (ty.isPtrLikeOptional(mod)) { + if (ty.isPtrLikeOptional(zcu)) { try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type)); // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.address); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } - const payload_ty = ty.optionalChild(mod); + const payload_ty = ty.optionalChild(zcu); // DW.AT.structure_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = ty.abiSize(mod); + const abi_size = ty.abiSize(pt); try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(21); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member)); @@ -208,14 
+209,14 @@ pub const DeclState = struct { dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(index)); // DW.AT.data_member_location, DW.FORM.udata - const offset = abi_size - payload_ty.abiSize(mod); + const offset = abi_size - payload_ty.abiSize(pt); try leb128.writeUleb128(dbg_info_buffer.writer(), offset); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); } }, .Pointer => { - if (ty.isSlice(mod)) { + if (ty.isSlice(zcu)) { // Slices are structs: struct { .ptr = *, .len = N } const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u8 = @intCast(@divExact(ptr_bits, 8)); @@ -223,9 +224,9 @@ pub const DeclState = struct { try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(21); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member)); @@ -235,7 +236,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); - const ptr_ty = ty.slicePtrFieldType(mod); + const ptr_ty = ty.slicePtrFieldType(zcu); try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(index)); // DW.AT.data_member_location, DW.FORM.udata dbg_info_buffer.appendAssumeCapacity(0); @@ -258,19 +259,19 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index)); } }, .Array => { // DW.AT.array_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.array_type)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.ensureUnusedCapacity(9); dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index)); // DW.AT.subrange_type dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.array_dim)); // DW.AT.type, DW.FORM.ref4 @@ -278,7 +279,7 @@ pub const DeclState = struct { dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(index)); // DW.AT.count, DW.FORM.udata - const len = ty.arrayLenIncludingSentinel(mod); + const len = ty.arrayLenIncludingSentinel(pt.zcu); try leb128.writeUleb128(dbg_info_buffer.writer(), len); // DW.AT.array_type delimit children try dbg_info_buffer.append(0); @@ -287,13 +288,13 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); blk: { switch (ip.indexToKey(ty.ip_index)) { .anon_struct_type => 
|fields| { // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)}); for (fields.types.get(ip), 0..) |field_ty, field_index| { // DW.AT.member @@ -305,14 +306,14 @@ pub const DeclState = struct { try dbg_info_buffer.appendNTimes(0, 4); try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, mod); + const field_off = ty.structFieldOffset(field_index, pt); try leb128.writeUleb128(dbg_info_buffer.writer(), field_off); } }, .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); // DW.AT.name, DW.FORM.string - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); if (struct_type.layout == .@"packed") { @@ -322,7 +323,7 @@ pub const DeclState = struct { if (struct_type.isTuple(ip)) { for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; // DW.AT.member try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member)); // DW.AT.name, DW.FORM.string @@ -340,7 +341,7 @@ pub const DeclState = struct { struct_type.field_types.get(ip), struct_type.offsets.get(ip), ) |field_name, field_ty, field_off| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; const field_name_slice = field_name.toSlice(ip); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2); @@ -367,9 +368,9 @@ pub const DeclState = struct { // DW.AT.enumeration_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod)); + try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt)); // DW.AT.name, DW.FORM.string - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); const enum_type = ip.loadEnumType(ty.ip_index); @@ -386,8 +387,8 @@ pub const DeclState = struct { const value = enum_type.values.get(ip)[field_i]; // TODO do not assume a 64bit enum value - could be bigger. 
// See https://github.com/ziglang/zig/issues/645 - const field_int_val = try Value.fromInterned(value).intFromEnum(ty, mod); - break :value @bitCast(field_int_val.toSignedInt(mod)); + const field_int_val = try Value.fromInterned(value).intFromEnum(ty, pt); + break :value @bitCast(field_int_val.toSignedInt(pt)); }; mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -396,8 +397,8 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); }, .Union => { - const union_obj = mod.typeToUnion(ty).?; - const layout = mod.getUnionLayout(union_obj); + const union_obj = zcu.typeToUnion(ty).?; + const layout = pt.getUnionLayout(union_obj); const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0; const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size; // TODO this is temporary to match current state of unions in Zig - we don't yet have @@ -410,7 +411,7 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.udata try leb128.writeUleb128(dbg_info_buffer.writer(), layout.abi_size); // DW.AT.name, DW.FORM.string - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); // DW.AT.member @@ -435,12 +436,12 @@ pub const DeclState = struct { if (is_tagged) { try dbg_info_buffer.writer().print("AnonUnion\x00", .{}); } else { - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); } for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; const field_name_slice = field_name.toSlice(ip); // DW.AT.member try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member)); @@ -474,25 +475,25 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); } }, - .ErrorSet => try addDbgInfoErrorSet(mod, ty, target, &self.dbg_info), + .ErrorSet => try addDbgInfoErrorSet(pt, ty, target, &self.dbg_info), .ErrorUnion => { - const error_ty = ty.errorUnionSet(mod); - const payload_ty = ty.errorUnionPayload(mod); - const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const abi_size = ty.abiSize(mod); - const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0; - const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod); + const error_ty = ty.errorUnionSet(zcu); + const payload_ty = ty.errorUnionPayload(zcu); + const payload_align = if (payload_ty.isNoReturn(zcu)) .none else payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(pt); + const abi_size = ty.abiSize(pt); + const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(pt) else 0; + const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(pt); // DW.AT.structure_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type)); // DW.AT.byte_size, DW.FORM.udata try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); - if (!payload_ty.isNoReturn(mod)) { + if (!payload_ty.isNoReturn(zcu)) { // 
DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(11); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member)); @@ -526,7 +527,7 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); }, else => { - log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(self.mod)}); + log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(pt)}); try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type)); }, } @@ -555,6 +556,7 @@ pub const DeclState = struct { owner_decl: InternPool.DeclIndex, loc: DbgInfoLoc, ) error{OutOfMemory}!void { + const pt = self.pt; const dbg_info = &self.dbg_info; const atom_index = self.di_atom_decls.get(owner_decl).?; const name_with_null = name.ptr[0 .. name.len + 1]; @@ -580,9 +582,9 @@ pub const DeclState = struct { } }, .register_pair => |regs| { - const reg_bits = self.mod.getTarget().ptrBitWidth(); + const reg_bits = pt.zcu.getTarget().ptrBitWidth(); const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8)); - const abi_size = ty.abiSize(self.mod); + const abi_size = ty.abiSize(pt); try dbg_info.ensureUnusedCapacity(10); dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter)); // DW.AT.location, DW.FORM.exprloc @@ -675,10 +677,10 @@ pub const DeclState = struct { const name_with_null = name.ptr[0 .. name.len + 1]; try dbg_info.append(@intFromEnum(AbbrevCode.variable)); const gpa = self.dwarf.allocator; - const mod = self.mod; - const target = mod.getTarget(); + const pt = self.pt; + const target = pt.zcu.getTarget(); const endian = target.cpu.arch.endian(); - const child_ty = if (is_ptr) ty.childType(mod) else ty; + const child_ty = if (is_ptr) ty.childType(pt.zcu) else ty; switch (loc) { .register => |reg| { @@ -701,9 +703,9 @@ pub const DeclState = struct { }, .register_pair => |regs| { - const reg_bits = self.mod.getTarget().ptrBitWidth(); + const reg_bits = pt.zcu.getTarget().ptrBitWidth(); const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8)); - const abi_size = child_ty.abiSize(self.mod); + const abi_size = child_ty.abiSize(pt); try dbg_info.ensureUnusedCapacity(9); // DW.AT.location, DW.FORM.exprloc var expr_len = std.io.countingWriter(std.io.null_writer); @@ -829,9 +831,9 @@ pub const DeclState = struct { const fixup = dbg_info.items.len; dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1, - if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu, + if (child_ty.isSignedInt(pt.zcu)) DW.OP.consts else DW.OP.constu, }); - if (child_ty.isSignedInt(mod)) { + if (child_ty.isSignedInt(pt.zcu)) { try leb128.writeIleb128(dbg_info.writer(), @as(i64, @bitCast(x))); } else { try leb128.writeUleb128(dbg_info.writer(), x); @@ -844,7 +846,7 @@ pub const DeclState = struct { // DW.AT.location, DW.FORM.exprloc // uleb128(exprloc_len) // DW.OP.implicit_value uleb128(len_of_bytes) bytes - const abi_size: u32 = @intCast(child_ty.abiSize(mod)); + const abi_size: u32 = @intCast(child_ty.abiSize(self.pt)); var implicit_value_len = std.ArrayList(u8).init(gpa); defer implicit_value_len.deinit(); try leb128.writeUleb128(implicit_value_len.writer(), abi_size); @@ -934,22 +936,23 @@ pub const DeclState = struct { } pub fn setInlineFunc(self: *DeclState, func: InternPool.Index) error{OutOfMemory}!void { + const zcu = self.pt.zcu; if (self.dbg_line_func == func) return; try self.dbg_line.ensureUnusedCapacity((1 + 4) + (1 + 5)); - const old_func_info = self.mod.funcInfo(self.dbg_line_func); - const new_func_info = self.mod.funcInfo(func); + const old_func_info = 
zcu.funcInfo(self.dbg_line_func); + const new_func_info = zcu.funcInfo(func); - const old_file = try self.dwarf.addDIFile(self.mod, old_func_info.owner_decl); - const new_file = try self.dwarf.addDIFile(self.mod, new_func_info.owner_decl); + const old_file = try self.dwarf.addDIFile(zcu, old_func_info.owner_decl); + const new_file = try self.dwarf.addDIFile(zcu, new_func_info.owner_decl); if (old_file != new_file) { self.dbg_line.appendAssumeCapacity(DW.LNS.set_file); leb128.writeUnsignedFixed(4, self.dbg_line.addManyAsArrayAssumeCapacity(4), new_file); } - const old_src_line: i33 = self.mod.declPtr(old_func_info.owner_decl).navSrcLine(self.mod); - const new_src_line: i33 = self.mod.declPtr(new_func_info.owner_decl).navSrcLine(self.mod); + const old_src_line: i33 = zcu.declPtr(old_func_info.owner_decl).navSrcLine(zcu); + const new_src_line: i33 = zcu.declPtr(new_func_info.owner_decl).navSrcLine(zcu); if (new_src_line != old_src_line) { self.dbg_line.appendAssumeCapacity(DW.LNS.advance_line); leb128.writeSignedFixed(5, self.dbg_line.addManyAsArrayAssumeCapacity(5), new_src_line - old_src_line); @@ -1074,19 +1077,19 @@ pub fn deinit(self: *Dwarf) void { /// Initializes Decl's state and its matching output buffers. /// Call this before `commitDeclState`. -pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !DeclState { +pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !DeclState { const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - const decl_linkage_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu); - log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&mod.intern_pool), decl }); + log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl }); const gpa = self.allocator; var decl_state: DeclState = .{ .dwarf = self, - .mod = mod, + .pt = pt, .di_atom_decls = &self.di_atom_decls, .dbg_line_func = undefined, .dbg_line = std.ArrayList(u8).init(gpa), @@ -1105,7 +1108,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde assert(decl.has_tv); - switch (decl.typeOf(mod).zigTypeTag(mod)) { + switch (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu)) { .Fn => { _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index); @@ -1114,13 +1117,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde try dbg_line_buffer.ensureTotalCapacity((3 + ptr_width_bytes) + (1 + 4) + (1 + 4) + (1 + 5) + 1); decl_state.dbg_line_func = decl.val.toIntern(); - const func = decl.val.getFunction(mod).?; + const func = decl.val.getFunction(pt.zcu).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.navSrcLine(mod), + decl.navSrcLine(pt.zcu), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line); + const line: u28 = @intCast(decl.navSrcLine(pt.zcu) + func.lbrace_line); dbg_line_buffer.appendSliceAssumeCapacity(&.{ DW.LNS.extended_op, @@ -1142,7 +1145,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); // Once we support more than one source file, this will have the ability to be more // than one possible value. 
- const file_index = try self.addDIFile(mod, decl_index); + const file_index = try self.addDIFile(pt.zcu, decl_index); leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_column); @@ -1153,13 +1156,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy); // .debug_info subprogram - const decl_name_slice = decl.name.toSlice(&mod.intern_pool); - const decl_linkage_name_slice = decl_linkage_name.toSlice(&mod.intern_pool); + const decl_name_slice = decl.name.toSlice(&pt.zcu.intern_pool); + const decl_linkage_name_slice = decl_linkage_name.toSlice(&pt.zcu.intern_pool); try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 + (decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1)); - const fn_ret_type = decl.typeOf(mod).fnReturnType(mod); - const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); + const fn_ret_type = decl.typeOf(pt.zcu).fnReturnType(pt.zcu); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(pt); dbg_info_buffer.appendAssumeCapacity(@intFromEnum( @as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid), )); @@ -1191,7 +1194,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde pub fn commitDeclState( self: *Dwarf, - zcu: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, sym_addr: u64, sym_size: u64, @@ -1201,6 +1204,7 @@ pub fn commitDeclState( defer tracy.end(); const gpa = self.allocator; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const ip = &zcu.intern_pool; const namespace = zcu.namespacePtr(decl.src_namespace); @@ -1432,7 +1436,7 @@ pub fn commitDeclState( if (ip.isErrorSetType(ty.toIntern())) continue; symbol.offset = @intCast(dbg_info_buffer.items.len); - try decl_state.addDbgInfoType(zcu, di_atom_index, ty); + try decl_state.addDbgInfoType(pt, di_atom_index, ty); } } @@ -1457,7 +1461,7 @@ pub fn commitDeclState( reloc.offset, value, reloc_target, - ty.fmt(zcu), + ty.fmt(pt), }); mem.writeInt( u32, @@ -1691,7 +1695,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons } } -pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1699,14 +1703,14 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.D const atom = self.getAtom(.src_fn, atom_index); if (atom.len == 0) return; - const decl = mod.declPtr(decl_index); - const func = decl.val.getFunction(mod).?; + const decl = zcu.declPtr(decl_index); + const func = decl.val.getFunction(zcu).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.navSrcLine(mod), + decl.navSrcLine(zcu), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line); + const line: u28 = @intCast(decl.navSrcLine(zcu) + func.lbrace_line); var data: [4]u8 = undefined; leb128.writeUnsignedFixed(4, &data, line); @@ -1969,7 +1973,7 @@ fn dbgInfoHeaderBytes(self: *Dwarf) usize { return 120; } -pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64) !void { +pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Zcu, low_pc: u64, high_pc: u64) !void { // If this value is null it means there is an error in the module; // leave 
debug_info_header_dirty=true. const first_dbg_info_off = self.getDebugInfoOff() orelse return; @@ -2058,14 +2062,14 @@ pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64) } } -fn resolveCompilationDir(module: *Module, buffer: *[std.fs.max_path_bytes]u8) []const u8 { +fn resolveCompilationDir(zcu: *Zcu, buffer: *[std.fs.max_path_bytes]u8) []const u8 { // We fully resolve all paths at this point to avoid lack of source line info in stack // traces or lack of debugging information which, if relative paths were used, would // be very location dependent. // TODO: the only concern I have with this is WASI as either host or target, should // we leave the paths as relative then? - const root_dir_path = module.root_mod.root.root_dir.path orelse "."; - const sub_path = module.root_mod.root.sub_path; + const root_dir_path = zcu.root_mod.root.root_dir.path orelse "."; + const sub_path = zcu.root_mod.root.sub_path; const realpath = if (std.fs.path.isAbsolute(root_dir_path)) r: { @memcpy(buffer[0..root_dir_path.len], root_dir_path); break :r root_dir_path; @@ -2682,7 +2686,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) { return actual_size +| (actual_size / ideal_factor); } -pub fn flushModule(self: *Dwarf, module: *Module) !void { +pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void { const comp = self.bin_file.comp; const target = comp.root_mod.resolved_target.result; @@ -2694,9 +2698,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { var dbg_info_buffer = std.ArrayList(u8).init(arena); try addDbgInfoErrorSetNames( - module, + pt, Type.anyerror, - module.global_error_set.keys(), + pt.zcu.global_error_set.keys(), target, &dbg_info_buffer, ); @@ -2759,9 +2763,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { } } -fn addDIFile(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !u28 { - const decl = mod.declPtr(decl_index); - const file_scope = decl.getFileScope(mod); +fn addDIFile(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !u28 { + const decl = zcu.declPtr(decl_index); + const file_scope = decl.getFileScope(zcu); const gop = try self.di_files.getOrPut(self.allocator, file_scope); if (!gop.found_existing) { switch (self.bin_file.tag) { @@ -2827,16 +2831,16 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { } fn addDbgInfoErrorSet( - mod: *Module, + pt: Zcu.PerThread, ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), ) !void { - return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod).get(&mod.intern_pool), target, dbg_info_buffer); + return addDbgInfoErrorSetNames(pt, ty, ty.errorSetNames(pt.zcu).get(&pt.zcu.intern_pool), target, dbg_info_buffer); } fn addDbgInfoErrorSetNames( - mod: *Module, + pt: Zcu.PerThread, /// Used for printing the type name only. 
ty: Type, error_names: []const InternPool.NullTerminatedString, @@ -2848,10 +2852,10 @@ fn addDbgInfoErrorSetNames( // DW.AT.enumeration_type try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = Type.anyerror.abiSize(mod); + const abi_size = Type.anyerror.abiSize(pt); try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try ty.print(dbg_info_buffer.writer(), mod); + try ty.print(dbg_info_buffer.writer(), pt); try dbg_info_buffer.append(0); // DW.AT.enumerator @@ -2865,8 +2869,8 @@ fn addDbgInfoErrorSetNames( mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); for (error_names) |error_name| { - const int = try mod.getErrorValue(error_name); - const error_name_slice = error_name.toSlice(&mod.intern_pool); + const int = try pt.zcu.getErrorValue(error_name); + const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool); // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant)); @@ -2965,8 +2969,6 @@ const LinkBlock = File.LinkBlock; const LinkFn = File.LinkFn; const LinkerLoad = @import("../codegen.zig").LinkerLoad; const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const StringTable = @import("StringTable.zig"); const Type = @import("../Type.zig"); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 7510dd4956bb..39704d937c6f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -550,11 +550,12 @@ pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: li pub fn lowerAnonDecl( self: *Elf, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, ) !codegen.Result { - return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); + return self.zigObjectPtr().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc); } pub fn getAnonDeclVAddr(self: *Elf, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { @@ -1064,15 +1065,15 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void { } } -pub fn flush(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const use_lld = build_options.have_llvm and self.base.comp.config.use_lld; if (use_lld) { - return self.linkWithLLD(arena, prog_node); + return self.linkWithLLD(arena, tid, prog_node); } - try self.flushModule(arena, prog_node); + try self.flushModule(arena, tid, prog_node); } -pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1103,7 +1104,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) l // --verbose-link if (comp.verbose_link) try self.dumpArgv(comp); - if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self); + if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self, tid); if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path); if (self.base.isObject()) return relocatable.flushObject(self, 
comp, module_obj_path); @@ -2146,7 +2147,7 @@ fn scanRelocs(self: *Elf) !void { } } -fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void { +fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -2159,7 +2160,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void // If there is no Zig code to compile, then we should skip flushing the output file because it // will not be part of the linker line anyway. const module_obj_path: ?[]const u8 = if (comp.module != null) blk: { - try self.flushModule(arena, prog_node); + try self.flushModule(arena, tid, prog_node); if (fs.path.dirname(full_out_path)) |dirname| { break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? }); @@ -2983,41 +2984,41 @@ pub fn freeDecl(self: *Elf, decl_index: InternPool.DeclIndex) void { return self.zigObjectPtr().?.freeDecl(self, decl_index); } -pub fn updateFunc(self: *Elf, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); - return self.zigObjectPtr().?.updateFunc(self, mod, func_index, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness); + return self.zigObjectPtr().?.updateFunc(self, pt, func_index, air, liveness); } pub fn updateDecl( self: *Elf, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) link.File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); - return self.zigObjectPtr().?.updateDecl(self, mod, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); + return self.zigObjectPtr().?.updateDecl(self, pt, decl_index); } -pub fn lowerUnnamedConst(self: *Elf, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return self.zigObjectPtr().?.lowerUnnamedConst(self, val, decl_index); +pub fn lowerUnnamedConst(self: *Elf, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { + return self.zigObjectPtr().?.lowerUnnamedConst(self, pt, val, decl_index); } pub fn updateExports( self: *Elf, - mod: *Module, + pt: Zcu.PerThread, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); - return self.zigObjectPtr().?.updateExports(self, mod, exported, export_indices); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices); + return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices); } pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void { diff --git 
a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 56311dd64b81..8cfa5e701ffb 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -158,16 +158,17 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void { } } -pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void { +pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void { // Handle any lazy symbols that were emitted by incremental compilation. if (self.lazy_syms.getPtr(.none)) |metadata| { - const zcu = elf_file.base.comp.module.?; + const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid }; // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. if (metadata.text_state != .unused) self.updateLazySymbol( elf_file, - link.File.LazySymbol.initDecl(.code, null, zcu), + pt, + link.File.LazySymbol.initDecl(.code, null, pt.zcu), metadata.text_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -175,7 +176,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void { }; if (metadata.rodata_state != .unused) self.updateLazySymbol( elf_file, - link.File.LazySymbol.initDecl(.const_data, null, zcu), + pt, + link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), metadata.rodata_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -188,8 +190,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void { } if (self.dwarf) |*dw| { - const zcu = elf_file.base.comp.module.?; - try dw.flushModule(zcu); + const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid }; + try dw.flushModule(pt); // TODO I need to re-think how to handle ZigObject's debug sections AND debug sections // extracted from input object files correctly. 
@@ -202,7 +204,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void { const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?]; const low_pc = text_shdr.sh_addr; const high_pc = text_shdr.sh_addr + text_shdr.sh_size; - try dw.writeDbgInfoHeader(zcu, low_pc, high_pc); + try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc); self.debug_info_header_dirty = false; } @@ -684,6 +686,7 @@ pub fn getAnonDeclVAddr( pub fn lowerAnonDecl( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, @@ -692,7 +695,7 @@ pub fn lowerAnonDecl( const mod = elf_file.base.comp.module.?; const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(mod), + .none => ty.abiAlignment(pt), else => explicit_alignment, }; if (self.anon_decls.get(decl_val)) |metadata| { @@ -708,6 +711,7 @@ pub fn lowerAnonDecl( }) catch unreachable; const res = self.lowerConst( elf_file, + pt, name, val, decl_alignment, @@ -733,10 +737,11 @@ pub fn lowerAnonDecl( pub fn getOrCreateMetadataForLazySymbol( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, lazy_sym: link.File.LazySymbol, ) !Symbol.Index { - const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; @@ -766,7 +771,7 @@ pub fn getOrCreateMetadataForLazySymbol( metadata.state.* = .pending_flush; const symbol_index = metadata.symbol_index.*; // anyerror needs to be deferred until flushModule - if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, lazy_sym, symbol_index); + if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index); return symbol_index; } @@ -893,6 +898,7 @@ fn getDeclShdrIndex( fn updateDeclCode( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, sym_index: Symbol.Index, shdr_index: u32, @@ -900,13 +906,13 @@ fn updateDeclCode( stt_bits: u8, ) !void { const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; + const mod = pt.zcu; const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - const required_alignment = decl.getAlignment(mod).max( + const required_alignment = decl.getAlignment(pt).max( target_util.minFunctionAlignment(mod.getTarget()), ); @@ -994,19 +1000,20 @@ fn updateDeclCode( fn updateTlv( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, sym_index: Symbol.Index, shndx: u32, code: []const u8, ) !void { - const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); - const required_alignment = decl.getAlignment(mod); + const required_alignment = decl.getAlignment(pt); const sym = elf_file.symbol(sym_index); const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index]; @@ -1048,7 +1055,7 @@ fn updateTlv( pub fn updateFunc( self: *ZigObject, elf_file: *Elf, - mod: *Module, + pt: Zcu.PerThread, func_index: 
InternPool.Index, air: Air, liveness: Liveness, @@ -1056,6 +1063,7 @@ pub fn updateFunc( const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const gpa = elf_file.base.comp.gpa; const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; @@ -1068,29 +1076,19 @@ pub fn updateFunc( var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); - const res = if (decl_state) |*ds| - try codegen.generateFunction( - &elf_file.base, - decl.navSrcLoc(mod), - func_index, - air, - liveness, - &code_buffer, - .{ .dwarf = ds }, - ) - else - try codegen.generateFunction( - &elf_file.base, - decl.navSrcLoc(mod), - func_index, - air, - liveness, - &code_buffer, - .none, - ); + const res = try codegen.generateFunction( + &elf_file.base, + pt, + decl.navSrcLoc(mod), + func_index, + air, + liveness, + &code_buffer, + if (decl_state) |*ds| .{ .dwarf = ds } else .none, + ); const code = switch (res) { .ok => code_buffer.items, @@ -1102,12 +1100,12 @@ pub fn updateFunc( }; const shndx = try self.getDeclShdrIndex(elf_file, decl, code); - try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_FUNC); + try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_FUNC); if (decl_state) |*ds| { const sym = elf_file.symbol(sym_index); try self.dwarf.?.commitDeclState( - mod, + pt, decl_index, @intCast(sym.address(.{}, elf_file)), sym.atom(elf_file).?.size, @@ -1121,12 +1119,13 @@ pub fn updateFunc( pub fn updateDecl( self: *ZigObject, elf_file: *Elf, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) link.File.UpdateDeclError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const decl = mod.declPtr(decl_index); if (decl.val.getExternFunc(mod)) |_| { @@ -1150,19 +1149,19 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ + try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .dwarf = ds, }, .{ .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ + try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = sym_index, }); @@ -1177,14 +1176,14 @@ pub fn updateDecl( const shndx = try self.getDeclShdrIndex(elf_file, decl, code); if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0) - try self.updateTlv(elf_file, decl_index, sym_index, shndx, code) + try self.updateTlv(elf_file, pt, decl_index, sym_index, shndx, code) else - try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_OBJECT); + try self.updateDeclCode(elf_file, pt, decl_index, 
sym_index, shndx, code, elf.STT_OBJECT); if (decl_state) |*ds| { const sym = elf_file.symbol(sym_index); try self.dwarf.?.commitDeclState( - mod, + pt, decl_index, @intCast(sym.address(.{}, elf_file)), sym.atom(elf_file).?.size, @@ -1198,11 +1197,12 @@ pub fn updateDecl( fn updateLazySymbol( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, sym: link.File.LazySymbol, symbol_index: Symbol.Index, ) !void { - const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1211,7 +1211,7 @@ fn updateLazySymbol( const name_str_index = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(mod), + sym.ty.fmt(pt), }); defer gpa.free(name); break :blk try self.strtab.insert(gpa, name); @@ -1220,6 +1220,7 @@ fn updateLazySymbol( const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &elf_file.base, + pt, src, sym, &required_alignment, @@ -1273,6 +1274,7 @@ fn updateLazySymbol( pub fn lowerUnnamedConst( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex, ) !u32 { @@ -1291,9 +1293,10 @@ pub fn lowerUnnamedConst( const ty = val.typeOf(mod); const sym_index = switch (try self.lowerConst( elf_file, + pt, name, val, - ty.abiAlignment(mod), + ty.abiAlignment(pt), elf_file.zig_data_rel_ro_section_index.?, decl.navSrcLoc(mod), )) { @@ -1318,20 +1321,21 @@ const LowerConstResult = union(enum) { fn lowerConst( self: *ZigObject, elf_file: *Elf, + pt: Zcu.PerThread, name: []const u8, val: Value, required_alignment: InternPool.Alignment, output_section_index: u32, src_loc: Module.LazySrcLoc, ) !LowerConstResult { - const gpa = elf_file.base.comp.gpa; + const gpa = pt.zcu.gpa; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); const sym_index = try self.addAtom(elf_file); - const res = try codegen.generateSymbol(&elf_file.base, src_loc, val, &code_buffer, .{ + const res = try codegen.generateSymbol(&elf_file.base, pt, src_loc, val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = sym_index, @@ -1373,13 +1377,14 @@ fn lowerConst( pub fn updateExports( self: *ZigObject, elf_file: *Elf, - mod: *Module, + pt: Zcu.PerThread, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const gpa = elf_file.base.comp.gpa; const metadata = switch (exported) { .decl_index => |decl_index| blk: { @@ -1388,7 +1393,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src); + const res = try self.lowerAnonDecl(elf_file, pt, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 3dd3d07e6bc0..d0c78bc2c2e7 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -360,11 +360,11 @@ pub fn deinit(self: *MachO) void { self.unwind_records.deinit(gpa); } -pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { - try self.flushModule(arena, prog_node); +pub fn flush(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { + try 
self.flushModule(arena, tid, prog_node); } -pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -391,7 +391,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) // --verbose-link if (comp.verbose_link) try self.dumpArgv(comp); - if (self.getZigObject()) |zo| try zo.flushModule(self); + if (self.getZigObject()) |zo| try zo.flushModule(self, tid); if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path); if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path); @@ -3178,24 +3178,24 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void { try self.base.file.?.pwriteAll(buffer.items, offset); } -pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); - return self.getZigObject().?.updateFunc(self, mod, func_index, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness); + return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness); } -pub fn lowerUnnamedConst(self: *MachO, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return self.getZigObject().?.lowerUnnamedConst(self, val, decl_index); +pub fn lowerUnnamedConst(self: *MachO, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { + return self.getZigObject().?.lowerUnnamedConst(self, pt, val, decl_index); } -pub fn updateDecl(self: *MachO, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); - return self.getZigObject().?.updateDecl(self, mod, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); + return self.getZigObject().?.updateDecl(self, pt, decl_index); } pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void { @@ -3205,15 +3205,15 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPoo pub fn updateExports( self: *MachO, - mod: *Module, + pt: Zcu.PerThread, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); - return self.getZigObject().?.updateExports(self, mod, exported, export_indices); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, 
export_indices); + return self.getZigObject().?.updateExports(self, pt, exported, export_indices); } pub fn deleteExport( @@ -3237,11 +3237,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info: pub fn lowerAnonDecl( self: *MachO, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, ) !codegen.Result { - return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); + return self.getZigObject().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc); } pub fn getAnonDeclVAddr(self: *MachO, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index bb5ded654d0d..ffe362038d52 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -425,16 +425,17 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se return sect; } -pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void { +pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void { // Handle any lazy symbols that were emitted by incremental compilation. if (self.lazy_syms.getPtr(.none)) |metadata| { - const zcu = macho_file.base.comp.module.?; + const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid }; // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. if (metadata.text_state != .unused) self.updateLazySymbol( macho_file, - link.File.LazySymbol.initDecl(.code, null, zcu), + pt, + link.File.LazySymbol.initDecl(.code, null, pt.zcu), metadata.text_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -442,7 +443,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void { }; if (metadata.const_state != .unused) self.updateLazySymbol( macho_file, - link.File.LazySymbol.initDecl(.const_data, null, zcu), + pt, + link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), metadata.const_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -455,8 +457,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void { } if (self.dwarf) |*dw| { - const zcu = macho_file.base.comp.module.?; - try dw.flushModule(zcu); + const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid }; + try dw.flushModule(pt); if (self.debug_abbrev_dirty) { try dw.writeDbgAbbrev(); @@ -469,7 +471,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void { const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?]; const low_pc = text_section.addr; const high_pc = text_section.addr + text_section.size; - try dw.writeDbgInfoHeader(zcu, low_pc, high_pc); + try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc); self.debug_info_header_dirty = false; } @@ -570,6 +572,7 @@ pub fn getAnonDeclVAddr( pub fn lowerAnonDecl( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: Atom.Alignment, src_loc: Module.LazySrcLoc, @@ -578,7 +581,7 @@ pub fn lowerAnonDecl( const mod = macho_file.base.comp.module.?; const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(mod), + .none => ty.abiAlignment(pt), else => explicit_alignment, }; if (self.anon_decls.get(decl_val)) |metadata| { @@ -593,6 +596,7 @@ pub fn lowerAnonDecl( }) 
catch unreachable; const res = self.lowerConst( macho_file, + pt, name, Value.fromInterned(decl_val), decl_alignment, @@ -656,7 +660,7 @@ pub fn freeDecl(self: *ZigObject, macho_file: *MachO, decl_index: InternPool.Dec pub fn updateFunc( self: *ZigObject, macho_file: *MachO, - mod: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -664,7 +668,8 @@ pub fn updateFunc( const tracy = trace(@src()); defer tracy.end(); - const gpa = macho_file.base.comp.gpa; + const mod = pt.zcu; + const gpa = mod.gpa; const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -676,12 +681,13 @@ pub fn updateFunc( var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; const res = try codegen.generateFunction( &macho_file.base, + pt, decl.navSrcLoc(mod), func_index, air, @@ -700,12 +706,12 @@ pub fn updateFunc( }; const sect_index = try self.getDeclOutputSection(macho_file, decl, code); - try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code); + try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code); if (decl_state) |*ds| { const sym = macho_file.getSymbol(sym_index); try self.dwarf.?.commitDeclState( - mod, + pt, decl_index, sym.getAddress(.{}, macho_file), sym.getAtom(macho_file).?.size, @@ -719,12 +725,13 @@ pub fn updateFunc( pub fn updateDecl( self: *ZigObject, macho_file: *MachO, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) link.File.UpdateDeclError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const decl = mod.declPtr(decl_index); if (decl.val.getExternFunc(mod)) |_| { @@ -749,12 +756,12 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; - const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ + const res = try codegen.generateSymbol(&macho_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ .parent_atom_index = sym_index, }); @@ -772,15 +779,15 @@ pub fn updateDecl( else => false, }; if (is_threadlocal) { - try self.updateTlv(macho_file, decl_index, sym_index, sect_index, code); + try self.updateTlv(macho_file, pt, decl_index, sym_index, sect_index, code); } else { - try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code); + try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code); } if (decl_state) |*ds| { const sym = macho_file.getSymbol(sym_index); try self.dwarf.?.commitDeclState( - mod, + pt, decl_index, sym.getAddress(.{}, macho_file), sym.getAtom(macho_file).?.size, @@ -794,19 +801,20 @@ pub fn updateDecl( fn updateDeclCode( self: 
*ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, sym_index: Symbol.Index, sect_index: u8, code: []const u8, ) !void { const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; + const mod = pt.zcu; const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - const required_alignment = decl.getAlignment(mod); + const required_alignment = decl.getAlignment(pt); const sect = &macho_file.sections.items(.header)[sect_index]; const sym = macho_file.getSymbol(sym_index); @@ -879,19 +887,20 @@ fn updateDeclCode( fn updateTlv( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, sym_index: Symbol.Index, sect_index: u8, code: []const u8, ) !void { - const mod = macho_file.base.comp.module.?; + const mod = pt.zcu; const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); const decl_name_slice = decl_name.toSlice(&mod.intern_pool); - const required_alignment = decl.getAlignment(mod); + const required_alignment = decl.getAlignment(pt); // 1. Lower TLV initializer const init_sym_index = try self.createTlvInitializer( @@ -1079,11 +1088,12 @@ fn getDeclOutputSection( pub fn lowerUnnamedConst( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex, ) !u32 { - const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; const gop = try self.unnamed_consts.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; @@ -1096,9 +1106,10 @@ pub fn lowerUnnamedConst( defer gpa.free(name); const sym_index = switch (try self.lowerConst( macho_file, + pt, name, val, - val.typeOf(mod).abiAlignment(mod), + val.typeOf(mod).abiAlignment(pt), macho_file.zig_const_sect_index.?, decl.navSrcLoc(mod), )) { @@ -1123,6 +1134,7 @@ const LowerConstResult = union(enum) { fn lowerConst( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, name: []const u8, val: Value, required_alignment: Atom.Alignment, @@ -1136,7 +1148,7 @@ fn lowerConst( const sym_index = try self.addAtom(macho_file); - const res = try codegen.generateSymbol(&macho_file.base, src_loc, val, &code_buffer, .{ + const res = try codegen.generateSymbol(&macho_file.base, pt, src_loc, val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = sym_index, @@ -1181,13 +1193,14 @@ fn lowerConst( pub fn updateExports( self: *ZigObject, macho_file: *MachO, - mod: *Module, + pt: Zcu.PerThread, exported: Module.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = pt.zcu; const gpa = macho_file.base.comp.gpa; const metadata = switch (exported) { .decl_index => |decl_index| blk: { @@ -1196,7 +1209,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src); + const res = try self.lowerAnonDecl(macho_file, pt, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1272,6 +1285,7 @@ pub fn updateExports( fn updateLazySymbol( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, lazy_sym: link.File.LazySymbol, symbol_index: Symbol.Index, ) !void { @@ 
-1285,7 +1299,7 @@ fn updateLazySymbol( const name_str_index = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(mod), + lazy_sym.ty.fmt(pt), }); defer gpa.free(name); break :blk try self.strtab.insert(gpa, name); @@ -1294,6 +1308,7 @@ fn updateLazySymbol( const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &macho_file.base, + pt, src, lazy_sym, &required_alignment, @@ -1431,10 +1446,11 @@ pub fn getOrCreateMetadataForDecl( pub fn getOrCreateMetadataForLazySymbol( self: *ZigObject, macho_file: *MachO, + pt: Zcu.PerThread, lazy_sym: link.File.LazySymbol, ) !Symbol.Index { - const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; + const mod = pt.zcu; + const gpa = mod.gpa; const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; @@ -1464,7 +1480,7 @@ pub fn getOrCreateMetadataForLazySymbol( metadata.state.* = .pending_flush; const symbol_index = metadata.symbol_index.*; // anyerror needs to be deferred until flushModule - if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, lazy_sym, symbol_index); + if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index); return symbol_index; } diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index aa9ea1b5cdfe..6d6179642dcc 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -13,8 +13,6 @@ const assert = std.debug.assert; const log = std.log.scoped(.link); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const link = @import("../link.zig"); @@ -84,35 +82,35 @@ pub fn deinit(self: *NvPtx) void { self.llvm_object.deinit(); } -pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { - try self.llvm_object.updateFunc(module, func_index, air, liveness); +pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { + try self.llvm_object.updateFunc(pt, func_index, air, liveness); } -pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: InternPool.DeclIndex) !void { - return self.llvm_object.updateDecl(module, decl_index); +pub fn updateDecl(self: *NvPtx, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + return self.llvm_object.updateDecl(pt, decl_index); } pub fn updateExports( self: *NvPtx, - module: *Module, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .nvptx) @panic("Attempted to compile for object format that was disabled by build configuration"); - return self.llvm_object.updateExports(module, exported, export_indices); + return self.llvm_object.updateExports(pt, exported, export_indices); } pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { return self.llvm_object.freeDecl(decl_index); } -pub fn flush(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { - return self.flushModule(arena, prog_node); +pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { + return self.flushModule(arena, tid, 
prog_node); } -pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) @panic("Attempted to compile for architecture that was disabled by build configuration"); @@ -121,5 +119,6 @@ pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) _ = arena; _ = self; _ = prog_node; + _ = tid; @panic("TODO: rewrite the NvPtx.flushModule function"); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 96fbaf42c7d3..827c974180f9 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -4,8 +4,6 @@ const Plan9 = @This(); const link = @import("../link.zig"); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); @@ -56,7 +54,7 @@ path_arena: std.heap.ArenaAllocator, /// of the function to know what file it came from. /// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place) fn_decl_table: std.AutoArrayHashMapUnmanaged( - *Module.File, + *Zcu.File, struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, FnDeclOutput) = .{} }, ) = .{}, /// the code is modified when relocated, so that is why it is mutable @@ -411,12 +409,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - const gpa = self.base.comp.gpa; + const mod = pt.zcu; + const gpa = mod.gpa; const target = self.base.comp.root_mod.resolved_target.result; const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; @@ -439,6 +438,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, + pt, decl.navSrcLoc(mod), func_index, air, @@ -468,13 +468,13 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: return self.updateFinish(decl_index); } -pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIndex) !u32 { - const gpa = self.base.comp.gpa; +pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { + const mod = pt.zcu; + const gpa = mod.gpa; _ = try self.seeDecl(decl_index); var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const mod = self.base.comp.module.?; const decl = mod.declPtr(decl_index); const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); @@ -505,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = new_atom_idx, @@ -530,8 +530,9 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn return new_atom_idx; } -pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDecl(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { const gpa = self.base.comp.gpa; + const mod = pt.zcu; const decl = mod.declPtr(decl_index); if (decl.isExtern(mod)) { @@ -544,7 +545,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ + const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { @@ -610,7 +611,7 @@ fn allocateGotIndex(self: *Plan9) usize { } } -pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; assert(!use_lld); @@ -621,7 +622,7 @@ pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link. .Obj => return error.TODOImplementPlan9Objs, .Lib => return error.TODOImplementWritingLibFiles, } - return self.flushModule(arena, prog_node); + return self.flushModule(arena, tid, prog_node); } pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void { @@ -669,20 +670,20 @@ fn atomCount(self: *Plan9) usize { return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + extern_atom_count + anon_atom_count; } -pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + _ = arena; // Has the same lifetime as the call to Compilation.update. 
const comp = self.base.comp; const gpa = comp.gpa; const target = comp.root_mod.resolved_target.result; - const tracy = trace(@src()); - defer tracy.end(); - const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); @@ -690,21 +691,26 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) defer assert(self.hdr.entry != 0x0); - const mod = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented; + const pt: Zcu.PerThread = .{ + .zcu = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented, + .tid = tid, + }; // finish up the lazy syms if (self.lazy_syms.getPtr(.none)) |metadata| { // Most lazy symbols can be updated on first use, but // anyerror needs to wait for everything to be flushed. if (metadata.text_state != .unused) self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.code, null, mod), + pt, + File.LazySymbol.initDecl(.code, null, pt.zcu), metadata.text_atom, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, else => |e| e, }; if (metadata.rodata_state != .unused) self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.const_data, null, mod), + pt, + File.LazySymbol.initDecl(.const_data, null, pt.zcu), metadata.rodata_atom, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -747,7 +753,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); + const decl = pt.zcu.declPtr(decl_index); const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; { @@ -767,7 +773,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) const off = self.getAddr(text_i, .t); text_i += out.code.len; atom.offset = off; - log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); + log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); if (!self.sixtyfour_bit) { mem.writeInt(u32, got_table[atom.got_index.? 
* 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian()); } else { @@ -775,7 +781,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) } self.syms.items[atom.sym_index.?].value = off; if (self.decl_exports.get(decl_index)) |export_indices| { - try self.addDeclExports(mod, decl_index, export_indices); + try self.addDeclExports(pt.zcu, decl_index, export_indices); } } } @@ -841,7 +847,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) } self.syms.items[atom.sym_index.?].value = off; if (self.decl_exports.get(decl_index)) |export_indices| { - try self.addDeclExports(mod, decl_index, export_indices); + try self.addDeclExports(pt.zcu, decl_index, export_indices); } } // write the unnamed constants after the other data decls @@ -1009,7 +1015,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) } fn addDeclExports( self: *Plan9, - mod: *Module, + mod: *Zcu, decl_index: InternPool.DeclIndex, export_indices: []const u32, ) !void { @@ -1025,7 +1031,7 @@ fn addDeclExports( if (!section_name.eqlSlice(".text", &mod.intern_pool) and !section_name.eqlSlice(".data", &mod.intern_pool)) { - try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create( + try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create( gpa, mod.declPtr(decl_index).navSrcLoc(mod), "plan9 does not support extra sections", @@ -1155,8 +1161,8 @@ pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index { pub fn updateExports( self: *Plan9, - module: *Module, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, export_indices: []const u32, ) !void { const gpa = self.base.comp.gpa; @@ -1173,11 +1179,11 @@ pub fn updateExports( }, } // all proper work is done in flush - _ = module; + _ = pt; } -pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index { - const gpa = self.base.comp.gpa; +pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol) !Atom.Index { + const gpa = pt.zcu.gpa; const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(self.base.comp.module.?)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); @@ -1198,14 +1204,13 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.In _ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self); // anyerror needs to be deferred until flushModule if (sym.getDecl(self.base.comp.module.?) != .none) { - try self.updateLazySymbolAtom(sym, atom); + try self.updateLazySymbolAtom(pt, sym, atom); } return atom; } -fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Index) !void { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; +fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void { + const gpa = pt.zcu.gpa; var required_alignment: InternPool.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1214,7 +1219,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind // create the symbol for the name const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(mod), + sym.ty.fmt(pt), }); const symbol: aout.Sym = .{ @@ -1225,9 +1230,10 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind self.syms.items[self.getAtomPtr(atom_index).sym_index.?] 
= symbol; // generate the code - const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; + const src = sym.ty.srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, + pt, src, sym, &required_alignment, @@ -1490,7 +1496,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } /// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { _ = self; _ = mod; _ = decl_index; @@ -1544,9 +1550,10 @@ pub fn getDeclVAddr( pub fn lowerAnonDecl( self: *Plan9, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !codegen.Result { _ = explicit_alignment; // This is basically the same as lowerUnnamedConst. @@ -1569,7 +1576,7 @@ pub fn lowerAnonDecl( gop.value_ptr.* = index; // we need to free name later var code_buffer = std.ArrayList(u8).init(gpa); - const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index }); + const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index }); const code = switch (res) { .ok => code_buffer.items, .fail => |em| return .{ .fail = em }, diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index d1a8ff96c6e5..14020433bf69 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -28,8 +28,6 @@ const assert = std.debug.assert; const log = std.log.scoped(.link); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const link = @import("../link.zig"); @@ -125,35 +123,36 @@ pub fn deinit(self: *SpirV) void { self.object.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const func = module.funcInfo(func_index); - const decl = module.declPtr(func.owner_decl); - log.debug("lowering function {}", .{decl.name.fmt(&module.intern_pool)}); + const func = pt.zcu.funcInfo(func_index); + const decl = pt.zcu.declPtr(func.owner_decl); + log.debug("lowering function {}", .{decl.name.fmt(&pt.zcu.intern_pool)}); - try self.object.updateFunc(module, func_index, air, liveness); + try self.object.updateFunc(pt, func_index, air, liveness); } -pub fn updateDecl(self: *SpirV, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDecl(self: *SpirV, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const decl = module.declPtr(decl_index); - log.debug("lowering declaration {}", .{decl.name.fmt(&module.intern_pool)}); + const decl = pt.zcu.declPtr(decl_index); + log.debug("lowering declaration {}", .{decl.name.fmt(&pt.zcu.intern_pool)}); - try self.object.updateDecl(pt, decl_index); } pub fn updateExports( self: *SpirV, - mod: *Module, - 
exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, export_indices: []const u32, ) !void { + const mod = pt.zcu; const decl_index = switch (exported) { .decl_index => |i| i, .value => |val| { @@ -196,11 +195,11 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void { _ = decl_index; } -pub fn flush(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { - return self.flushModule(arena, prog_node); +pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { + return self.flushModule(arena, tid, prog_node); } -pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -216,6 +215,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) const comp = self.base.comp; const gpa = comp.gpa; const target = comp.getTarget(); + _ = tid; try writeCapabilities(spv, target); try writeMemoryModel(spv, target); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index d14061fe78c1..4e661e33e4d6 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -29,8 +29,6 @@ const InternPool = @import("../InternPool.zig"); const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const Object = @import("Wasm/Object.zig"); const Symbol = @import("Wasm/Symbol.zig"); const Type = @import("../Type.zig"); @@ -1441,25 +1439,25 @@ pub fn deinit(wasm: *Wasm) void { wasm.files.deinit(gpa); } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); - try wasm.zigObjectPtr().?.updateFunc(wasm, mod, func_index, air, liveness); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness); + try wasm.zigObjectPtr().?.updateFunc(wasm, pt, func_index, air, liveness); } // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
-pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); - try wasm.zigObjectPtr().?.updateDecl(wasm, mod, decl_index); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); + try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index); } -pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { if (wasm.llvm_object) |_| return; try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); } @@ -1506,8 +1504,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { /// Lowers a constant typed value to a local symbol and atom. /// Returns the symbol index of the local /// The given `decl` is the parent decl whom owns the constant. -pub fn lowerUnnamedConst(wasm: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, val, decl_index); +pub fn lowerUnnamedConst(wasm: *Wasm, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { + return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, pt, val, decl_index); } /// Returns the symbol index from a symbol of which its flag is set global, @@ -1531,11 +1529,12 @@ pub fn getDeclVAddr( pub fn lowerAnonDecl( wasm: *Wasm, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: Alignment, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !codegen.Result { - return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc); + return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, pt, decl_val, explicit_alignment, src_loc); } pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { @@ -1553,15 +1552,15 @@ pub fn deleteExport( pub fn updateExports( wasm: *Wasm, - mod: *Module, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); - return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, export_indices); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices); + return wasm.zigObjectPtr().?.updateExports(wasm, pt, exported, export_indices); } pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void { @@ -2466,18 +2465,18 @@ fn appendDummySegment(wasm: *Wasm) !void { }); } -pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flush(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = wasm.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; if (use_lld) { - return wasm.linkWithLLD(arena, prog_node); + return wasm.linkWithLLD(arena, tid, prog_node); } - return wasm.flushModule(arena, prog_node); + return 
wasm.flushModule(arena, tid, prog_node); } /// Uses the in-house linker to link one or multiple object -and archive files into a WebAssembly binary. -pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2513,7 +2512,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) const wasi_exec_model = comp.config.wasi_exec_model; if (wasm.zigObjectPtr()) |zig_object| { - try zig_object.flushModule(wasm); + try zig_object.flushModule(wasm, tid); } // When the target os is WASI, we allow linking with WASI-LIBC @@ -3324,7 +3323,7 @@ fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void { } } -fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !void { +fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -3342,7 +3341,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !voi // If there is no Zig code to compile, then we should skip flushing the output file because it // will not be part of the linker line anyway. const module_obj_path: ?[]const u8 = if (comp.module != null) blk: { - try wasm.flushModule(arena, prog_node); + try wasm.flushModule(arena, tid, prog_node); if (fs.path.dirname(full_out_path)) |dirname| { break :blk try fs.path.join(arena, &.{ dirname, wasm.base.zcu_object_sub_path.? }); @@ -4009,8 +4008,8 @@ pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: s /// Returns the symbol index of the error name table. /// /// When the symbol does not yet exist, it will create a new one instead. 
-pub fn getErrorTableSymbol(wasm_file: *Wasm) !u32 { - const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file); +pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 { + const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file, pt); return @intFromEnum(sym_index); } diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index da38381cbb79..a693902743ae 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -241,9 +241,10 @@ pub fn allocateSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator) !Symbol.In pub fn updateDecl( zig_object: *ZigObject, wasm_file: *Wasm, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { + const mod = pt.zcu; const decl = mod.declPtr(decl_index); if (decl.val.getFunction(mod)) |_| { return; @@ -269,6 +270,7 @@ pub fn updateDecl( const res = try codegen.generateSymbol( &wasm_file.base, + pt, decl.navSrcLoc(mod), val, &code_writer, @@ -285,21 +287,21 @@ pub fn updateDecl( }, }; - return zig_object.finishUpdateDecl(wasm_file, decl_index, code); + return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code); } pub fn updateFunc( zig_object: *ZigObject, wasm_file: *Wasm, - mod: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { const gpa = wasm_file.base.comp.gpa; - const func = mod.funcInfo(func_index); + const func = pt.zcu.funcInfo(func_index); const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); + const decl = pt.zcu.declPtr(decl_index); const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -308,7 +310,8 @@ pub fn updateFunc( defer code_writer.deinit(); const result = try codegen.generateFunction( &wasm_file.base, - decl.navSrcLoc(mod), + pt, + decl.navSrcLoc(pt.zcu), func_index, air, liveness, @@ -320,29 +323,31 @@ pub fn updateFunc( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try pt.zcu.failed_analysis.put(gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; - return zig_object.finishUpdateDecl(wasm_file, decl_index, code); + return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code); } fn finishUpdateDecl( zig_object: *ZigObject, wasm_file: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, code: []const u8, ) !void { - const gpa = wasm_file.base.comp.gpa; - const zcu = wasm_file.base.comp.module.?; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const gpa = zcu.gpa; const decl = zcu.declPtr(decl_index); const decl_info = zig_object.decls_map.get(decl_index).?; const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); const full_name = try decl.fullyQualifiedName(zcu); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&zcu.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); @@ -382,7 +387,7 @@ fn finishUpdateDecl( // Will be freed upon freeing of decl or after cleanup of Wasm binary. 
const full_segment_name = try std.mem.concat(gpa, u8, &.{ segment_name, - full_name.toSlice(&zcu.intern_pool), + full_name.toSlice(ip), }); errdefer gpa.free(full_segment_name); sym.tag = .data; @@ -390,7 +395,7 @@ fn finishUpdateDecl( }, } if (code.len == 0) return; - atom.alignment = decl.getAlignment(zcu); + atom.alignment = decl.getAlignment(pt); } /// Creates and initializes a new segment in the 'Data' section. @@ -437,9 +442,10 @@ pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_ind pub fn lowerAnonDecl( zig_object: *ZigObject, wasm_file: *Wasm, + pt: Zcu.PerThread, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !codegen.Result { const gpa = wasm_file.base.comp.gpa; const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val); @@ -449,7 +455,7 @@ pub fn lowerAnonDecl( @intFromEnum(decl_val), }) catch unreachable; - switch (try zig_object.lowerConst(wasm_file, name, Value.fromInterned(decl_val), src_loc)) { + switch (try zig_object.lowerConst(wasm_file, pt, name, Value.fromInterned(decl_val), src_loc)) { .ok => |atom_index| zig_object.anon_decls.values()[gop.index] = atom_index, .fail => |em| return .{ .fail = em }, } @@ -469,9 +475,15 @@ pub fn lowerAnonDecl( /// Lowers a constant typed value to a local symbol and atom. /// Returns the symbol index of the local /// The given `decl` is the parent decl whom owns the constant. -pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 { - const gpa = wasm_file.base.comp.gpa; - const mod = wasm_file.base.comp.module.?; +pub fn lowerUnnamedConst( + zig_object: *ZigObject, + wasm_file: *Wasm, + pt: Zcu.PerThread, + val: Value, + decl_index: InternPool.DeclIndex, +) !u32 { + const mod = pt.zcu; + const gpa = mod.gpa; std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); @@ -494,7 +506,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d else decl.navSrcLoc(mod); - switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) { + switch (try zig_object.lowerConst(wasm_file, pt, name, val, decl_src)) { .ok => |atom_index| { try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index); return @intFromEnum(wasm_file.getAtom(atom_index).sym_index); @@ -509,10 +521,17 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d const LowerConstResult = union(enum) { ok: Atom.Index, - fail: *Module.ErrorMsg, + fail: *Zcu.ErrorMsg, }; -fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult { +fn lowerConst( + zig_object: *ZigObject, + wasm_file: *Wasm, + pt: Zcu.PerThread, + name: []const u8, + val: Value, + src_loc: Zcu.LazySrcLoc, +) !LowerConstResult { const gpa = wasm_file.base.comp.gpa; const mod = wasm_file.base.comp.module.?; @@ -526,7 +545,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V const code = code: { const atom = wasm_file.getAtomPtr(atom_index); - atom.alignment = ty.abiAlignment(mod); + atom.alignment = ty.abiAlignment(pt); const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name }); errdefer gpa.free(segment_name); zig_object.symbol(sym_index).* = .{ @@ -536,13 +555,14 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V .index = try 
zig_object.createDataSegment( gpa, segment_name, - ty.abiAlignment(mod), + ty.abiAlignment(pt), ), .virtual_address = undefined, }; const result = try codegen.generateSymbol( &wasm_file.base, + pt, src_loc, val, &value_bytes, @@ -568,7 +588,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V /// Returns the symbol index of the error name table. /// /// When the symbol does not yet exist, it will create a new one instead. -pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Index { +pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread) !Symbol.Index { if (zig_object.error_table_symbol != .null) { return zig_object.error_table_symbol; } @@ -581,8 +601,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind const atom_index = try wasm_file.createAtom(sym_index, zig_object.index); const atom = wasm_file.getAtomPtr(atom_index); const slice_ty = Type.slice_const_u8_sentinel_0; - const mod = wasm_file.base.comp.module.?; - atom.alignment = slice_ty.abiAlignment(mod); + atom.alignment = slice_ty.abiAlignment(pt); const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table"); const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table"); @@ -604,7 +623,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind /// /// This creates a table that consists of pointers and length to each error name. /// The table is what is being pointed to within the runtime bodies that are generated. -fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void { +fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void { if (zig_object.error_table_symbol == .null) return; const gpa = wasm_file.base.comp.gpa; const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = zig_object.error_table_symbol }).?; @@ -631,11 +650,11 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void { // Addend for each relocation to the table var addend: u32 = 0; - const mod = wasm_file.base.comp.module.?; - for (mod.global_error_set.keys()) |error_name| { + const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid }; + for (pt.zcu.global_error_set.keys()) |error_name| { const atom = wasm_file.getAtomPtr(atom_index); - const error_name_slice = error_name.toSlice(&mod.intern_pool); + const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool); const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated const slice_ty = Type.slice_const_u8_sentinel_0; @@ -650,14 +669,14 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void { .offset = offset, .addend = @intCast(addend), }); - atom.size += @intCast(slice_ty.abiSize(mod)); + atom.size += @intCast(slice_ty.abiSize(pt)); addend += len; // as we updated the error name table, we now store the actual name within the names atom try names_atom.code.ensureUnusedCapacity(gpa, len); names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]); - log.debug("Populated error name: '{}'", .{error_name.fmt(&mod.intern_pool)}); + log.debug("Populated error name: '{}'", .{error_name.fmt(&pt.zcu.intern_pool)}); } names_atom.size = addend; zig_object.error_names_atom = names_atom_index; @@ -858,10 +877,11 @@ pub fn deleteExport( pub fn updateExports( zig_object: *ZigObject, wasm_file: *Wasm, - mod: *Module, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: 
Zcu.Exported, export_indices: []const u32, ) !void { + const mod = pt.zcu; const decl_index = switch (exported) { .decl_index => |i| i, .value => |val| { @@ -880,7 +900,7 @@ pub fn updateExports( for (export_indices) |export_idx| { const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { - try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( gpa, decl.navSrcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", @@ -913,7 +933,7 @@ pub fn updateExports( }, .strong => {}, // symbols are strong by default .link_once => { - try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( gpa, decl.navSrcLoc(mod), "Unimplemented: LinkOnce", @@ -1096,7 +1116,7 @@ pub fn createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde return atom_index; } -pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { if (zig_object.dwarf) |*dw| { const decl = mod.declPtr(decl_index); const decl_name = try decl.fullyQualifiedName(mod); @@ -1228,8 +1248,8 @@ fn appendFunction(zig_object: *ZigObject, gpa: std.mem.Allocator, func: std.wasm return index; } -pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm) !void { - try zig_object.populateErrorNameTable(wasm_file); +pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void { + try zig_object.populateErrorNameTable(wasm_file, tid); try zig_object.setupErrorsLen(wasm_file); } @@ -1248,8 +1268,6 @@ const File = @import("file.zig").File; const InternPool = @import("../../InternPool.zig"); const Liveness = @import("../../Liveness.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const StringTable = @import("../StringTable.zig"); const Symbol = @import("Symbol.zig"); const Type = @import("../../Type.zig"); diff --git a/src/main.zig b/src/main.zig index 3ba1276abfbf..9fd9087b639d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -172,7 +172,7 @@ pub fn main() anyerror!void { } // We would prefer to use raw libc allocator here, but cannot // use it if it won't support the alignment we need. - if (@alignOf(std.c.max_align_t) < @alignOf(i128)) { + if (@alignOf(std.c.max_align_t) < @max(@alignOf(i128), std.atomic.cache_line)) { break :gpa std.heap.c_allocator; } break :gpa std.heap.raw_c_allocator; @@ -3092,7 +3092,7 @@ fn buildOutputType( defer emit_implib_resolved.deinit(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa }); + try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); defer thread_pool.deinit(); var cleanup_local_cache_dir: ?fs.Dir = null; @@ -4895,7 +4895,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa }); + try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); defer thread_pool.deinit(); // Dummy http client that is not actually used when only_core_functionality is enabled. 
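// A runnable sketch of what `.track_ids = true` buys, assuming the
// std.Thread.Pool API this series relies on: the pool assigns each worker a
// stable id, and `spawnWgId` prepends that id as the callback's first
// argument. This is the mechanism behind the `tid: usize` parameter that
// `workerAstGenFile` gains in the Compilation.zig hunks further below.
const std = @import("std");

fn worker(tid: usize, job: u32) void {
    std.debug.print("thread {d} ran job {d}\n", .{ tid, job });
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();

    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa_state.allocator(), .track_ids = true });
    defer pool.deinit();

    var wg: std.Thread.WaitGroup = .{};
    for (0..4) |i| pool.spawnWgId(&wg, worker, .{@as(u32, @intCast(i))});
    pool.waitAndWork(&wg);
}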
@@ -5329,7 +5329,7 @@ fn jitCmd( defer global_cache_directory.handle.close(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa }); + try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); defer thread_pool.deinit(); var child_argv: std.ArrayListUnmanaged([]const u8) = .{}; diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 1806e6ba1915..0ca2d1d31760 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -54,46 +54,44 @@ pub const MutableValue = union(enum) { payload: *MutableValue, }; - pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value { - const ip = &zcu.intern_pool; - const gpa = zcu.gpa; + pub fn intern(mv: MutableValue, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value { return Value.fromInterned(switch (mv) { .interned => |ip_index| ip_index, - .eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{ + .eu_payload => |sv| try pt.intern(.{ .error_union = .{ .ty = sv.ty, - .val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() }, + .val = .{ .payload = (try sv.child.intern(pt, arena)).toIntern() }, } }), - .opt_payload => |sv| try ip.get(gpa, .{ .opt = .{ + .opt_payload => |sv| try pt.intern(.{ .opt = .{ .ty = sv.ty, - .val = (try sv.child.intern(zcu, arena)).toIntern(), + .val = (try sv.child.intern(pt, arena)).toIntern(), } }), - .repeated => |sv| try ip.get(gpa, .{ .aggregate = .{ + .repeated => |sv| try pt.intern(.{ .aggregate = .{ .ty = sv.ty, - .storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() }, + .storage = .{ .repeated_elem = (try sv.child.intern(pt, arena)).toIntern() }, } }), - .bytes => |b| try ip.get(gpa, .{ .aggregate = .{ + .bytes => |b| try pt.intern(.{ .aggregate = .{ .ty = b.ty, - .storage = .{ .bytes = try ip.getOrPutString(gpa, b.data, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) }, } }), .aggregate => |a| { const elems = try arena.alloc(InternPool.Index, a.elems.len); for (a.elems, elems) |mut_elem, *interned_elem| { - interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern(); + interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern(); } - return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{ + return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = a.ty, .storage = .{ .elems = elems }, } })); }, - .slice => |s| try ip.get(gpa, .{ .slice = .{ + .slice => |s| try pt.intern(.{ .slice = .{ .ty = s.ty, - .ptr = (try s.ptr.intern(zcu, arena)).toIntern(), - .len = (try s.len.intern(zcu, arena)).toIntern(), + .ptr = (try s.ptr.intern(pt, arena)).toIntern(), + .len = (try s.len.intern(pt, arena)).toIntern(), } }), - .un => |u| try ip.get(gpa, .{ .un = .{ + .un => |u| try pt.intern(.{ .un = .{ .ty = u.ty, .tag = u.tag, - .val = (try u.payload.intern(zcu, arena)).toIntern(), + .val = (try u.payload.intern(pt, arena)).toIntern(), } }), }); } @@ -108,13 +106,13 @@ pub const MutableValue = union(enum) { /// If `!allow_repeated`, the `repeated` representation will not be used. 
pub fn unintern( mv: *MutableValue, - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, allow_bytes: bool, allow_repeated: bool, ) Allocator.Error!void { + const zcu = pt.zcu; const ip = &zcu.intern_pool; - const gpa = zcu.gpa; switch (mv.*) { .interned => |ip_index| switch (ip.indexToKey(ip_index)) { .opt => |opt| if (opt.val != .none) { @@ -170,7 +168,7 @@ pub const MutableValue = union(enum) { } else { const mut_elems = try arena.alloc(MutableValue, len); for (bytes.toSlice(len, ip), mut_elems) |b, *mut_elem| { - mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{ + mut_elem.* = .{ .interned = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = b }, } }) }; @@ -221,12 +219,12 @@ pub const MutableValue = union(enum) { switch (type_tag) { .Array, .Vector => { const elem_ty = ip.childType(ty_ip); - const undef_elem = try ip.get(gpa, .{ .undef = elem_ty }); + const undef_elem = try pt.intern(.{ .undef = elem_ty }); @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem }); }, .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| { const field_ty = ty.structFieldType(i, zcu).toIntern(); - mut_elem.* = .{ .interned = try ip.get(gpa, .{ .undef = field_ty }) }; + mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) }; }, else => unreachable, } @@ -238,7 +236,7 @@ pub const MutableValue = union(enum) { } else { const repeated_val = try arena.create(MutableValue); repeated_val.* = .{ - .interned = try ip.get(gpa, .{ .undef = ip.childType(ty_ip) }), + .interned = try pt.intern(.{ .undef = ip.childType(ty_ip) }), }; mv.* = .{ .repeated = .{ .ty = ty_ip, @@ -248,11 +246,8 @@ pub const MutableValue = union(enum) { }, .Union => { const payload = try arena.create(MutableValue); - const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu); - payload.* = .{ .interned = try ip.get( - gpa, - .{ .undef = backing_ty.toIntern() }, - ) }; + const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt); + payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) }; mv.* = .{ .un = .{ .ty = ty_ip, .tag = .none, @@ -264,8 +259,8 @@ pub const MutableValue = union(enum) { if (ptr_ty.flags.size != .Slice) return; const ptr = try arena.create(MutableValue); const len = try arena.create(MutableValue); - ptr.* = .{ .interned = try ip.get(gpa, .{ .undef = ip.slicePtrType(ty_ip) }) }; - len.* = .{ .interned = try ip.get(gpa, .{ .undef = .usize_type }) }; + ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) }; + len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) }; mv.* = .{ .slice = .{ .ty = ty_ip, .ptr = ptr, @@ -279,7 +274,7 @@ pub const MutableValue = union(enum) { .bytes => |bytes| if (!allow_bytes) { const elems = try arena.alloc(MutableValue, bytes.data.len); for (bytes.data, elems) |byte, *interned_byte| { - interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{ + interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = byte }, } }) }; @@ -298,22 +293,22 @@ pub const MutableValue = union(enum) { /// The returned pointer is valid until the representation of `mv` changes. pub fn elem( mv: *MutableValue, - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, field_idx: usize, ) Allocator.Error!*MutableValue { + const zcu = pt.zcu; const ip = &zcu.intern_pool; - const gpa = zcu.gpa; // Convert to the `aggregate` representation. 
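// An aside on the mechanical rewrite running through this file: every
// `ip.get(gpa, key)` becomes `pt.intern(key)`, a wrapper that supplies the
// allocator (and, in the real compiler, the thread id) itself. Below is a
// self-contained toy with the same shape; `ToyPool`, `Key`, and `Index` are
// simplified stand-ins for the real InternPool types.
const std = @import("std");

const ToyPool = struct {
    const Key = u64;
    const Index = u32;

    map: std.AutoHashMapUnmanaged(Key, Index) = .{},

    fn get(pool: *ToyPool, gpa: std.mem.Allocator, key: Key) !Index {
        const gop = try pool.map.getOrPut(gpa, key);
        // Newly seen keys get the next index; existing keys dedupe.
        if (!gop.found_existing) gop.value_ptr.* = pool.map.count() - 1;
        return gop.value_ptr.*;
    }
};

const ToyZcu = struct {
    gpa: std.mem.Allocator,
    pool: ToyPool = .{},

    const PerThread = struct {
        zcu: *ToyZcu,

        // Call sites shrink from `ip.get(gpa, key)` to `pt.intern(key)`.
        fn intern(pt: PerThread, key: ToyPool.Key) !ToyPool.Index {
            return pt.zcu.pool.get(pt.zcu.gpa, key);
        }
    };
};

test "interning dedupes" {
    var zcu: ToyZcu = .{ .gpa = std.testing.allocator };
    defer zcu.pool.map.deinit(std.testing.allocator);
    const pt: ToyZcu.PerThread = .{ .zcu = &zcu };
    try std.testing.expectEqual(try pt.intern(42), try pt.intern(42));
}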
switch (mv.*) { .eu_payload, .opt_payload, .un => unreachable, .interned => { - try mv.unintern(zcu, arena, false, false); + try mv.unintern(pt, arena, false, false); }, .bytes => |bytes| { const elems = try arena.alloc(MutableValue, bytes.data.len); for (bytes.data, elems) |byte, *interned_byte| { - interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{ + interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = byte }, } }) }; @@ -351,14 +346,15 @@ pub const MutableValue = union(enum) { /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`. pub fn setElem( mv: *MutableValue, - zcu: *Zcu, + pt: Zcu.PerThread, arena: Allocator, field_idx: usize, field_val: MutableValue, ) Allocator.Error!void { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const is_trivial_int = field_val.isTrivialInt(zcu); - try mv.unintern(zcu, arena, is_trivial_int, true); + try mv.unintern(pt, arena, is_trivial_int, true); switch (mv.*) { .interned, .eu_payload, @@ -373,7 +369,7 @@ pub const MutableValue = union(enum) { .bytes => |b| { assert(is_trivial_int); assert(field_val.typeOf(zcu).toIntern() == .u8_type); - b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu)); + b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt)); }, .repeated => |r| { if (field_val.eqlTrivial(r.child.*)) return; @@ -386,9 +382,9 @@ pub const MutableValue = union(enum) { { // We can use the `bytes` representation. const bytes = try arena.alloc(u8, @intCast(len_inc_sent)); - const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu); + const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt); @memset(bytes, @intCast(repeated_byte)); - bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu)); + bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt)); mv.* = .{ .bytes = .{ .ty = r.ty, .data = bytes, @@ -435,7 +431,7 @@ pub const MutableValue = union(enum) { } else { const bytes = try arena.alloc(u8, a.elems.len); for (a.elems, bytes) |elem_val, *b| { - b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu)); + b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt)); } mv.* = .{ .bytes = .{ .ty = a.ty, @@ -451,7 +447,7 @@ pub const MutableValue = union(enum) { /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`. 
pub fn getElem( mv: MutableValue, - zcu: *Zcu, + pt: Zcu.PerThread, field_idx: usize, ) Allocator.Error!MutableValue { return switch (mv) { @@ -459,16 +455,16 @@ pub const MutableValue = union(enum) { .opt_payload, => unreachable, .interned => |ip_index| { - const ty = Type.fromInterned(zcu.intern_pool.typeOf(ip_index)); - switch (ty.zigTypeTag(zcu)) { - .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(zcu, field_idx)).toIntern() }, - .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(zcu, field_idx)).toIntern() }, + const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(ip_index)); + switch (ty.zigTypeTag(pt.zcu)) { + .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() }, + .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() }, .Pointer => { - assert(ty.isSlice(zcu)); + assert(ty.isSlice(pt.zcu)); return switch (field_idx) { - Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(zcu).toIntern() }, - Value.slice_len_index => .{ .interned = switch (zcu.intern_pool.indexToKey(ip_index)) { - .undef => try zcu.intern(.{ .undef = .usize_type }), + Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() }, + Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) { + .undef => try pt.intern(.{ .undef = .usize_type }), .slice => |s| s.len, else => unreachable, } }, @@ -487,7 +483,7 @@ pub const MutableValue = union(enum) { Value.slice_len_index => s.len.*, else => unreachable, }, - .bytes => |b| .{ .interned = try zcu.intern(.{ .int = .{ + .bytes => |b| .{ .interned = try pt.intern(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = b.data[field_idx] }, } }) }, diff --git a/src/print_air.zig b/src/print_air.zig index 85fbe87ec9d1..d85750bd2795 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -9,7 +9,7 @@ const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); const InternPool = @import("InternPool.zig"); -pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void { +pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void { const instruction_bytes = air.instructions.len * // Here we don't use @sizeOf(Air.Inst.Data) because it would include // the debug safety tag but we want to measure release size. 
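// The struct-level version of the same refactor: `Writer` stops storing a
// bare module pointer and instead stores the `pt` handle, deriving the
// allocator at init time (`.gpa = pt.zcu.gpa`, as the hunk below shows).
// A minimal sketch with stand-in types:
const std = @import("std");

const Ctx = struct {
    gpa: std.mem.Allocator,
    const PerThread = struct { ctx: *Ctx };
};

const Writer = struct {
    pt: Ctx.PerThread, // replaces the old `module: *Ctx` field
    gpa: std.mem.Allocator,
    indent: u32,
};

fn initWriter(pt: Ctx.PerThread) Writer {
    return .{ .pt = pt, .gpa = pt.ctx.gpa, .indent = 2 };
}

test "writer derives its allocator from the handle" {
    var ctx: Ctx = .{ .gpa = std.testing.allocator };
    const w = initWriter(.{ .ctx = &ctx });
    try std.testing.expect(w.indent == 2);
}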
@@ -42,8 +42,8 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void // zig fmt: on var writer: Writer = .{ - .module = module, - .gpa = module.gpa, + .pt = pt, + .gpa = pt.zcu.gpa, .air = air, .liveness = liveness, .indent = 2, @@ -55,13 +55,13 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void pub fn writeInst( stream: anytype, inst: Air.Inst.Index, - module: *Zcu, + pt: Zcu.PerThread, air: Air, liveness: ?Liveness, ) void { var writer: Writer = .{ - .module = module, - .gpa = module.gpa, + .pt = pt, + .gpa = pt.zcu.gpa, .air = air, .liveness = liveness, .indent = 2, @@ -70,16 +70,16 @@ pub fn writeInst( writer.writeInst(stream, inst) catch return; } -pub fn dump(module: *Zcu, air: Air, liveness: ?Liveness) void { - write(std.io.getStdErr().writer(), module, air, liveness); +pub fn dump(pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void { + write(std.io.getStdErr().writer(), pt, air, liveness); } -pub fn dumpInst(inst: Air.Inst.Index, module: *Zcu, air: Air, liveness: ?Liveness) void { - writeInst(std.io.getStdErr().writer(), inst, module, air, liveness); +pub fn dumpInst(inst: Air.Inst.Index, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void { + writeInst(std.io.getStdErr().writer(), inst, pt, air, liveness); } const Writer = struct { - module: *Zcu, + pt: Zcu.PerThread, gpa: Allocator, air: Air, liveness: ?Liveness, @@ -345,7 +345,7 @@ const Writer = struct { } fn writeType(w: *Writer, s: anytype, ty: Type) !void { - return ty.print(s, w.module); + return ty.print(s, w.pt); } fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -424,7 +424,7 @@ const Writer = struct { } fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const mod = w.module; + const mod = w.pt.zcu; const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const vector_ty = ty_pl.ty.toType(); const len = @as(usize, @intCast(vector_ty.arrayLen(mod))); @@ -504,7 +504,7 @@ const Writer = struct { } fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const mod = w.module; + const mod = w.pt.zcu; const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; @@ -947,11 +947,11 @@ const Writer = struct { if (@intFromEnum(operand) < InternPool.static_len) { return s.print("@{}", .{operand}); } else if (operand.toInterned()) |ip_index| { - const mod = w.module; - const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf()); + const pt = w.pt; + const ty = Type.fromInterned(pt.zcu.intern_pool.indexToKey(ip_index).typeOf()); try s.print("<{}, {}>", .{ - ty.fmt(mod), - Value.fromInterned(ip_index).fmtValue(mod, null), + ty.fmt(pt), + Value.fromInterned(ip_index).fmtValue(pt, null), }); } else { return w.writeInstIndex(s, operand.toIndex().?, dies); @@ -970,7 +970,7 @@ const Writer = struct { } fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { - const mod = w.module; + const mod = w.pt.zcu; return w.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/print_value.zig b/src/print_value.zig index 394f021049ac..19e70d0564b0 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -5,8 +5,6 @@ const std = @import("std"); const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); const Allocator = std.mem.Allocator; @@ -17,7 +15,7 @@ const max_string_len = 256; pub const FormatContext = struct { val: Value, - mod: *Module, + pt: Zcu.PerThread, opt_sema: ?*Sema, depth: u8, }; @@ -30,7 +28,7 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return print(ctx.val, writer, ctx.depth, ctx.mod, ctx.opt_sema) catch |err| switch (err) { + return print(ctx.val, writer, ctx.depth, ctx.pt, ctx.opt_sema) catch |err| switch (err) { error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function error.ComptimeBreak, error.ComptimeReturn => unreachable, error.AnalysisFail => unreachable, // TODO: re-evaluate when we use `opt_sema` more fully @@ -42,10 +40,11 @@ pub fn print( val: Value, writer: anytype, level: u8, - mod: *Module, + pt: Zcu.PerThread, /// If this `Sema` is provided, we will recurse through pointers where possible to provide friendly output. opt_sema: ?*Sema, -) (@TypeOf(writer).Error || Module.CompileError)!void { +) (@TypeOf(writer).Error || Zcu.CompileError)!void { + const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(val.toIntern())) { .int_type, @@ -64,7 +63,7 @@ pub fn print( .func_type, .error_set_type, .inferred_error_set_type, - => try Type.print(val.toType(), writer, mod), + => try Type.print(val.toType(), writer, pt), .undef => try writer.writeAll("undefined"), .simple_value => |simple_value| switch (simple_value) { .void => try writer.writeAll("{}"), @@ -82,13 +81,13 @@ pub fn print( .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), .lazy_align => |ty| if (opt_sema != null) { - const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar; + const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar; try writer.print("{}", .{a.toByteUnits() orelse 0}); - } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}), + } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}), .lazy_size => |ty| if (opt_sema != null) { - const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar; + const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar; try writer.print("{}", .{s}); - } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}), + } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}), }, .err => |err| try writer.print("error.{}", .{ err.name.fmt(ip), @@ -97,7 +96,7 @@ pub fn print( .err_name => |err_name| try writer.print("error.{}", .{ err_name.fmt(ip), }), - .payload => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema), + .payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema), }, .enum_literal => |enum_literal| try writer.print(".{}", .{ enum_literal.fmt(ip), @@ -111,7 +110,7 @@ pub fn print( return writer.writeAll("@enumFromInt(...)"); } try writer.writeAll("@enumFromInt("); - try print(Value.fromInterned(enum_tag.int), writer, level - 1, mod, opt_sema); + try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema); try writer.writeAll(")"); }, .empty_enum_value => try writer.writeAll("(empty enum value)"), @@ -128,12 +127,12 @@ pub fn print( // TODO: eventually we want to load the slice as an array with `opt_sema`, but that's // currently not possible without e.g. triggering compile errors. 
} - try printPtr(Value.fromInterned(slice.ptr), writer, level, mod, opt_sema); + try printPtr(Value.fromInterned(slice.ptr), writer, level, pt, opt_sema); try writer.writeAll("[0.."); if (level == 0) { try writer.writeAll("(...)"); } else { - try print(Value.fromInterned(slice.len), writer, level - 1, mod, opt_sema); + try print(Value.fromInterned(slice.len), writer, level - 1, pt, opt_sema); } try writer.writeAll("]"); }, @@ -147,28 +146,28 @@ pub fn print( // TODO: eventually we want to load the pointer with `opt_sema`, but that's // currently not possible without e.g. triggering compile errors. } - try printPtr(val, writer, level, mod, opt_sema); + try printPtr(val, writer, level, pt, opt_sema); }, .opt => |opt| switch (opt.val) { .none => try writer.writeAll("null"), - else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema), + else => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema), }, - .aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, mod, opt_sema), + .aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, opt_sema), .un => |un| { if (level == 0) { try writer.writeAll(".{ ... }"); return; } if (un.tag == .none) { - const backing_ty = try val.typeOf(mod).unionBackingType(mod); - try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(mod)}); - try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema); + const backing_ty = try val.typeOf(mod).unionBackingType(pt); + try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(pt)}); + try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema); try writer.writeAll("))"); } else { try writer.writeAll(".{ "); - try print(Value.fromInterned(un.tag), writer, level - 1, mod, opt_sema); + try print(Value.fromInterned(un.tag), writer, level - 1, pt, opt_sema); try writer.writeAll(" = "); - try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema); + try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema); try writer.writeAll(" }"); } }, @@ -182,13 +181,14 @@ fn printAggregate( is_ref: bool, writer: anytype, level: u8, - zcu: *Zcu, + pt: Zcu.PerThread, opt_sema: ?*Sema, -) (@TypeOf(writer).Error || Module.CompileError)!void { +) (@TypeOf(writer).Error || Zcu.CompileError)!void { if (level == 0) { if (is_ref) try writer.writeByte('&'); return writer.writeAll(".{ ... 
}"); } + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty = Type.fromInterned(aggregate.ty); switch (ty.zigTypeTag(zcu)) { @@ -203,7 +203,7 @@ fn printAggregate( if (i != 0) try writer.writeAll(", "); const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?; try writer.print(".{i} = ", .{field_name.fmt(ip)}); - try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema); + try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema); } try writer.writeAll(" }"); return; @@ -230,7 +230,7 @@ fn printAggregate( if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str; const elem_val = Value.fromInterned(aggregate.storage.values()[0]); if (elem_val.isUndef(zcu)) break :one_byte_str; - const byte = elem_val.toUnsignedInt(zcu); + const byte = elem_val.toUnsignedInt(pt); try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})}); if (!is_ref) try writer.writeAll(".*"); return; @@ -253,7 +253,7 @@ fn printAggregate( const max_len = @min(len, max_aggregate_items); for (0..max_len) |i| { if (i != 0) try writer.writeAll(", "); - try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema); + try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema); } if (len > max_aggregate_items) { try writer.writeAll(", ..."); @@ -261,8 +261,8 @@ fn printAggregate( return writer.writeAll(" }"); } -fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void { - const ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) { +fn printPtr(ptr_val: Value, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void { + const ptr = switch (pt.zcu.intern_pool.indexToKey(ptr_val.toIntern())) { .undef => return writer.writeAll("undefined"), .ptr => |ptr| ptr, else => unreachable, @@ -270,32 +270,33 @@ fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*S if (ptr.base_addr == .anon_decl) { // If the value is an aggregate, we can potentially print it more nicely. - switch (zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) { + switch (pt.zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) { .aggregate => |agg| return printAggregate( Value.fromInterned(ptr.base_addr.anon_decl.val), agg, true, writer, level, - zcu, + pt, opt_sema, ), else => {}, } } - var arena = std.heap.ArenaAllocator.init(zcu.gpa); + var arena = std.heap.ArenaAllocator.init(pt.zcu.gpa); defer arena.deinit(); - const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), zcu, opt_sema); - try printPtrDerivation(derivation, writer, level, zcu, opt_sema); + const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, opt_sema); + try printPtrDerivation(derivation, writer, level, pt, opt_sema); } /// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value. 
-fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void { +fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void { + const zcu = pt.zcu; const ip = &zcu.intern_pool; switch (derivation) { .int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{ - int.ptr_ty.fmt(zcu), + int.ptr_ty.fmt(pt), int.addr, }), .decl_ptr => |decl| { @@ -303,33 +304,33 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve }, .anon_decl_ptr => |anon| { const ty = Value.fromInterned(anon.val).typeOf(zcu); - try writer.print("@as({}, ", .{ty.fmt(zcu)}); - try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema); + try writer.print("@as({}, ", .{ty.fmt(pt)}); + try print(Value.fromInterned(anon.val), writer, level - 1, pt, opt_sema); try writer.writeByte(')'); }, .comptime_alloc_ptr => |info| { - try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(zcu)}); - try print(info.val, writer, level - 1, zcu, opt_sema); + try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(pt)}); + try print(info.val, writer, level - 1, pt, opt_sema); try writer.writeByte(')'); }, .comptime_field_ptr => |val| { const ty = val.typeOf(zcu); - try writer.print("@as({}, ", .{ty.fmt(zcu)}); - try print(val, writer, level - 1, zcu, opt_sema); + try writer.print("@as({}, ", .{ty.fmt(pt)}); + try print(val, writer, level - 1, pt, opt_sema); try writer.writeByte(')'); }, .eu_payload_ptr => |info| { try writer.writeByte('('); - try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema); + try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema); try writer.writeAll(" catch unreachable)"); }, .opt_payload_ptr => |info| { - try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema); + try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema); try writer.writeAll(".?"); }, .field_ptr => |field| { - try printPtrDerivation(field.parent.*, writer, level, zcu, opt_sema); - const agg_ty = (try field.parent.ptrType(zcu)).childType(zcu); + try printPtrDerivation(field.parent.*, writer, level, pt, opt_sema); + const agg_ty = (try field.parent.ptrType(pt)).childType(zcu); switch (agg_ty.zigTypeTag(zcu)) { .Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| { try writer.print(".{i}", .{field_name.fmt(ip)}); @@ -350,16 +351,16 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve } }, .elem_ptr => |elem| { - try printPtrDerivation(elem.parent.*, writer, level, zcu, opt_sema); + try printPtrDerivation(elem.parent.*, writer, level, pt, opt_sema); try writer.print("[{d}]", .{elem.elem_idx}); }, .offset_and_cast => |oac| if (oac.byte_offset == 0) { - try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(zcu)}); - try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema); + try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(pt)}); + try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema); try writer.writeAll("))"); } else { - try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(zcu)}); - try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema); + try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(pt)}); + try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema); try writer.print(") + {d}))", 
.{oac.byte_offset}); }, } diff --git a/src/print_zir.zig b/src/print_zir.zig index d064f02a8b30..2fee5f5d835a 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -7,13 +7,12 @@ const InternPool = @import("InternPool.zig"); const Zir = std.zig.Zir; const Zcu = @import("Zcu.zig"); -const Module = Zcu; const LazySrcLoc = Zcu.LazySrcLoc; /// Write human-readable, debug formatted ZIR code to a file. pub fn renderAsTextToFile( gpa: Allocator, - scope_file: *Module.File, + scope_file: *Zcu.File, fs_file: std.fs.File, ) !void { var arena = std.heap.ArenaAllocator.init(gpa); @@ -64,7 +63,7 @@ pub fn renderInstructionContext( gpa: Allocator, block: []const Zir.Inst.Index, block_index: usize, - scope_file: *Module.File, + scope_file: *Zcu.File, parent_decl_node: Ast.Node.Index, indent: u32, stream: anytype, @@ -96,7 +95,7 @@ pub fn renderInstructionContext( pub fn renderSingleInstruction( gpa: Allocator, inst: Zir.Inst.Index, - scope_file: *Module.File, + scope_file: *Zcu.File, parent_decl_node: Ast.Node.Index, indent: u32, stream: anytype, @@ -122,7 +121,7 @@ pub fn renderSingleInstruction( const Writer = struct { gpa: Allocator, arena: Allocator, - file: *Module.File, + file: *Zcu.File, code: Zir, indent: u32, parent_decl_node: Ast.Node.Index, diff --git a/src/register_manager.zig b/src/register_manager.zig index fb9afbbc0109..3e75cb152de2 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -7,8 +7,6 @@ const Air = @import("Air.zig"); const StaticBitSet = std.bit_set.StaticBitSet; const Type = @import("Type.zig"); const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectEqualSlices = std.testing.expectEqualSlices; From ca02266157ee72e41068672c8ca6f928fcbf6fdf Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 19:57:47 -0400 Subject: [PATCH 062/152] Zcu: pass `PerThread` to intern pool string functions --- src/Compilation.zig | 89 +++-- src/InternPool.zig | 21 +- src/Sema.zig | 262 +++++++------ src/Value.zig | 4 +- src/Zcu.zig | 687 +-------------------------------- src/Zcu/PerThread.zig | 725 ++++++++++++++++++++++++++++++++++- src/arch/wasm/CodeGen.zig | 10 +- src/codegen.zig | 2 +- src/codegen/llvm.zig | 30 +- src/codegen/spirv.zig | 8 +- src/link.zig | 10 +- src/link/C.zig | 4 +- src/link/Coff.zig | 10 +- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 6 +- src/link/Elf/ZigObject.zig | 16 +- src/link/MachO.zig | 6 +- src/link/MachO/ZigObject.zig | 17 +- src/link/Plan9.zig | 14 +- src/link/Wasm.zig | 11 +- src/link/Wasm/ZigObject.zig | 52 +-- src/mutable_value.zig | 2 +- 22 files changed, 1025 insertions(+), 963 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index d3ff33808020..1f4c425bc53b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -29,8 +29,6 @@ const wasi_libc = @import("wasi_libc.zig"); const fatal = @import("main.zig").fatal; const clangMain = @import("main.zig").clangMain; const Zcu = @import("Zcu.zig"); -/// Deprecated; use `Zcu`. -const Module = Zcu; const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); const Cache = std.Build.Cache; @@ -50,7 +48,7 @@ gpa: Allocator, arena: Allocator, /// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`. /// TODO: rename to zcu: ?*Zcu -module: ?*Module, +module: ?*Zcu, /// Contains different state depending on whether the Compilation uses /// incremental or whole cache mode. 
cache_use: CacheUse, @@ -120,7 +118,7 @@ astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic), /// These jobs are to inspect the file system stat() and if the embedded file has changed /// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl` /// task for it. -embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic), +embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic), /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. @@ -252,7 +250,7 @@ pub const Emit = struct { }; pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size; -pub const SemaError = Module.SemaError; +pub const SemaError = Zcu.SemaError; pub const CRTFile = struct { lock: Cache.Lock, @@ -1138,7 +1136,7 @@ pub const CreateOptions = struct { pdb_source_path: ?[]const u8 = null, /// (Windows) PDB output path pdb_out_path: ?[]const u8 = null, - error_limit: ?Compilation.Module.ErrorInt = null, + error_limit: ?Zcu.ErrorInt = null, global_cc_argv: []const []const u8 = &.{}, pub const Entry = link.File.OpenOptions.Entry; @@ -1344,7 +1342,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil const main_mod = options.main_mod orelse options.root_mod; const comp = try arena.create(Compilation); - const opt_zcu: ?*Module = if (have_zcu) blk: { + const opt_zcu: ?*Zcu = if (have_zcu) blk: { // Pre-open the directory handles for cached ZIR code so that it does not need // to redundantly happen for each AstGen operation. const zir_sub_dir = "z"; @@ -1362,8 +1360,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), }; - const emit_h: ?*Module.GlobalEmitH = if (options.emit_h) |loc| eh: { - const eh = try arena.create(Module.GlobalEmitH); + const emit_h: ?*Zcu.GlobalEmitH = if (options.emit_h) |loc| eh: { + const eh = try arena.create(Zcu.GlobalEmitH); eh.* = .{ .loc = loc }; break :eh eh; } else null; @@ -1386,7 +1384,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .builtin_modules = null, // `builtin_mod` is set }); - const zcu = try arena.create(Module); + const zcu = try arena.create(Zcu); zcu.* = .{ .gpa = gpa, .comp = comp, @@ -1434,7 +1432,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), - .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa), + .embed_file_work_queue = std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic).init(gpa), .c_source_files = options.c_source_files, .rc_source_files = options.rc_source_files, .cache_parent = cache, @@ -2626,7 +2624,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { var num_errors: u32 = 0; const max_errors = 5; // Attach the "some omitted" note to the final error message - var last_err: ?*Module.ErrorMsg = null; + var last_err: ?*Zcu.ErrorMsg = null; for (zcu.import_table.values(), 0..) 
|file, file_index_usize| { if (!file.multi_pkg) continue; @@ -2642,13 +2640,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { const omitted = file.references.items.len -| max_notes; const num_notes = file.references.items.len - omitted; - const notes = try gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); + const notes = try gpa.alloc(Zcu.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); errdefer gpa.free(notes); for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| { errdefer for (notes[0..i]) |*n| n.deinit(gpa); note.* = switch (ref) { - .import => |import| try Module.ErrorMsg.init( + .import => |import| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst), @@ -2657,7 +2655,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { "imported from module {s}", .{zcu.fileByIndex(import.file).mod.fully_qualified_name}, ), - .root => |pkg| try Module.ErrorMsg.init( + .root => |pkg| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2671,7 +2669,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { errdefer for (notes[0..num_notes]) |*n| n.deinit(gpa); if (omitted > 0) { - notes[num_notes] = try Module.ErrorMsg.init( + notes[num_notes] = try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2683,7 +2681,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { } errdefer if (omitted > 0) notes[num_notes].deinit(gpa); - const err = try Module.ErrorMsg.create( + const err = try Zcu.ErrorMsg.create( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2706,7 +2704,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { // There isn't really any meaningful place to put this note, so just attach it to the // last failed file - var note = try Module.ErrorMsg.init( + var note = try Zcu.ErrorMsg.init( gpa, err.src_loc, "{} more errors omitted", @@ -3095,10 +3093,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. 
const src_loc = values[0].src(); - const err_msg: Module.ErrorMsg = .{ + const err_msg: Zcu.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", - .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1), + .notes = try gpa.alloc(Zcu.ErrorMsg, zcu.compile_log_sources.count() - 1), }; defer gpa.free(err_msg.notes); @@ -3166,9 +3164,9 @@ pub const ErrorNoteHashContext = struct { }; pub fn addModuleErrorMsg( - mod: *Module, + mod: *Zcu, eb: *ErrorBundle.Wip, - module_err_msg: Module.ErrorMsg, + module_err_msg: Zcu.ErrorMsg, all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; @@ -3299,7 +3297,7 @@ pub fn addModuleErrorMsg( } } -pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { +pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { assert(file.zir_loaded); assert(file.tree_loaded); assert(file.source_loaded); @@ -3378,7 +3376,7 @@ pub fn performAllTheWork( const path_digest = zcu.filePathDigest(file_index); const root_decl = zcu.fileRootDecl(file_index); const file = zcu.fileByIndex(file_index); - comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(&comp.astgen_wait_group, workerAstGenFile, .{ comp, file, file_index, path_digest, root_decl, zir_prog_node, &comp.astgen_wait_group, .root, }); } @@ -3587,22 +3585,22 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre defer named_frame.end(); const gpa = comp.gpa; - const zcu = comp.module.?; - const decl = zcu.declPtr(decl_index); + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + const decl = pt.zcu.declPtr(decl_index); const lf = comp.bin_file.?; - lf.updateDeclLineNumber(zcu, decl_index) catch |err| { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber( + lf.updateDeclLineNumber(pt, decl_index) catch |err| { + try pt.zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + pt.zcu.failed_analysis.putAssumeCapacityNoClobber( InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu), + decl.navSrcLoc(pt.zcu), "unable to update line number: {s}", .{@errorName(err)}, ), ); decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try pt.zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |mod| { @@ -4049,6 +4047,7 @@ const AstGenSrc = union(enum) { }; fn workerAstGenFile( + tid: usize, comp: *Compilation, file: *Zcu.File, file_index: Zcu.File.Index, @@ -4061,8 +4060,8 @@ fn workerAstGenFile( const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); - const zcu = comp.module.?; - zcu.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4097,15 +4096,15 @@ fn workerAstGenFile( comp.mutex.lock(); defer comp.mutex.unlock(); - const res = zcu.importFile(file, import_path) catch continue; + const res = pt.zcu.importFile(file, import_path) catch continue; if (!res.is_pkg) { - res.file.addReference(zcu.*, .{ .import = .{ + res.file.addReference(pt.zcu.*, .{ .import = .{ .file 
= file_index, .token = item.data.token, } }) catch continue; } - const imported_path_digest = zcu.filePathDigest(res.file_index); - const imported_root_decl = zcu.fileRootDecl(res.file_index); + const imported_path_digest = pt.zcu.filePathDigest(res.file_index); + const imported_root_decl = pt.zcu.fileRootDecl(res.file_index); break :blk .{ res, imported_path_digest, imported_root_decl }; }; if (import_result.is_new) { @@ -4116,7 +4115,7 @@ fn workerAstGenFile( .importing_file = file_index, .import_tok = item.data.token, } }; - comp.thread_pool.spawnWg(wg, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_decl, prog_node, wg, sub_src, }); } @@ -4127,7 +4126,7 @@ fn workerAstGenFile( fn workerUpdateBuiltinZigFile( comp: *Compilation, mod: *Package.Module, - file: *Module.File, + file: *Zcu.File, ) void { Builtin.populateFile(comp, mod, file) catch |err| { comp.mutex.lock(); @@ -4139,7 +4138,7 @@ fn workerUpdateBuiltinZigFile( }; } -fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void { +fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Zcu.EmbedFile) void { comp.detectEmbedFileUpdate(embed_file) catch |err| { comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) { // Swallowing this error is OK because it's implied to be OOM when @@ -4150,7 +4149,7 @@ fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void }; } -fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void { +fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Zcu.EmbedFile) !void { const mod = comp.module.?; const ip = &mod.intern_pool; var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{}); @@ -4477,7 +4476,7 @@ fn reportRetryableAstGenError( const file = zcu.fileByIndex(file_index); file.status = .retryable_failure; - const src_loc: Module.LazySrcLoc = switch (src) { + const src_loc: Zcu.LazySrcLoc = switch (src) { .root => .{ .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, @@ -4488,7 +4487,7 @@ fn reportRetryableAstGenError( }, }; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ file.mod.root, file.sub_file_path, @errorName(err), }); errdefer err_msg.destroy(gpa); @@ -4502,14 +4501,14 @@ fn reportRetryableAstGenError( fn reportRetryableEmbedFileError( comp: *Compilation, - embed_file: *Module.EmbedFile, + embed_file: *Zcu.EmbedFile, err: anyerror, ) error{OutOfMemory}!void { const mod = comp.module.?; const gpa = mod.gpa; const src_loc = embed_file.src_loc; const ip = &mod.intern_pool; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ embed_file.owner.root, embed_file.sub_file_path.toSlice(ip), @errorName(err), diff --git a/src/InternPool.zig b/src/InternPool.zig index 133874318228..97fd35bf201b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4539,7 +4539,7 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); // Reserve string index 0 for an empty string. 
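// Call-site shape of this commit, as the hunks here show: each string
// interning entry point gains a `tid: Zcu.PerThread.Id` parameter directly
// after the allocator. For example:
//
//     before: try ip.getOrPutString(gpa, "len", .no_embedded_nulls)
//     after:  try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls)
//
// Note that `getOrPutTrailingString` below still discards the id
// (`_ = tid;`), so at this point in the series the new parameter is pure
// plumbing.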
- assert((try ip.getOrPutString(gpa, "", .no_embedded_nulls)) == .empty); + assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty); // So that we can use `catch unreachable` below. try ip.items.ensureUnusedCapacity(gpa, static_keys.len); @@ -5986,6 +5986,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ); const string = try ip.getOrPutTrailingString( gpa, + tid, @intCast(len_including_sentinel), .maybe_embedded_nulls, ); @@ -6865,6 +6866,7 @@ pub fn getFuncInstance( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -6879,7 +6881,7 @@ pub fn getFuncInstance( pub fn getFuncInstanceIes( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, arg: GetFuncInstanceKey, ) Allocator.Error!Index { // Validate input parameters. @@ -6994,6 +6996,7 @@ pub fn getFuncInstanceIes( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -7005,6 +7008,7 @@ pub fn getFuncInstanceIes( fn finishFuncInstance( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, generic_owner: Index, func_index: Index, func_extra_index: u32, @@ -7036,7 +7040,7 @@ fn finishFuncInstance( // TODO: improve this name const decl = ip.declPtr(decl_index); - decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); @@ -8782,18 +8786,20 @@ const EmbeddedNulls = enum { pub fn getOrPutString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1); ip.string_bytes.appendSliceAssumeCapacity(slice); ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, slice.len + 1, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, slice.len + 1, embedded_nulls); } pub fn getOrPutStringFmt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, comptime format: []const u8, args: anytype, comptime embedded_nulls: EmbeddedNulls, @@ -8803,16 +8809,17 @@ pub fn getOrPutStringFmt( try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, len, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); } pub fn getOrPutStringOpt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: ?[]const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.OptionalStringType() { - const string = try getOrPutString(ip, gpa, slice orelse return .none, embedded_nulls); + const string = try getOrPutString(ip, gpa, tid, slice orelse return .none, embedded_nulls); return string.toOptional(); } @@ -8820,9 +8827,11 @@ pub fn getOrPutStringOpt( pub fn getOrPutTrailingString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, len: usize, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { + _ = tid; const string_bytes = &ip.string_bytes; const str_index: u32 = @intCast(string_bytes.items.len - len); if (len > 0 and string_bytes.getLast() == 0) { diff --git a/src/Sema.zig b/src/Sema.zig index dd8d2712ed70..ee4ac3b70319 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2093,12 +2093,12 @@ pub fn setupErrorReturnTrace(sema: 
*Sema, block: *Block, last_arg_index: usize) const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses", .no_embedded_nulls); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, pt.tid, "instruction_addresses", .no_embedded_nulls); const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_name = try ip.getOrPutString(gpa, "index", .no_embedded_nulls); + const index_field_name = try ip.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); @@ -2691,6 +2691,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_val => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2700,6 +2701,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_ref => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2847,7 +2849,7 @@ fn zirStructDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -2919,7 +2921,7 @@ fn createAnonymousDeclTypeNamed( }; try writer.writeByte(')'); - const name = try ip.getOrPutString(gpa, buf.items, .no_embedded_nulls); + const name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; }, @@ -2931,7 +2933,7 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try ip.getOrPutStringFmt(gpa, "{}.{s}", .{ + const name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -2952,7 +2954,7 @@ fn createAnonymousDeclTypeNamed( // This name is also used as the key in the parent namespace so it cannot be // renamed. - const name = ip.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + const name = ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), }, .no_embedded_nulls) catch unreachable; try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -3084,7 +3086,7 @@ fn zirEnumDecl( errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { - try mod.scanNamespace(ns, decls, new_decl); + try pt.scanNamespace(ns, decls, new_decl); } // We've finished the initial construction of this type, and are about to perform analysis. 
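// For reference, the name formats interned in createAnonymousDeclTypeNamed
// above produce strings like the following (the concrete examples are
// hypothetical; only the format strings are taken from the hunks):
//
//     "{}__{s}_{d}"  =>  "main.Foo__struct_142"   (fallback for .anon)
//     "{}.{s}"       =>  "main.Foo.my_var"        (.dbg_var, named after the variable)
//     buf.items      =>  "main.makeFoo(123)"      (.func, callee plus comptime args)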
@@ -3169,7 +3171,7 @@ fn zirEnumDecl( const field_name_zir = sema.code.nullTerminatedString(field_name_index); extra_index += 2; // field name, doc comment - const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); const value_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, @@ -3352,7 +3354,7 @@ fn zirUnionDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3441,7 +3443,7 @@ fn zirOpaqueDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3470,7 +3472,7 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const name = sema.code.nullTerminatedString(name_index); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); _ = try mod.getErrorValue(name_ip); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen @@ -3634,7 +3636,7 @@ fn indexablePtrLen( const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, object, field_name, src); } @@ -3649,7 +3651,7 @@ fn indexablePtrLenOrNone( const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, operand, field_name, src); } @@ -4405,7 +4407,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4797,6 +4799,7 @@ fn validateUnionInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -4942,6 +4945,7 @@ fn validateStructInit( struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -5518,10 +5522,11 @@ fn failWithBadStructFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(struct_type.decl.unwrap().?); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5544,12 +5549,13 @@ fn failWithBadUnionFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const decl = zcu.declPtr(union_obj.decl); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5715,7 +5721,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code); return sema.addStrLit( - try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls), + try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, sema.pt.tid, bytes, .maybe_embedded_nulls), bytes.len, ); } @@ -6057,7 +6063,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const path_digest = zcu.filePathDigest(result.file_index); const root_decl = zcu.fileRootDecl(result.file_index); - zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| + pt.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try pt.ensureFileAnalyzed(result.file_index); @@ -6418,6 +6424,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); const decl_name = try mod.intern_pool.getOrPutString( mod.gpa, + pt.tid, sema.code.nullTerminatedString(extra.decl_name), .no_embedded_nulls, ); @@ -6737,6 +6744,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6751,6 +6759,7 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = 
block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6907,7 +6916,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, @@ -6951,7 +6960,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { @@ -6977,7 +6986,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, .void_value); @@ -7038,6 +7047,7 @@ fn zirCall( const object_ptr = try sema.resolveInst(extra.data.obj_ptr); const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.data.field_name_start), .no_embedded_nulls, ); @@ -7103,7 +7113,7 @@ fn zirCall( if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated @@ -8687,6 +8697,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try pt.zcu.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -8849,7 +8860,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = inst_data.get(sema.code); return Air.internedToRef((try pt.intern(.{ - .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name, .no_embedded_nulls), + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls), }))); } @@ -9820,7 +9831,7 @@ fn funcCommon( const func_index = try ip.getExternFunc(gpa, pt.tid, .{ .ty = func_ty, .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name, .no_embedded_nulls), + .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), }); return finishFunc( sema, @@ -10281,6 +10292,7 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10300,6 +10312,7 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10319,6 +10332,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -13983,6 +13997,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -17716,7 +17731,7 @@ fn zirBuiltinSrc( .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ - .bytes = try ip.getOrPutString(gpa, file_name, .maybe_embedded_nulls), + .bytes = try ip.getOrPutString(gpa, pt.tid, file_name, .maybe_embedded_nulls), }, } }), } }, @@ -17778,7 +17793,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Fn", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(fn_info_decl_index); const fn_info_decl = mod.declPtr(fn_info_decl_index); @@ -17788,7 +17803,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, fn_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Param", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(param_info_decl_index); const param_info_decl = mod.declPtr(param_info_decl_index); @@ -17890,7 +17905,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, 
type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Int", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Int", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(int_info_decl_index); const int_info_decl = mod.declPtr(int_info_decl_index); @@ -17918,7 +17933,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Float", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); @@ -17950,7 +17965,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -17961,7 +17976,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, pointer_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Size", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18004,7 +18019,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Array", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(array_field_ty_decl_index); const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); @@ -18035,7 +18050,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Vector", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); @@ -18064,7 +18079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Optional", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); @@ -18091,7 +18106,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Error", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(set_field_ty_decl_index); const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); @@ -18197,7 +18212,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ErrorUnion", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); @@ -18227,7 +18242,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "EnumField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); @@ -18324,7 +18339,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Enum", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); @@ -18356,7 +18371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Union", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_union_ty_decl_index); const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); @@ -18368,7 +18383,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "UnionField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); @@ -18473,7 +18488,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18506,7 +18521,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Struct", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); @@ -18518,7 +18533,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "StructField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); @@ -18540,7 +18555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (anon_struct_type.names.len != 0) anon_struct_type.names.get(ip)[field_index] else - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, @@ -18600,7 +18615,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| field_name else - try 
ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); const field_init = struct_type.fieldInit(ip, field_index); @@ -18706,7 +18721,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18742,7 +18757,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Opaque", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); @@ -18786,7 +18801,7 @@ fn typeInfoDecls( block, src, type_info_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, "Declaration", .no_embedded_nulls), + try mod.intern_pool.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); @@ -19541,6 +19556,7 @@ fn zirRetErrValue( const src = block.tokenOffset(inst_data.src_tok); const err_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -20251,6 +20267,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20292,6 +20309,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20581,7 +20599,7 @@ fn structInitAnon( }, }; - field_name.* = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + field_name.* = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init).toIntern(); @@ -20958,7 +20976,7 @@ fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp }; const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(mod); const zir_field_name = sema.code.nullTerminatedString(extra.name_start); - const field_name = try ip.getOrPutString(sema.gpa, zir_field_name, .no_embedded_nulls); + const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -21344,11 +21362,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const signedness_val = try Value.fromInterned(union_val.val).fieldValue( pt, - struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness", .no_embedded_nulls)).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "signedness", .no_embedded_nulls)).?, ); const bits_val = 
try Value.fromInterned(union_val.val).fieldValue( pt, - struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls)).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls)).?, ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); @@ -21360,11 +21378,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "len", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt)); @@ -21382,7 +21400,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls), ).?); const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); @@ -21400,35 +21418,35 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "size", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "size", .no_embedded_nulls), ).?); const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_const", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_const", .no_embedded_nulls), ).?); const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_volatile", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_volatile", .no_embedded_nulls), ).?); const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "alignment", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "alignment", .no_embedded_nulls), ).?); const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "address_space", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "address_space", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_allowzero", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_allowzero", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { @@ -21505,15 +21523,15 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try 
Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "len", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); const len = try len_val.toUnsignedIntSema(pt); @@ -21534,7 +21552,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const child_ty = child_val.toType(); @@ -21546,11 +21564,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "error_set", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "error_set", .no_embedded_nulls), ).?); const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "payload", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "payload", .no_embedded_nulls), ).?); const error_set_ty = error_set_val.toType(); @@ -21579,7 +21597,7 @@ fn zirReify( const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "name", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), ).?); const name = try sema.sliceToIpString(block, src, name_val, .{ @@ -21601,23 +21619,23 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "backing_integer", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "backing_integer", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_tuple", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_tuple", .no_embedded_nulls), ).?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21641,19 +21659,19 @@ fn zirReify( const struct_type = 
ip.loadStructType(ip.typeOf(union_val.val)); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_exhaustive", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { @@ -21670,7 +21688,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); // Decls @@ -21707,19 +21725,19 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { @@ -21737,23 +21755,23 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "calling_convention", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "calling_convention", .no_embedded_nulls), ).?); const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_var_args", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_var_args", .no_embedded_nulls), ).?); const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "return_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "return_type", 
.no_embedded_nulls), ).?); const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "params", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "params", .no_embedded_nulls), ).?); const is_generic = is_generic_val.toBool(); @@ -21783,15 +21801,15 @@ fn zirReify( const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_noalias", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_noalias", .no_embedded_nulls), ).?); const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "type", .no_embedded_nulls), ).?); if (param_is_generic_val.toBool()) { @@ -22535,7 +22553,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, ty_src, inst_data.operand); - const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); + const type_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); return sema.addNullTerminatedStrLit(type_name); } @@ -24143,18 +24161,18 @@ fn resolveExportOptions( const section_src = block.src(.{ .init_field_section = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const visibility_src = block.src(.{ .init_field_visibility = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src); + const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_operand, .{ .needed_comptime_reason = "name of exported value must be comptime-known", }); - const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src); + const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{ .needed_comptime_reason = "linkage of exported value must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section", .no_embedded_nulls), section_src); + const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src); const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{ .needed_comptime_reason = "linksection of exported value must be comptime-known", }); @@ -24165,7 +24183,7 @@ fn resolveExportOptions( else null; - const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility", .no_embedded_nulls), visibility_src); + const 
visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src); const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{ .needed_comptime_reason = "visibility of exported value must be comptime-known", }); @@ -24182,9 +24200,9 @@ fn resolveExportOptions( } return .{ - .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls), + .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), .linkage = linkage, - .section = try ip.getOrPutStringOpt(gpa, section, .no_embedded_nulls), + .section = try ip.getOrPutStringOpt(gpa, pt.tid, section, .no_embedded_nulls), .visibility = visibility, }; } @@ -25821,7 +25839,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = rs: { const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); @@ -25952,7 +25970,7 @@ fn zirVarExtended( .ty = var_ty.toIntern(), .init = init_val, .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, lib_name, .no_embedded_nulls), + .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), .is_extern = small.is_extern, .is_const = small.is_const, .is_threadlocal = small.is_threadlocal, @@ -26323,17 +26341,17 @@ fn resolvePrefetchOptions( const locality_src = block.src(.{ .init_field_locality = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const cache_src = block.src(.{ .init_field_cache = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw", .no_embedded_nulls), rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "rw", .no_embedded_nulls), rw_src); const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw, .{ .needed_comptime_reason = "prefetch read/write must be comptime-known", }); - const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality", .no_embedded_nulls), locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "locality", .no_embedded_nulls), locality_src); const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality, .{ .needed_comptime_reason = "prefetch locality must be comptime-known", }); - const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache", .no_embedded_nulls), cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "cache", .no_embedded_nulls), cache_src); const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache, .{ .needed_comptime_reason = "prefetch cache must be comptime-known", }); @@ -26397,23 +26415,23 @@ fn resolveExternOptions( const linkage_src = block.src(.{ .init_field_linkage = src.offset.node_offset_builtin_call_arg.builtin_call_node }); 
const thread_local_src = block.src(.{ .init_field_thread_local = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_ref, .{ .needed_comptime_reason = "name of the extern symbol must be comptime-known", }); - const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name", .no_embedded_nulls), library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "library_name", .no_embedded_nulls), library_src); const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{ .needed_comptime_reason = "library in which extern symbol is must be comptime-known", }); - const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{ .needed_comptime_reason = "linkage of the extern symbol must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local", .no_embedded_nulls), thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src); const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{ .needed_comptime_reason = "threadlocality of the extern symbol must be comptime-known", }); @@ -26438,8 +26456,8 @@ fn resolveExternOptions( } return .{ - .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls), - .library_name = try ip.getOrPutStringOpt(gpa, library_name, .no_embedded_nulls), + .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), + .library_name = try ip.getOrPutStringOpt(gpa, pt.tid, library_name, .no_embedded_nulls), .linkage = linkage, .is_thread_local = is_thread_local_val.toBool(), }; @@ -27052,7 +27070,7 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP block, LazySrcLoc.unneeded, panic_messages_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id), .no_embedded_nulls), + try mod.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls), ) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.panic_messages is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, @@ -31745,7 +31763,7 @@ fn coerceTupleToStruct( .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) anon_struct_type.names.get(ip)[tuple_field_index] else - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{tuple_field_index}, .no_embedded_nulls), + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{tuple_field_index}, .no_embedded_nulls), .struct_type => ip.loadStructType(inst_ty.toIntern()).field_names.get(ip)[tuple_field_index], else => unreachable, }; @@ -31858,13 +31876,13 @@ fn coerceTupleToTuple( 
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) anon_struct_type.names.get(ip)[field_i] else - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls), + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls), .struct_type => s: { const struct_type = ip.loadStructType(inst_ty.toIntern()); if (struct_type.field_names.len > 0) { break :s struct_type.field_names.get(ip)[field_i]; } else { - break :s try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls); + break :s try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls); } }, else => unreachable, @@ -34849,7 +34867,7 @@ fn resolvePeerTypesInner( const result_buf = try sema.arena.create(PeerResolveResult); result_buf.* = result; const field_name = if (is_tuple) - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_index}, .no_embedded_nulls) + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls) else field_names[field_index]; @@ -36066,7 +36084,7 @@ fn semaStructFields( // This string needs to outlive the ZIR code. if (opt_field_name_zir) |field_name_zir| { - const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); assert(struct_type.addFieldName(ip, field_name) == null); } @@ -36567,7 +36585,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L } // This string needs to outlive the ZIR code. - const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); if (enum_field_names.len != 0) { enum_field_names[field_i] = field_name; } @@ -36716,9 +36734,10 @@ fn generateUnionTagTypeNumbered( const new_decl_index = try mod.allocateNewDecl(block.namespace); errdefer mod.destroyDecl(new_decl_index); - const fqn = try union_owner_decl.fullyQualifiedName(mod); + const fqn = try union_owner_decl.fullyQualifiedName(pt); const name = try ip.getOrPutStringFmt( gpa, + pt.tid, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)}, .no_embedded_nulls, @@ -36764,11 +36783,12 @@ fn generateUnionTagTypeSimple( const gpa = sema.gpa; const new_decl_index = new_decl_index: { - const fqn = try union_owner_decl.fullyQualifiedName(mod); + const fqn = try union_owner_decl.fullyQualifiedName(pt); const new_decl_index = try mod.allocateNewDecl(block.namespace); errdefer mod.destroyDecl(new_decl_index); const name = try ip.getOrPutStringFmt( gpa, + pt.tid, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)}, .no_embedded_nulls, diff --git a/src/Value.zig b/src/Value.zig index 21bb207b59c7..e47598fe0a36 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -67,7 +67,7 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); const len: usize = @intCast(ty.arrayLen(mod)); try ip.string_bytes.appendNTimes(mod.gpa, byte, len); - return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls); + return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls); }, } } @@ -118,7 +118,7 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); ip.string_bytes.appendAssumeCapacity(byte); } - return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, 
len, .no_embedded_nulls); } pub fn fromInterned(i: InternPool.Index) Value { diff --git a/src/Zcu.zig b/src/Zcu.zig index bfc70815dfc5..c4ebc6a36b88 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -420,11 +420,11 @@ pub const Decl = struct { return zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(zcu, decl.name, writer); } - pub fn fullyQualifiedName(decl: Decl, zcu: *Zcu) !InternPool.NullTerminatedString { + pub fn fullyQualifiedName(decl: Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString { return if (decl.name_fully_qualified) decl.name else - zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(zcu, decl.name); + pt.zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(pt, decl.name); } pub fn typeOf(decl: Decl, zcu: *const Zcu) Type { @@ -688,9 +688,10 @@ pub const Namespace = struct { pub fn fullyQualifiedName( ns: Namespace, - zcu: *Zcu, + pt: Zcu.PerThread, name: InternPool.NullTerminatedString, ) !InternPool.NullTerminatedString { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const count = count: { var count: usize = name.length(ip) + 1; @@ -723,7 +724,7 @@ pub const Namespace = struct { }; } - return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { @@ -875,11 +876,12 @@ pub const File = struct { }; } - pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { - const ip = &mod.intern_pool; + pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + const gpa = pt.zcu.gpa; + const ip = &pt.zcu.intern_pool; const start = ip.string_bytes.items.len; - try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); - return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa)); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn fullPath(file: File, ally: Allocator) ![]u8 { @@ -2569,8 +2571,8 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { } // TODO https://github.com/ziglang/zig/issues/8643 -const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; -const HackDataLayout = extern struct { +pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; +pub const HackDataLayout = extern struct { data: [8]u8 align(@alignOf(Zir.Inst.Data)), safety_tag: u8, }; @@ -2580,291 +2582,11 @@ comptime { } } -pub fn astGenFile( - zcu: *Zcu, - file: *File, - /// This parameter is provided separately from `file` because it is not - /// safe to access `import_table` without a lock, and this index is needed - /// in the call to `updateZirRefs`. - file_index: File.Index, - path_digest: Cache.BinDigest, - opt_root_decl: Zcu.Decl.OptionalIndex, -) !void { - assert(!file.mod.isBuiltin()); - - const tracy = trace(@src()); - defer tracy.end(); - - const comp = zcu.comp; - const gpa = zcu.gpa; - - // In any case we need to examine the stat of the file to determine the course of action. 
- var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); - defer source_file.close(); - - const stat = try source_file.stat(); - - const want_local_cache = file.mod == zcu.main_mod; - const hex_digest = Cache.binToHex(path_digest); - const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; - const zir_dir = cache_directory.handle; - - // Determine whether we need to reload the file from disk and redo parsing and AstGen. - var lock: std.fs.File.Lock = switch (file.status) { - .never_loaded, .retryable_failure => lock: { - // First, load the cached ZIR code, if any. - log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{ - file.sub_file_path, want_local_cache, &hex_digest, - }); - - break :lock .shared; - }, - .parse_failure, .astgen_failure, .success_zir => lock: { - const unchanged_metadata = - stat.size == file.stat.size and - stat.mtime == file.stat.mtime and - stat.inode == file.stat.inode; - - if (unchanged_metadata) { - log.debug("unmodified metadata of file: {s}", .{file.sub_file_path}); - return; - } - - log.debug("metadata changed: {s}", .{file.sub_file_path}); - - break :lock .exclusive; - }, - }; - - // We ask for a lock in order to coordinate with other zig processes. - // If another process is already working on this file, we will get the cached - // version. Likewise if we're working on AstGen and another process asks for - // the cached file, they'll get it. - const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ - .read = true, - .truncate = false, - .lock = lock, - }) catch |err| switch (err) { - error.NotDir => unreachable, // no dir components - error.InvalidUtf8 => unreachable, // it's a hex encoded name - error.InvalidWtf8 => unreachable, // it's a hex encoded name - error.BadPathName => unreachable, // it's a hex encoded name - error.NameTooLong => unreachable, // it's a fixed size name - error.PipeBusy => unreachable, // it's not a pipe - error.WouldBlock => unreachable, // not asking for non-blocking I/O - // There are no dir components, so you would think that this was - // unreachable, however we have observed on macOS two processes racing - // to do openat() with O_CREAT manifest in ENOENT. - error.FileNotFound => continue, - - else => |e| return e, // Retryable errors are handled at callsite. - }; - }; - defer cache_file.close(); - - while (true) { - update: { - // First we read the header to determine the lengths of arrays. - const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) { - // This can happen if Zig bails out of this function between creating - // the cached file and writing it. 
- error.EndOfStream => break :update, - else => |e| return e, - }; - const unchanged_metadata = - stat.size == header.stat_size and - stat.mtime == header.stat_mtime and - stat.inode == header.stat_inode; - - if (!unchanged_metadata) { - log.debug("AstGen cache stale: {s}", .{file.sub_file_path}); - break :update; - } - log.debug("AstGen cache hit: {s} instructions_len={d}", .{ - file.sub_file_path, header.instructions_len, - }); - - file.zir = loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { - error.UnexpectedFileSize => { - log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}); - break :update; - }, - else => |e| return e, - }; - file.zir_loaded = true; - file.stat = .{ - .size = header.stat_size, - .inode = header.stat_inode, - .mtime = header.stat_mtime, - }; - file.status = .success_zir; - log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - - // TODO don't report compile errors until Sema @importFile - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - return; - } - - // If we already have the exclusive lock then it is our job to update. - if (builtin.os.tag == .wasi or lock == .exclusive) break; - // Otherwise, unlock to give someone a chance to get the exclusive lock - // and then upgrade to an exclusive lock. - cache_file.unlock(); - lock = .exclusive; - try cache_file.lock(lock); - } - - // The cache is definitely stale so delete the contents to avoid an underwrite later. - cache_file.setEndPos(0) catch |err| switch (err) { - error.FileTooBig => unreachable, // 0 is not too big - - else => |e| return e, - }; - - zcu.lockAndClearFileCompileError(file); - - // If the previous ZIR does not have compile errors, keep it around - // in case parsing or new ZIR fails. In case of successful ZIR update - // at the end of this function we will free it. - // We keep the previous ZIR loaded so that we can use it - // for the update next time it does not have any compile errors. This avoids - // needlessly tossing out semantic analysis work when an error is - // temporarily introduced. - if (file.zir_loaded and !file.zir.hasCompileErrors()) { - assert(file.prev_zir == null); - const prev_zir_ptr = try gpa.create(Zir); - file.prev_zir = prev_zir_ptr; - prev_zir_ptr.* = file.zir; - file.zir = undefined; - file.zir_loaded = false; - } - file.unload(gpa); - - if (stat.size > std.math.maxInt(u32)) - return error.FileTooBig; - - const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (!file.source_loaded) gpa.free(source); - const amt = try source_file.readAll(source); - if (amt != stat.size) - return error.UnexpectedEndOfFile; - - file.stat = .{ - .size = stat.size, - .inode = stat.inode, - .mtime = stat.mtime, - }; - file.source = source; - file.source_loaded = true; - - file.tree = try Ast.parse(gpa, source, .zig); - file.tree_loaded = true; - - // Any potential AST errors are converted to ZIR errors here. 
- file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; - file.status = .success_zir; - log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); - - const safety_buffer = if (data_has_safety_tag) - try gpa.alloc([8]u8, file.zir.instructions.len) - else - undefined; - defer if (data_has_safety_tag) gpa.free(safety_buffer); - const data_ptr = if (data_has_safety_tag) - if (file.zir.instructions.len == 0) - @as([*]const u8, undefined) - else - @as([*]const u8, @ptrCast(safety_buffer.ptr)) - else - @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); - if (data_has_safety_tag) { - // The `Data` union has a safety tag but in the file format we store it without. - for (file.zir.instructions.items(.data), 0..) |*data, i| { - const as_struct = @as(*const HackDataLayout, @ptrCast(data)); - safety_buffer[i] = as_struct.data; - } - } - - const header: Zir.Header = .{ - .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), - .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), - .extra_len = @as(u32, @intCast(file.zir.extra.len)), - - .stat_size = stat.size, - .stat_inode = stat.inode, - .stat_mtime = stat.mtime, - }; - var iovecs = [_]std.posix.iovec_const{ - .{ - .base = @as([*]const u8, @ptrCast(&header)), - .len = @sizeOf(Zir.Header), - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), - .len = file.zir.instructions.len, - }, - .{ - .base = data_ptr, - .len = file.zir.instructions.len * 8, - }, - .{ - .base = file.zir.string_bytes.ptr, - .len = file.zir.string_bytes.len, - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), - .len = file.zir.extra.len * 4, - }, - }; - cache_file.writevAll(&iovecs) catch |err| { - log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ - file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), - }); - }; - - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - - if (file.prev_zir) |prev_zir| { - try updateZirRefs(zcu, file, file_index, prev_zir.*); - // No need to keep previous ZIR. - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - file.prev_zir = null; - } - - if (opt_root_decl.unwrap()) |root_decl| { - // The root of this file must be re-analyzed, since the file has changed. - comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated root Decl: {}", .{root_decl}); - try zcu.outdated_file_root.put(gpa, root_decl, {}); - } -} - pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir { return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file); } -fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { +pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { var instructions: std.MultiArrayList(Zir.Inst) = .{}; errdefer instructions.deinit(gpa); @@ -2930,127 +2652,6 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) return zir; } -/// This is called from the AstGen thread pool, so must acquire -/// the Compilation mutex when acting on shared state. 
-fn updateZirRefs(zcu: *Module, file: *File, file_index: File.Index, old_zir: Zir) !void { - const gpa = zcu.gpa; - const new_zir = file.zir; - - var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; - defer inst_map.deinit(gpa); - - try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); - - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - - // TODO: this should be done after all AstGen workers complete, to avoid - // iterating over this full set for every updated file. - for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { - const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (ti.file != file_index) continue; - const old_inst = ti.inst; - ti.inst = inst_map.get(ti.inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - continue; - }; - - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - ti.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); - } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - } - - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, - else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); - } - } - var any_change = false; - { - var it = new_zir.declIterator(ti.inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - } - // The only elements remaining in `old_names` now are any names which were removed. 
- for (old_names.keys()) |name_ip| { - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - - if (any_change) { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); - } - } -} - pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { log.debug("outdated dependee: {}", .{dependee}); var it = zcu.intern_pool.dependencyIterator(dependee); @@ -3695,268 +3296,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) return bin; } -pub fn scanNamespace( - zcu: *Zcu, - namespace_index: Namespace.Index, - decls: []const Zir.Inst.Index, - parent_decl: *Decl, -) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const gpa = zcu.gpa; - const namespace = zcu.namespacePtr(namespace_index); - - // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather - // than their name. We'll build an efficient mapping now, then discard the current `decls`. - var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index) = .{}; - defer existing_by_inst.deinit(gpa); - - try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); - - for (namespace.decls.keys()) |decl_index| { - const decl = zcu.declPtr(decl_index); - existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); - } - - var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer seen_decls.deinit(gpa); - - try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); - - namespace.decls.clearRetainingCapacity(); - try namespace.decls.ensureTotalCapacity(gpa, decls.len); - - namespace.usingnamespace_set.clearRetainingCapacity(); - - var scan_decl_iter: ScanDeclIter = .{ - .zcu = zcu, - .namespace_index = namespace_index, - .parent_decl = parent_decl, - .seen_decls = &seen_decls, - .existing_by_inst = &existing_by_inst, - .pass = .named, - }; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - scan_decl_iter.pass = .unnamed; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - - if (seen_decls.count() != namespace.decls.count()) { - // Do a pass over the namespace contents and remove any decls from the last update - // which were removed in this one. - var i: usize = 0; - while (i < namespace.decls.count()) { - const decl_index = namespace.decls.keys()[i]; - const decl = zcu.declPtr(decl_index); - if (!seen_decls.contains(decl.name)) { - // We must preserve namespace ordering for @typeInfo. - namespace.decls.orderedRemoveAt(i); - i -= 1; - } - } - } -} - -const ScanDeclIter = struct { - zcu: *Zcu, - namespace_index: Namespace.Index, - parent_decl: *Decl, - seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index), - /// Decl scanning is run in two passes, so that we can detect when a generated - /// name would clash with an explicit name and use a different one. 
- pass: enum { named, unnamed }, - usingnamespace_index: usize = 0, - comptime_index: usize = 0, - unnamed_test_index: usize = 0, - - fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString { - const zcu = iter.zcu; - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - var name = try ip.getOrPutStringFmt(gpa, fmt, args, .no_embedded_nulls); - var gop = try iter.seen_decls.getOrPut(gpa, name); - var next_suffix: u32 = 0; - while (gop.found_existing) { - name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls); - gop = try iter.seen_decls.getOrPut(gpa, name); - next_suffix += 1; - } - return name; - } -}; - -fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const zcu = iter.zcu; - const namespace_index = iter.namespace_index; - const namespace = zcu.namespacePtr(namespace_index); - const gpa = zcu.gpa; - const zir = namespace.fileScope(zcu).zir; - const ip = &zcu.intern_pool; - - const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; - const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); - const declaration = extra.data; - - // Every Decl needs a name. - const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) { - .@"comptime" => info: { - if (iter.pass != .unnamed) return; - const i = iter.comptime_index; - iter.comptime_index += 1; - break :info .{ - try iter.avoidNameConflict("comptime_{d}", .{i}), - .@"comptime", - false, - }; - }, - .@"usingnamespace" => info: { - // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here. - // The problem is, we need to preserve the decl ordering for `@typeInfo`. - // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway. - if (iter.pass != .named) return; - const i = iter.usingnamespace_index; - iter.usingnamespace_index += 1; - break :info .{ - try iter.avoidNameConflict("usingnamespace_{d}", .{i}), - .@"usingnamespace", - false, - }; - }, - .unnamed_test => info: { - if (iter.pass != .unnamed) return; - const i = iter.unnamed_test_index; - iter.unnamed_test_index += 1; - break :info .{ - try iter.avoidNameConflict("test_{d}", .{i}), - .@"test", - false, - }; - }, - .decltest => info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. - if (iter.pass != .unnamed) return; - assert(declaration.flags.has_doc_comment); - const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); - break :info .{ - try iter.avoidNameConflict("decltest.{s}", .{name}), - .@"test", - true, - }; - }, - _ => if (declaration.name.isNamedTest(zir)) info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. 
- if (iter.pass != .unnamed) return; - break :info .{ - try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), - .@"test", - true, - }; - } else info: { - if (iter.pass != .named) return; - const name = try ip.getOrPutString( - gpa, - zir.nullTerminatedString(declaration.name.toString(zir).?), - .no_embedded_nulls, - ); - try iter.seen_decls.putNoClobber(gpa, name, {}); - break :info .{ - name, - .named, - false, - }; - }, - }; - - switch (kind) { - .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), - .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), - else => {}, - } - - const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); - const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); - - // We create a Decl for it regardless of analysis status. - - const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { - // We need only update this existing Decl. - const decl = zcu.declPtr(decl_index); - const was_exported = decl.is_exported; - assert(decl.kind == kind); // ZIR tracking should preserve this - decl.name = decl_name; - decl.is_pub = declaration.flags.is_pub; - decl.is_exported = declaration.flags.is_export; - break :decl_index .{ was_exported, decl_index }; - } else decl_index: { - // Create and set up a new Decl. - const new_decl_index = try zcu.allocateNewDecl(namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - new_decl.kind = kind; - new_decl.name = decl_name; - new_decl.is_pub = declaration.flags.is_pub; - new_decl.is_exported = declaration.flags.is_export; - new_decl.zir_decl_index = tracked_inst.toOptional(); - break :decl_index .{ false, new_decl_index }; - }; - - const decl = zcu.declPtr(decl_index); - - namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); - - const comp = zcu.comp; - const decl_mod = namespace.fileScope(zcu).mod; - const want_analysis = declaration.flags.is_export or switch (kind) { - .anon => unreachable, - .@"comptime" => true, - .@"usingnamespace" => a: { - namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); - break :a true; - }, - .named => false, - .@"test" => a: { - if (!comp.config.is_test) break :a false; - if (decl_mod != zcu.main_mod) break :a false; - if (is_named_test and comp.test_filters.len > 0) { - const decl_fqn = try namespace.fullyQualifiedName(zcu, decl_name); - const decl_fqn_slice = decl_fqn.toSlice(ip); - for (comp.test_filters) |test_filter| { - if (mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; - } else break :a false; - } - zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update - break :a true; - }, - }; - - if (want_analysis) { - // We will not queue analysis if the decl has been analyzed on a previous update and - // `is_export` is unchanged. In this case, the incremental update mechanism will handle - // re-analysis for us if necessary. 
- if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { - log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ - namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, - }); - comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); - } - } - - if (decl.getOwnedFunction(zcu) != null) { - // TODO this logic is insufficient; namespaces we don't re-scan may still require - // updated line numbers. Look into this! - // TODO Look into detecting when this would be unnecessary by storing enough state - // in `Decl` to notice that the line number did not change. - comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); - } -} - /// Cancel the creation of an anon decl and delete any references to it. /// If other decls depend on this decl, they must be aborted first. pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 785a5d52e098..8cf69223450b 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -5,6 +5,411 @@ tid: Id, pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ }; +pub fn astGenFile( + pt: Zcu.PerThread, + file: *Zcu.File, + /// This parameter is provided separately from `file` because it is not + /// safe to access `import_table` without a lock, and this index is needed + /// in the call to `updateZirRefs`. + file_index: Zcu.File.Index, + path_digest: Cache.BinDigest, + opt_root_decl: Zcu.Decl.OptionalIndex, +) !void { + assert(!file.mod.isBuiltin()); + + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = zcu.gpa; + + // In any case we need to examine the stat of the file to determine the course of action. + var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); + defer source_file.close(); + + const stat = try source_file.stat(); + + const want_local_cache = file.mod == zcu.main_mod; + const hex_digest = Cache.binToHex(path_digest); + const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; + const zir_dir = cache_directory.handle; + + // Determine whether we need to reload the file from disk and redo parsing and AstGen. + var lock: std.fs.File.Lock = switch (file.status) { + .never_loaded, .retryable_failure => lock: { + // First, load the cached ZIR code, if any. + log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{ + file.sub_file_path, want_local_cache, &hex_digest, + }); + + break :lock .shared; + }, + .parse_failure, .astgen_failure, .success_zir => lock: { + const unchanged_metadata = + stat.size == file.stat.size and + stat.mtime == file.stat.mtime and + stat.inode == file.stat.inode; + + if (unchanged_metadata) { + log.debug("unmodified metadata of file: {s}", .{file.sub_file_path}); + return; + } + + log.debug("metadata changed: {s}", .{file.sub_file_path}); + + break :lock .exclusive; + }, + }; + + // We ask for a lock in order to coordinate with other zig processes. + // If another process is already working on this file, we will get the cached + // version. Likewise if we're working on AstGen and another process asks for + // the cached file, they'll get it. 
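+    // A shared lock is enough to read the cache; updating it below requires the exclusive lock.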
+    const cache_file = while (true) {
+        break zir_dir.createFile(&hex_digest, .{
+            .read = true,
+            .truncate = false,
+            .lock = lock,
+        }) catch |err| switch (err) {
+            error.NotDir => unreachable, // no dir components
+            error.InvalidUtf8 => unreachable, // it's a hex encoded name
+            error.InvalidWtf8 => unreachable, // it's a hex encoded name
+            error.BadPathName => unreachable, // it's a hex encoded name
+            error.NameTooLong => unreachable, // it's a fixed size name
+            error.PipeBusy => unreachable, // it's not a pipe
+            error.WouldBlock => unreachable, // not asking for non-blocking I/O
+            // There are no dir components, so you would think that this was
+            // unreachable, however we have observed on macOS that two processes
+            // racing to do openat() with O_CREAT can manifest in ENOENT.
+            error.FileNotFound => continue,
+
+            else => |e| return e, // Retryable errors are handled at callsite.
+        };
+    };
+    defer cache_file.close();
+
+    while (true) {
+        update: {
+            // First we read the header to determine the lengths of arrays.
+            const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) {
+                // This can happen if Zig bails out of this function between creating
+                // the cached file and writing it.
+                error.EndOfStream => break :update,
+                else => |e| return e,
+            };
+            const unchanged_metadata =
+                stat.size == header.stat_size and
+                stat.mtime == header.stat_mtime and
+                stat.inode == header.stat_inode;
+
+            if (!unchanged_metadata) {
+                log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
+                break :update;
+            }
+            log.debug("AstGen cache hit: {s} instructions_len={d}", .{
+                file.sub_file_path, header.instructions_len,
+            });
+
+            file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) {
+                error.UnexpectedFileSize => {
+                    log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
+                    break :update;
+                },
+                else => |e| return e,
+            };
+            file.zir_loaded = true;
+            file.stat = .{
+                .size = header.stat_size,
+                .inode = header.stat_inode,
+                .mtime = header.stat_mtime,
+            };
+            file.status = .success_zir;
+            log.debug("AstGen cached success: {s}", .{file.sub_file_path});
+
+            // TODO don't report compile errors until Sema @importFile
+            if (file.zir.hasCompileErrors()) {
+                {
+                    comp.mutex.lock();
+                    defer comp.mutex.unlock();
+                    try zcu.failed_files.putNoClobber(gpa, file, null);
+                }
+                file.status = .astgen_failure;
+                return error.AnalysisFail;
+            }
+            return;
+        }
+
+        // If we already have the exclusive lock then it is our job to update.
+        if (builtin.os.tag == .wasi or lock == .exclusive) break;
+        // Otherwise, unlock to give someone a chance to get the exclusive lock
+        // and then upgrade to an exclusive lock.
+        cache_file.unlock();
+        lock = .exclusive;
+        try cache_file.lock(lock);
+    }
+
+    // The cache is definitely stale so delete the contents to avoid an underwrite later.
+    cache_file.setEndPos(0) catch |err| switch (err) {
+        error.FileTooBig => unreachable, // 0 is not too big
+
+        else => |e| return e,
+    };
+
+    pt.lockAndClearFileCompileError(file);
+
+    // If the previous ZIR does not have compile errors, keep it around: `updateZirRefs`
+    // at the end of this function uses it to remap tracked instructions, and frees it
+    // after a successful update. If the new ZIR does have compile errors, the previous
+    // ZIR stays loaded until a later update succeeds. This avoids needlessly tossing
+    // out semantic analysis work when an error is temporarily introduced.
+ if (file.zir_loaded and !file.zir.hasCompileErrors()) { + assert(file.prev_zir == null); + const prev_zir_ptr = try gpa.create(Zir); + file.prev_zir = prev_zir_ptr; + prev_zir_ptr.* = file.zir; + file.zir = undefined; + file.zir_loaded = false; + } + file.unload(gpa); + + if (stat.size > std.math.maxInt(u32)) + return error.FileTooBig; + + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); + defer if (!file.source_loaded) gpa.free(source); + const amt = try source_file.readAll(source); + if (amt != stat.size) + return error.UnexpectedEndOfFile; + + file.stat = .{ + .size = stat.size, + .inode = stat.inode, + .mtime = stat.mtime, + }; + file.source = source; + file.source_loaded = true; + + file.tree = try Ast.parse(gpa, source, .zig); + file.tree_loaded = true; + + // Any potential AST errors are converted to ZIR errors here. + file.zir = try AstGen.generate(gpa, file.tree); + file.zir_loaded = true; + file.status = .success_zir; + log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); + + const safety_buffer = if (Zcu.data_has_safety_tag) + try gpa.alloc([8]u8, file.zir.instructions.len) + else + undefined; + defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer); + const data_ptr = if (Zcu.data_has_safety_tag) + if (file.zir.instructions.len == 0) + @as([*]const u8, undefined) + else + @as([*]const u8, @ptrCast(safety_buffer.ptr)) + else + @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); + if (Zcu.data_has_safety_tag) { + // The `Data` union has a safety tag but in the file format we store it without. + for (file.zir.instructions.items(.data), 0..) |*data, i| { + const as_struct: *const Zcu.HackDataLayout = @ptrCast(data); + safety_buffer[i] = as_struct.data; + } + } + + const header: Zir.Header = .{ + .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), + .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), + .extra_len = @as(u32, @intCast(file.zir.extra.len)), + + .stat_size = stat.size, + .stat_inode = stat.inode, + .stat_mtime = stat.mtime, + }; + var iovecs = [_]std.posix.iovec_const{ + .{ + .base = @as([*]const u8, @ptrCast(&header)), + .len = @sizeOf(Zir.Header), + }, + .{ + .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), + .len = file.zir.instructions.len, + }, + .{ + .base = data_ptr, + .len = file.zir.instructions.len * 8, + }, + .{ + .base = file.zir.string_bytes.ptr, + .len = file.zir.string_bytes.len, + }, + .{ + .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), + .len = file.zir.extra.len * 4, + }, + }; + cache_file.writevAll(&iovecs) catch |err| { + log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ + file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), + }); + }; + + if (file.zir.hasCompileErrors()) { + { + comp.mutex.lock(); + defer comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, null); + } + file.status = .astgen_failure; + return error.AnalysisFail; + } + + if (file.prev_zir) |prev_zir| { + try pt.updateZirRefs(file, file_index, prev_zir.*); + // No need to keep previous ZIR. + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + file.prev_zir = null; + } + + if (opt_root_decl.unwrap()) |root_decl| { + // The root of this file must be re-analyzed, since the file has changed. 
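+        // `outdated_file_root` is shared across AstGen workers, so the Compilation mutex must be held to mutate it.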
+ comp.mutex.lock(); + defer comp.mutex.unlock(); + + log.debug("outdated root Decl: {}", .{root_decl}); + try zcu.outdated_file_root.put(gpa, root_decl, {}); + } +} + +/// This is called from the AstGen thread pool, so must acquire +/// the Compilation mutex when acting on shared state. +fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const new_zir = file.zir; + + var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; + defer inst_map.deinit(gpa); + + try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); + + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); + + // TODO: this should be done after all AstGen workers complete, to avoid + // iterating over this full set for every updated file. + for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { + const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); + if (ti.file != file_index) continue; + const old_inst = ti.inst; + ti.inst = inst_map.get(ti.inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + log.debug("tracking failed for %{d}", .{old_inst}); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); + continue; + }; + + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; + } + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + ti.inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); + } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); + } + + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
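+        // Only container declarations (struct/union/enum/opaque) own a namespace; other instructions are skipped.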
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, + else => false, + }, + else => false, + }; + if (!has_namespace) continue; + + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + try old_names.put(zcu.gpa, name_ip, {}); + } + } + var any_change = false; + { + var it = new_zir.declIterator(ti.inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added + any_change = true; + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = ti_idx, + .name = name_ip, + } }); + } + } + // The only elements remaining in `old_names` now are any names which were removed. + for (old_names.keys()) |name_ip| { + any_change = true; + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = ti_idx, + .name = name_ip, + } }); + } + + if (any_change) { + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); + } + } +} + /// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl. 
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| { @@ -91,7 +496,7 @@ pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.Sem }; } - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer decl_prog_node.end(); break :blk pt.semaDecl(decl_index) catch |err| switch (err) { @@ -290,7 +695,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("../print_air.zig").dump(pt, air, liveness); std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); @@ -324,7 +729,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai }; } - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer codegen_prog_node.end(); if (!air.typesFullyResolved(zcu)) { @@ -434,7 +839,7 @@ fn getFileRootStruct( decl.owns_tv = true; decl.analysis = .complete; - try zcu.scanNamespace(namespace_index, decls, decl); + try pt.scanNamespace(namespace_index, decls, decl); try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -502,7 +907,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: const decls = file.zir.bodySlice(extra_index, decls_len); if (!type_outdated) { - try zcu.scanNamespace(decl.src_namespace, decls, decl); + try pt.scanNamespace(decl.src_namespace, decls, decl); } return false; @@ -539,7 +944,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - new_decl.name = try file.fullyQualifiedName(zcu); + new_decl.name = try file.fullyQualifiedName(pt); new_decl.name_fully_qualified = true; new_decl.is_pub = true; new_decl.is_exported = false; @@ -601,9 +1006,9 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); + log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); + log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); } const old_has_tv = decl.has_tv; @@ -631,7 +1036,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); const std_namespace = std_decl.getInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = 
zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; if (decl.src_namespace != builtin_namespace) break :ip_index .none; @@ -802,7 +1207,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls); + break :blk try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls); }; decl.@"addrspace" = blk: { const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) { @@ -996,7 +1401,7 @@ fn newEmbedFile( } }); const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) }, } }); const ptr_ty = (try pt.ptrType(.{ @@ -1018,7 +1423,7 @@ fn newEmbedFile( result.* = new_file; new_file.* = .{ - .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls), + .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls), .owner = pkg, .stat = stat, .val = ptr_val, @@ -1027,6 +1432,271 @@ fn newEmbedFile( return ptr_val; } +pub fn scanNamespace( + pt: Zcu.PerThread, + namespace_index: Zcu.Namespace.Index, + decls: []const Zir.Inst.Index, + parent_decl: *Zcu.Decl, +) Allocator.Error!void { + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const gpa = zcu.gpa; + const namespace = zcu.namespacePtr(namespace_index); + + // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather + // than their name. We'll build an efficient mapping now, then discard the current `decls`. + var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index) = .{}; + defer existing_by_inst.deinit(gpa); + + try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); + + for (namespace.decls.keys()) |decl_index| { + const decl = zcu.declPtr(decl_index); + existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); + } + + var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer seen_decls.deinit(gpa); + + try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); + + namespace.decls.clearRetainingCapacity(); + try namespace.decls.ensureTotalCapacity(gpa, decls.len); + + namespace.usingnamespace_set.clearRetainingCapacity(); + + var scan_decl_iter: ScanDeclIter = .{ + .pt = pt, + .namespace_index = namespace_index, + .parent_decl = parent_decl, + .seen_decls = &seen_decls, + .existing_by_inst = &existing_by_inst, + .pass = .named, + }; + for (decls) |decl_inst| { + try scan_decl_iter.scanDecl(decl_inst); + } + scan_decl_iter.pass = .unnamed; + for (decls) |decl_inst| { + try scan_decl_iter.scanDecl(decl_inst); + } + + if (seen_decls.count() != namespace.decls.count()) { + // Do a pass over the namespace contents and remove any decls from the last update + // which were removed in this one. 
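+        // `seen_decls` holds every name produced by the two scan passes, so any decl missing from it was deleted.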
+ var i: usize = 0; + while (i < namespace.decls.count()) { + const decl_index = namespace.decls.keys()[i]; + const decl = zcu.declPtr(decl_index); + if (!seen_decls.contains(decl.name)) { + // We must preserve namespace ordering for @typeInfo. + namespace.decls.orderedRemoveAt(i); + i -= 1; + } + } + } +} + +const ScanDeclIter = struct { + pt: Zcu.PerThread, + namespace_index: Zcu.Namespace.Index, + parent_decl: *Zcu.Decl, + seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), + existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index), + /// Decl scanning is run in two passes, so that we can detect when a generated + /// name would clash with an explicit name and use a different one. + pass: enum { named, unnamed }, + usingnamespace_index: usize = 0, + comptime_index: usize = 0, + unnamed_test_index: usize = 0, + + fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString { + const pt = iter.pt; + const gpa = pt.zcu.gpa; + const ip = &pt.zcu.intern_pool; + var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls); + var gop = try iter.seen_decls.getOrPut(gpa, name); + var next_suffix: u32 = 0; + while (gop.found_existing) { + name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls); + gop = try iter.seen_decls.getOrPut(gpa, name); + next_suffix += 1; + } + return name; + } + + fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void { + const tracy = trace(@src()); + defer tracy.end(); + + const pt = iter.pt; + const zcu = pt.zcu; + const namespace_index = iter.namespace_index; + const namespace = zcu.namespacePtr(namespace_index); + const gpa = zcu.gpa; + const zir = namespace.fileScope(zcu).zir; + const ip = &zcu.intern_pool; + + const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; + const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); + const declaration = extra.data; + + // Every Decl needs a name. + const decl_name: InternPool.NullTerminatedString, const kind: Zcu.Decl.Kind, const is_named_test: bool = switch (declaration.name) { + .@"comptime" => info: { + if (iter.pass != .unnamed) return; + const i = iter.comptime_index; + iter.comptime_index += 1; + break :info .{ + try iter.avoidNameConflict("comptime_{d}", .{i}), + .@"comptime", + false, + }; + }, + .@"usingnamespace" => info: { + // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here. + // The problem is, we need to preserve the decl ordering for `@typeInfo`. + // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway. + if (iter.pass != .named) return; + const i = iter.usingnamespace_index; + iter.usingnamespace_index += 1; + break :info .{ + try iter.avoidNameConflict("usingnamespace_{d}", .{i}), + .@"usingnamespace", + false, + }; + }, + .unnamed_test => info: { + if (iter.pass != .unnamed) return; + const i = iter.unnamed_test_index; + iter.unnamed_test_index += 1; + break :info .{ + try iter.avoidNameConflict("test_{d}", .{i}), + .@"test", + false, + }; + }, + .decltest => info: { + // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. 
+ if (iter.pass != .unnamed) return; + assert(declaration.flags.has_doc_comment); + const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); + break :info .{ + try iter.avoidNameConflict("decltest.{s}", .{name}), + .@"test", + true, + }; + }, + _ => if (declaration.name.isNamedTest(zir)) info: { + // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. + if (iter.pass != .unnamed) return; + break :info .{ + try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), + .@"test", + true, + }; + } else info: { + if (iter.pass != .named) return; + const name = try ip.getOrPutString( + gpa, + pt.tid, + zir.nullTerminatedString(declaration.name.toString(zir).?), + .no_embedded_nulls, + ); + try iter.seen_decls.putNoClobber(gpa, name, {}); + break :info .{ + name, + .named, + false, + }; + }, + }; + + switch (kind) { + .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), + .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), + else => {}, + } + + const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); + const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); + + // We create a Decl for it regardless of analysis status. + + const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { + // We need only update this existing Decl. + const decl = zcu.declPtr(decl_index); + const was_exported = decl.is_exported; + assert(decl.kind == kind); // ZIR tracking should preserve this + decl.name = decl_name; + decl.is_pub = declaration.flags.is_pub; + decl.is_exported = declaration.flags.is_export; + break :decl_index .{ was_exported, decl_index }; + } else decl_index: { + // Create and set up a new Decl. + const new_decl_index = try zcu.allocateNewDecl(namespace_index); + const new_decl = zcu.declPtr(new_decl_index); + new_decl.kind = kind; + new_decl.name = decl_name; + new_decl.is_pub = declaration.flags.is_pub; + new_decl.is_exported = declaration.flags.is_export; + new_decl.zir_decl_index = tracked_inst.toOptional(); + break :decl_index .{ false, new_decl_index }; + }; + + const decl = zcu.declPtr(decl_index); + + namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); + + const comp = zcu.comp; + const decl_mod = namespace.fileScope(zcu).mod; + const want_analysis = declaration.flags.is_export or switch (kind) { + .anon => unreachable, + .@"comptime" => true, + .@"usingnamespace" => a: { + namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); + break :a true; + }, + .named => false, + .@"test" => a: { + if (!comp.config.is_test) break :a false; + if (decl_mod != zcu.main_mod) break :a false; + if (is_named_test and comp.test_filters.len > 0) { + const decl_fqn = try namespace.fullyQualifiedName(pt, decl_name); + const decl_fqn_slice = decl_fqn.toSlice(ip); + for (comp.test_filters) |test_filter| { + if (std.mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; + } else break :a false; + } + zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update + break :a true; + }, + }; + + if (want_analysis) { + // We will not queue analysis if the decl has been analyzed on a previous update and + // `is_export` is unchanged. In this case, the incremental update mechanism will handle + // re-analysis for us if necessary. 
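+            // (`.unreferenced` means the decl has never been analyzed at all.)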
+ if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { + log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ + namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, + }); + comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); + } + } + + if (decl.getOwnedFunction(zcu) != null) { + // TODO this logic is insufficient; namespaces we don't re-scan may still require + // updated line numbers. Look into this! + // TODO Look into detecting when this would be unnecessary by storing enough state + // in `Decl` to notice that the line number did not change. + comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); + } + } +}; + pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -1038,12 +1708,12 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)}); + log.debug("func name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); defer blk: { - log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); + log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); } - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer decl_prog_node.end(); mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); @@ -1273,6 +1943,19 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All }; } +fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { + switch (file.status) { + .success_zir, .retryable_failure => {}, + .never_loaded, .parse_failure, .astgen_failure => { + pt.zcu.comp.mutex.lock(); + defer pt.zcu.comp.mutex.unlock(); + if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| { + if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // Delete previous error message. + } + }, + } +} + /// Called from `Compilation.update`, after everything is done, just before /// reporting compile errors. In this function we emit exported symbol collision /// errors and communicate exported symbols to the linker backend. 
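The hunks below repeat one mechanical change from this patch series: call sites that used to receive a `*Zcu` now receive a `Zcu.PerThread`, and `InternPool` string interning gains a `pt.tid` argument (the per-thread id introduced at the top of PerThread.zig). A minimal sketch of the new convention, assuming the surrounding compiler imports; the `internDeclName` helper itself is illustrative, not part of the patch:

    fn internDeclName(pt: Zcu.PerThread, name: []const u8) !InternPool.NullTerminatedString {
        const zcu = pt.zcu;
        const ip = &zcu.intern_pool;
        // Every getOrPutString-style call now threads `pt.tid` through,
        // exactly as in the `populateTestFunctions` hunk below.
        return ip.getOrPutString(zcu.gpa, pt.tid, name, .no_embedded_nulls);
    }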
@@ -1397,7 +2080,7 @@ pub fn populateTestFunctions( const root_decl_index = zcu.fileRootDecl(builtin_file_index); const root_decl = zcu.declPtr(root_decl_index.unwrap().?); const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); - const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); + const test_functions_str = try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls); const decl_index = builtin_namespace.decls.getKeyAdapted( test_functions_str, Zcu.DeclAdapter{ .zcu = zcu }, @@ -1424,7 +2107,7 @@ pub fn populateTestFunctions( for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(zcu); + const test_decl_name = try test_decl.fullyQualifiedName(pt); const test_decl_name_len = test_decl_name.length(ip); const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { const test_name_ty = try pt.arrayType(.{ @@ -1530,7 +2213,7 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { const decl = zcu.declPtr(decl_index); - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool), 0); defer codegen_prog_node.end(); if (comp.bin_file) |lf| { @@ -2064,11 +2747,11 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); - const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + const name_str = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); } @@ -2082,6 +2765,8 @@ pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const Ast = std.zig.Ast; +const AstGen = std.zig.AstGen; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const build_options = @import("build_options"); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a8ac674e07d3..8873c5cb1bbb 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2204,14 +2204,14 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; if (func_val.getFunction(mod)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + _ = try 
func.bin_file.getOrCreateAtomForDecl(pt, function.owner_decl); break :blk function.owner_decl; } else if (func_val.getExternFunc(mod)) |extern_func| { const ext_decl = mod.declPtr(extern_func.decl); const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( @@ -2224,7 +2224,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => |decl| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl); + _ = try func.bin_file.getOrCreateAtomForDecl(pt, decl); break :blk decl; }, else => {}, @@ -3227,7 +3227,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u return WValue{ .imm32 = 0xaaaaaaaa }; } - const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, decl_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = @intFromEnum(atom.sym_index); @@ -7284,7 +7284,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(mod); + const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(pt); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(ip)}); // check if we already generated code for this. 
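As in the wasm backend hunk above, `Decl.fullyQualifiedName` now takes the per-thread handle instead of the `*Zcu`, presumably because building the qualified name may intern a new string. A hedged sketch of an updated call site; `declDebugName` is an illustrative helper, not from this patch:

    fn declDebugName(pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) ![:0]const u8 {
        const zcu = pt.zcu;
        const decl = zcu.declPtr(decl_index);
        // Was `decl.fullyQualifiedName(zcu)` before this patch.
        const fqn = try decl.fullyQualifiedName(pt);
        return fqn.toSlice(&zcu.intern_pool);
    }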
diff --git a/src/codegen.zig b/src/codegen.zig index 5fc8ef174f1c..0513682d73c4 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -756,7 +756,7 @@ fn lowerDeclRef( return Result.ok; } - const vaddr = try lf.getDeclVAddr(decl_index, .{ + const vaddr = try lf.getDeclVAddr(pt, decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = @intCast(offset), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ca574070bf73..0f13c9fd9b05 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1744,7 +1744,7 @@ pub const Object = struct { if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(zcu)).toSlice(ip)); + const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(pt)).toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -2520,7 +2520,7 @@ pub const Object = struct { const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); fields.appendAssumeCapacity(try o.builder.debugMemberType( try o.builder.metadataString(field_name.toSlice(ip)), @@ -2807,17 +2807,18 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; - const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); + const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. 
const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; @@ -2865,7 +2866,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(ip)), + try decl.fullyQualifiedName(pt)).toSlice(ip)), toLlvmAddressSpace(decl.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -3074,7 +3075,8 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3082,7 +3084,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool)), + try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool)), try o.lowerType(decl.typeOf(zcu)), toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), ); @@ -3310,7 +3312,7 @@ pub const Object = struct { return int_ty; } - const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(mod); + const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(pt); var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); @@ -3464,7 +3466,7 @@ pub const Object = struct { return enum_tag_ty; } - const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(mod); + const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(pt); const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3525,7 +3527,7 @@ pub const Object = struct { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); } return gop.value_ptr.*; @@ -4585,7 +4587,7 @@ pub const Object = struct { const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), @@ -5173,7 +5175,7 @@ pub const FuncGen = struct { const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const fn_ty = try pt.funcType(.{ .param_types = &.{}, @@ -9707,7 +9709,7 @@ pub const FuncGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), diff --git a/src/codegen/spirv.zig 
b/src/codegen/spirv.zig index 95874a5d65bb..92cff8b2d004 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1753,7 +1753,7 @@ const DeclGen = struct { } const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); try member_types.append(try self.resolveType(field_ty, .indirect)); try member_names.append(field_name.toSlice(ip)); } @@ -3012,7 +3012,7 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. @@ -3041,7 +3041,7 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, @@ -3086,7 +3086,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ diff --git a/src/link.zig b/src/link.zig index db19a16d4d3d..f407ad2f4c33 100644 --- a/src/link.zig +++ b/src/link.zig @@ -424,14 +424,14 @@ pub const File = struct { } } - pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void { - const decl = module.declPtr(decl_index); + pub fn updateDeclLineNumber(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void { + const decl = pt.zcu.declPtr(decl_index); assert(decl.has_tv); switch (base.tag) { .spirv, .nvptx => {}, inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(module, decl_index); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(pt, decl_index); }, } } @@ -626,14 +626,14 @@ pub const File = struct { /// `Decl`'s address was not yet resolved, or the containing atom gets moved in virtual memory. /// May be called before or after updateFunc/updateDecl therefore it is up to the linker to allocate /// the block/atom. 
- pub fn getDeclVAddr(base: *File, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { + pub fn getDeclVAddr(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(decl_index, reloc_info); + return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(pt, decl_index, reloc_info); }, } } diff --git a/src/link/C.zig b/src/link/C.zig index 3db5952a4c60..1a6cee068ebc 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -383,11 +383,11 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items); } -pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. _ = self; - _ = zcu; + _ = pt; _ = decl_index; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 7ef5bde6e637..bd1c96bf8b4e 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1176,7 +1176,7 @@ pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); @@ -1427,7 +1427,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0); @@ -1855,7 +1855,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no assert(!self.imports_count_dirty); } -pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Coff, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); const this_atom_index = try self.getOrCreateAtomForDecl(decl_index); @@ -1972,9 +1972,9 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8, lib_name_name: ?[]const u8 return global_index; } -pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = module; + _ = pt; _ = decl_index; log.debug("TODO implement updateDeclLineNumber", .{}); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 80c88666bcf8..9ae4ee3be66c 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1082,7 +1082,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec defer tracy.end(); const decl = pt.zcu.declPtr(decl_index); - const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu); + const 
decl_linkage_name = try decl.fullyQualifiedName(pt); log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl }); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 39704d937c6f..579df0760a6c 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -543,7 +543,7 @@ pub fn deinit(self: *Elf) void { self.comdat_group_sections.deinit(gpa); } -pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Elf, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.zigObjectPtr().?.getDeclVAddr(self, decl_index, reloc_info); } @@ -3021,9 +3021,9 @@ pub fn updateExports( return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices); } -pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Elf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + return self.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } pub fn deleteExport( diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8cfa5e701ffb..7a419750d4af 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -908,7 +908,7 @@ fn updateDeclCode( const gpa = elf_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1009,7 +1009,7 @@ fn updateTlv( const mod = pt.zcu; const gpa = mod.gpa; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1286,7 +1286,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1466,19 +1466,19 @@ pub fn updateExports( /// Must be called only after a successful call to `updateDecl`. 
pub fn updateDeclLineNumber( self: *ZigObject, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index d0c78bc2c2e7..ff083d367c10 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3198,9 +3198,9 @@ pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIn return self.getZigObject().?.updateDecl(self, pt, decl_index); } -pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.getZigObject().?.updateDeclLineNumber(module, decl_index); + return self.getZigObject().?.updateDeclLineNumber(pt, decl_index); } pub fn updateExports( @@ -3230,7 +3230,7 @@ pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { return self.getZigObject().?.freeDecl(decl_index); } -pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *MachO, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.getZigObject().?.getDeclVAddr(self, decl_index, reloc_info); } diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index ffe362038d52..03e659c497c5 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -810,7 +810,7 @@ fn updateDeclCode( const gpa = macho_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -893,13 +893,12 @@ fn updateTlv( sect_index: u8, code: []const u8, ) !void { - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); - const decl_name_slice = decl_name.toSlice(&mod.intern_pool); + const decl_name_slice = decl_name.toSlice(&pt.zcu.intern_pool); const required_alignment = decl.getAlignment(pt); // 1. Lower TLV initializer @@ -1100,7 +1099,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1363,9 +1362,9 @@ fn updateLazySymbol( } /// Must be called only after a successful call to `updateDecl`. 
-pub fn updateDeclLineNumber(self: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *ZigObject, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 827c974180f9..cfc8435906fb 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -483,7 +483,7 @@ pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -1496,22 +1496,22 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } /// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = mod; + _ = pt; _ = decl_index; } pub fn getDeclVAddr( self: *Plan9, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - const mod = self.base.comp.module.?; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const ip = &pt.zcu.intern_pool; + const decl = pt.zcu.declPtr(decl_index); log.debug("getDeclVAddr for {}", .{decl.name.fmt(ip)}); - if (decl.isExtern(mod)) { + if (decl.isExtern(pt.zcu)) { if (decl.name.eqlSlice("etext", ip)) { try self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4e661e33e4d6..32af00413280 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1457,9 +1457,9 @@ pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclInd try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index); } -pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (wasm.llvm_object) |_| return; - try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + try wasm.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1521,10 +1521,11 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !Sy /// Returns the given pointer address pub fn getDeclVAddr( wasm: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - return wasm.zigObjectPtr().?.getDeclVAddr(wasm, decl_index, reloc_info); + return wasm.zigObjectPtr().?.getDeclVAddr(wasm, pt, decl_index, reloc_info); } pub fn lowerAnonDecl( @@ -4016,8 +4017,8 @@ pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 { /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, decl_index); +pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !Atom.Index { + return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, pt, decl_index); } /// Verifies all resolved symbols and checks whether itself needs to be marked alive, diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index a693902743ae..f95c8fc7945c 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -253,7 +253,7 @@ pub fn updateDecl( } const gpa = wasm_file.base.comp.gpa; - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -302,7 +302,7 @@ pub fn updateFunc( const func = pt.zcu.funcInfo(func_index); const decl_index = func.owner_decl; const decl = pt.zcu.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -346,7 +346,7 @@ fn finishUpdateDecl( const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - const full_name = try decl.fullyQualifiedName(zcu); + const full_name = try decl.fullyQualifiedName(pt); sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); @@ -424,17 +424,21 @@ fn createDataSegment( /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - const gpa = wasm_file.base.comp.gpa; +pub fn getOrCreateAtomForDecl( + zig_object: *ZigObject, + wasm_file: *Wasm, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !Atom.Index { + const gpa = pt.zcu.gpa; const gop = try zig_object.decls_map.getOrPut(gpa, decl_index); if (!gop.found_existing) { const sym_index = try zig_object.allocateSymbol(gpa); gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) }; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const full_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const full_name = try decl.fullyQualifiedName(pt); const sym = zig_object.symbol(sym_index); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&pt.zcu.intern_pool)); } return gop.value_ptr.atom; } @@ -487,10 +491,10 @@ pub fn lowerUnnamedConst( std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); - const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const parent_atom = wasm_file.getAtom(parent_atom_index); const local_index = parent_atom.locals.items.len; - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{ fqn.fmt(&mod.intern_pool), local_index, }); @@ -775,22 +779,22 @@ pub fn getGlobalSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator, name: []c pub fn getDeclVAddr( zig_object: *ZigObject, wasm_file: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { const target = wasm_file.base.comp.root_mod.resolved_target.result; - const gpa = wasm_file.base.comp.gpa; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + const gpa = pt.zcu.gpa; + const decl = pt.zcu.declPtr(decl_index); - const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const target_symbol_index = @intFromEnum(wasm_file.getAtom(target_atom_index).sym_index); std.debug.assert(reloc_info.parent_atom_index != 0); const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; const atom = wasm_file.getAtomPtr(atom_index); const is_wasm32 = target.cpu.arch == .wasm32; - if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) { + if (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu) == .Fn) { std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations try atom.relocs.append(gpa, .{ .index = target_symbol_index, @@ -890,7 +894,7 @@ pub fn updateExports( }, }; const decl = mod.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const decl_info = zig_object.decls_map.getPtr(decl_index).?; const atom = wasm_file.getAtom(atom_index); const atom_sym = atom.symbolLoc().getSymbol(wasm_file).*; @@ -1116,13 +1120,17 @@ pub fn 
createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde return atom_index; } -pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber( + zig_object: *ZigObject, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !void { if (zig_object.dwarf) |*dw| { - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - try dw.updateDeclLineNumber(mod, decl_index); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 0ca2d1d31760..63f198dfa71e 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -71,7 +71,7 @@ pub const MutableValue = union(enum) { } }), .bytes => |b| try pt.intern(.{ .aggregate = .{ .ty = b.ty, - .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) }, } }), .aggregate => |a| { const elems = try arena.alloc(InternPool.Index, a.elems.len); From cda716ecc43929fd1c2c9679335b8b22f1b67d1a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 16:18:41 -0400 Subject: [PATCH 063/152] InternPool: implement thread-safe hash map --- lib/std/Thread/Pool.zig | 4 + src/Compilation.zig | 4 +- src/InternPool.zig | 635 ++++++++++++++++++++++++++++------------ src/Zcu.zig | 4 +- 4 files changed, 452 insertions(+), 195 deletions(-) diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 03ca8ffc8eba..9fb3c3483a67 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -291,3 +291,7 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { return; } } + +pub fn getIdCount(pool: *Pool) usize { + return 1 + pool.threads.len; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index 1f4c425bc53b..7e10febf0e64 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1397,7 +1397,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .error_limit = error_limit, .llvm_object = null, }; - try zcu.init(); + try zcu.init(options.thread_pool.getIdCount()); break :blk zcu; } else blk: { if (options.emit_h != null) return error.NoZigModuleForCHeader; @@ -2156,7 +2156,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { if (build_options.enable_debug_extensions and comp.verbose_generic_instances) { std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, - @as(usize, @intFromPtr(zcu)), + @intFromPtr(zcu), }); zcu.intern_pool.dumpGenericInstances(gpa); } diff --git a/src/InternPool.zig b/src/InternPool.zig index 97fd35bf201b..13fb9be24e89 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,9 +2,10 @@ //! This data structure is self-contained, with the following exceptions: //! * Module.Namespace has a pointer to Module.File -/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are -/// constructed lazily. 
-map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +local: []Local = &.{}, +shard_shift: std.math.Log2Int(usize) = 0, +shards: []Shard = &.{}, + items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. @@ -351,6 +352,115 @@ pub const DepEntry = extern struct { }; }; +const Local = struct { + aligned: void align(std.atomic.cache_line) = {}, + + /// node: Garbage.Node, + /// header: List.Header, + /// data: [capacity]u32, + /// tag: [capacity]Tag, + items: List, + + /// node: Garbage.Node, + /// header: List.Header, + /// extra: [capacity]u32, + extra: List, + + garbage: Garbage, + + const List = struct { + entries: [*]u32, + + const empty: List = .{ + .entries = @constCast(&[_]u32{ 0, 0 })[Header.fields_len..].ptr, + }; + + fn acquire(list: *const List) List { + return .{ .entries = @atomicLoad([*]u32, &list.entries, .acquire) }; + } + fn release(list: *List, new_list: List) void { + @atomicStore([*]u32, &list.entries, new_list.entries, .release); + } + + const Header = extern struct { + len: u32, + capacity: u32, + + const fields_len = @typeInfo(Header).Struct.fields.len; + }; + fn header(list: List) *Header { + return @ptrCast(list.entries - Header.fields_len); + } + }; + + const Garbage = std.SinglyLinkedList(struct { buf_len: usize }); + const garbage_align = @max(@alignOf(Garbage.Node), @alignOf(u32)); + + fn freeGarbage(garbage: *const Garbage.Node, gpa: Allocator) void { + gpa.free(@as([*]align(Local.garbage_align) const u8, @ptrCast(garbage))[0..garbage.data.buf_len]); + } +}; + +const Shard = struct { + aligned: void align(std.atomic.cache_line) = {}, + + mutate_mutex: std.Thread.Mutex.Recursive, + + /// node: Local.Garbage.Node, + /// header: Map.Header, + /// entries: [capacity]Map.Entry, + map: Map, + + const Map = struct { + entries: [*]u32, + + const empty: Map = .{ + .entries = @constCast(&[_]u32{ 0, 1, @intFromEnum(Index.none), 0 })[Header.fields_len..].ptr, + }; + + fn acquire(map: *const Map) Map { + return .{ .entries = @atomicLoad([*]u32, &map.entries, .acquire) }; + } + fn release(map: *Map, new_map: Map) void { + @atomicStore([*]u32, &map.entries, new_map.entries, .release); + } + + const Header = extern struct { + len: u32, + capacity: u32, + + const fields_len: u32 = @typeInfo(Header).Struct.fields.len; + + fn mask(head: *const Header) u32 { + assert(std.math.isPowerOfTwo(head.capacity)); + assert(std.math.isPowerOfTwo(Entry.fields_len)); + return (head.capacity - 1) * Entry.fields_len; + } + }; + fn header(map: Map) *Header { + return @ptrCast(map.entries - Header.fields_len); + } + + const Entry = extern struct { + index: Index, + hash: u32, + + const fields_len: u32 = @typeInfo(Entry).Struct.fields.len; + + fn acquire(entry: *const Entry) Index { + return @atomicLoad(Index, &entry.index, .acquire); + } + fn release(entry: *Entry, index: Index) void { + @atomicStore(Index, &entry.index, index, .release); + } + }; + fn at(map: Map, index: usize) *Entry { + assert(index % Entry.fields_len == 0); + return @ptrCast(&map.entries[index]); + } + }; +}; + const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); const builtin = @import("builtin"); @@ -369,20 +479,6 @@ const Zcu = @import("Zcu.zig"); const Module = Zcu; const Zir = std.zig.Zir; -const KeyAdapter = struct { - intern_pool: *const InternPool, - - pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { - _ = b_void; - if 
(ctx.intern_pool.items.items(.tag)[b_map_index] == .removed) return false; - return ctx.intern_pool.indexToKey(@enumFromInt(b_map_index)).eql(a, ctx.intern_pool); - } - - pub fn hash(ctx: @This(), a: Key) u32 { - return a.hash32(ctx.intern_pool); - } -}; - /// An index into `maps` which might be `none`. pub const OptionalMapIndex = enum(u32) { none = std.math.maxInt(u32), @@ -4535,17 +4631,27 @@ pub const MemoizedCall = struct { result: Index, }; -pub fn init(ip: *InternPool, gpa: Allocator) !void { +pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { + errdefer ip.deinit(gpa); assert(ip.items.len == 0); + ip.local = try gpa.alloc(Local, total_threads); + @memset(ip.local, .{ + .items = Local.List.empty, + .extra = Local.List.empty, + .garbage = .{}, + }); + + ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); + ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.shard_shift); + @memset(ip.shards, .{ + .mutate_mutex = std.Thread.Mutex.Recursive.init, + .map = Shard.Map.empty, + }); + // Reserve string index 0 for an empty string. assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty); - // So that we can use `catch unreachable` below. - try ip.items.ensureUnusedCapacity(gpa, static_keys.len); - try ip.map.ensureUnusedCapacity(gpa, static_keys.len); - try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); - // This inserts all the statically-known values into the intern pool in the // order expected. for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) { @@ -4574,12 +4680,9 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); } - - assert(ip.items.len == static_keys.len); } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); @@ -4611,6 +4714,16 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); + gpa.free(ip.shards); + for (ip.local) |*local| { + var next = local.garbage.first; + while (next) |cur| { + next = cur.next; + Local.freeGarbage(cur, gpa); + } + } + gpa.free(ip.local); + ip.* = undefined; } @@ -5239,10 +5352,133 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key } }; } +const GetOrPutKey = union(enum) { + existing: Index, + new: struct { + shard: *Shard, + map_index: u32, + }, + + fn set(gop: *GetOrPutKey, index: Index) Index { + switch (gop.*) { + .existing => unreachable, + .new => |info| { + info.shard.map.at(info.map_index).release(index); + info.shard.map.header().len += 1; + info.shard.mutate_mutex.unlock(); + }, + } + gop.* = .{ .existing = index }; + return index; + } + + fn assign(gop: *GetOrPutKey, new_gop: GetOrPutKey) void { + gop.deinit(); + gop.* = new_gop; + } + + fn deinit(gop: *GetOrPutKey) void { + switch (gop.*) { + .existing => {}, + .new => |info| info.shard.mutate_mutex.unlock(), + } + gop.* = undefined; + } +}; +fn getOrPutKey( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + key: Key, +) Allocator.Error!GetOrPutKey { + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + var map = shard.map.acquire(); + var map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const 
entry = map.at(map_index); + const index = entry.acquire(); + if (index == .none) break; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) + return .{ .existing = index }; + } + shard.mutate_mutex.lock(); + errdefer shard.mutate_mutex.unlock(); + if (map.entries != shard.map.entries) { + map = shard.map; + map_mask = map.header().mask(); + map_index = hash; + } + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const entry = map.at(map_index); + const index = entry.index; + if (index == .none) break; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) { + defer shard.mutate_mutex.unlock(); + return .{ .existing = index }; + } + } + const map_header = map.header().*; + if (map_header.len >= map_header.capacity * 3 / 5) { + const new_map_capacity = map_header.capacity * 2; + const new_map_buf = try gpa.alignedAlloc( + u8, + Local.garbage_align, + @sizeOf(Local.Garbage.Node) + (Shard.Map.Header.fields_len + + new_map_capacity * Shard.Map.Entry.fields_len) * @sizeOf(u32), + ); + const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); + new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; + ip.local[@intFromEnum(tid)].garbage.prepend(new_node); + const new_map_entries = std.mem.bytesAsSlice( + u32, + new_map_buf[@sizeOf(Local.Garbage.Node)..], + )[Shard.Map.Header.fields_len..]; + const new_map: Shard.Map = .{ .entries = new_map_entries.ptr }; + new_map.header().* = .{ + .len = map_header.len, + .capacity = new_map_capacity, + }; + @memset(new_map_entries, @intFromEnum(Index.none)); + const new_map_mask = new_map.header().mask(); + map_index = 0; + while (map_index < map_header.capacity * 2) : (map_index += Shard.Map.Entry.fields_len) { + const entry = map.at(map_index); + const index = entry.index; + if (index == .none) continue; + const item_hash = entry.hash; + var new_map_index = item_hash; + while (true) : (new_map_index += Shard.Map.Entry.fields_len) { + new_map_index &= new_map_mask; + const new_entry = new_map.at(new_map_index); + if (new_entry.index != .none) continue; + new_entry.* = .{ + .index = index, + .hash = item_hash, + }; + break; + } + } + map = new_map; + map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= new_map_mask; + if (map.at(map_index).index == .none) break; + } + shard.map.release(new_map); + } + map.at(map_index).hash = hash; + return .{ .new = .{ .shard = shard, .map_index = map_index } }; +} + pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @enumFromInt(gop.index); + var gop = try ip.getOrPutKey(gpa, tid, key); + defer gop.deinit(); + if (gop == .existing) return gop.existing; try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -5260,18 +5496,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); if (ptr_type.flags.size == .Slice) { - _ = ip.map.pop(); var new_key = key; new_key.ptr_type.flags.size = .Many; const ptr_type_index = try ip.get(gpa, tid, new_key); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .type_slice, .data = 
@intFromEnum(ptr_type_index), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } var ptr_type_adjusted = ptr_type; @@ -5295,7 +5530,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .child = array_type.child, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } } @@ -5442,11 +5677,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: { if (ptr.ty != anon_decl.orig_ty) { - _ = ip.map.pop(); var new_key = key; new_key.ptr.base_addr.anon_decl.orig_ty = ptr.ty; - const new_gop = try ip.map.getOrPutAdapted(gpa, new_key, adapter); - if (new_gop.found_existing) return @enumFromInt(new_gop.index); + gop.assign(try ip.getOrPutKey(gpa, tid, new_key)); + if (gop == .existing) return gop.existing; } break :item .{ .tag = .ptr_anon_decl, @@ -5486,7 +5720,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .ptr_int, .data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)), }, - .arr_elem, .field => |base_index| item: { + .arr_elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; switch (ptr.base_addr) { .arr_elem => assert(base_ptr_type.flags.size == .Many), @@ -5516,21 +5750,21 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, else => unreachable, } - _ = ip.map.pop(); const index_index = try ip.get(gpa, tid, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); - break :item .{ + ip.items.appendAssumeCapacity(.{ .tag = switch (ptr.base_addr) { .arr_elem => .ptr_elem, .field => .ptr_field, else => unreachable, }, .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), - }; + }); + return gop.set(@enumFromInt(ip.items.len - 1)); }, }); }, @@ -5566,7 +5800,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_ty = lazy_ty, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); }, } switch (int.ty) { @@ -5707,7 +5941,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .value = casted, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -5722,7 +5956,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .value = casted, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } var buf: [2]Limb = undefined; @@ -5881,7 +6115,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } switch (ty_key) { @@ -5914,7 +6148,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); }, else => 
{}, } @@ -5929,12 +6163,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } const elem = switch (aggregate.storage) { .bytes => |bytes| elem: { - _ = ip.map.pop(); const elem = try ip.get(gpa, tid, .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(0, ip) }, } }); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); break :elem elem; }, @@ -5953,7 +6186,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .elem_val = elem, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } if (child == .u8_type) bytes: { @@ -5997,7 +6230,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .bytes = string, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } try ip.extra.ensureUnusedCapacity( @@ -6038,7 +6271,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); }, } - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const UnionTypeInit = struct { @@ -6076,11 +6309,10 @@ pub const UnionTypeInit = struct { pub fn getUnionType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: UnionTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -6089,9 +6321,9 @@ pub fn getUnionType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); @@ -6167,7 +6399,7 @@ pub fn getUnionType( } return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").? 
@@ -6225,11 +6457,10 @@ pub const StructTypeInit = struct { pub fn getStructType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: StructTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const key: Key = .{ .struct_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -6238,10 +6469,9 @@ pub fn getStructType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6298,7 +6528,7 @@ pub fn getStructType( ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); } return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").? @@ -6387,7 +6617,7 @@ pub fn getStructType( } ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, } }; @@ -6404,7 +6634,7 @@ pub const AnonStructTypeInit = struct { pub fn getAnonStructType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: AnonStructTypeInit, ) Allocator.Error!Index { assert(ini.types.len == ini.values.len); @@ -6424,25 +6654,26 @@ pub fn getAnonStructType( }); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types)); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const key: Key = .{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: { assert(ini.names.len == ini.types.len); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); break :k extraTypeStructAnon(ip, extra_index); }, - }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } + ip.items.appendAssumeCapacity(.{ .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, .data = extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } /// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. @@ -6463,7 +6694,7 @@ pub const GetFuncTypeKey = struct { pub fn getFuncType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncTypeKey, ) Allocator.Error!Index { // Validate input parameters. 
@@ -6501,33 +6732,33 @@ pub fn getFuncType( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .type_function, .data = func_type_extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub fn getExternFunc( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: Key.ExternFunc, ) Allocator.Error!Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter); - if (gop.found_existing) return @enumFromInt(gop.index); - errdefer _ = ip.map.pop(); + var gop = try ip.getOrPutKey(gpa, tid, .{ .extern_func = key }); + defer gop.deinit(); + if (gop == .existing) return gop.existing; const prev_extra_len = ip.extra.items.len; const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); errdefer ip.extra.items.len = prev_extra_len; @@ -6536,7 +6767,7 @@ pub fn getExternFunc( .data = extra_index, }); errdefer ip.items.len -= 1; - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncDeclKey = struct { @@ -6554,7 +6785,7 @@ pub const GetFuncDeclKey = struct { pub fn getFuncDecl( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncDeclKey, ) Allocator.Error!Index { // The strategy here is to add the function type unconditionally, then to @@ -6564,7 +6795,6 @@ pub fn getFuncDecl( try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); try ip.items.ensureUnusedCapacity(gpa, 1); - try ip.map.ensureUnusedCapacity(gpa, 1); const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ .analysis = .{ @@ -6583,22 +6813,22 @@ pub fn getFuncDecl( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), - }, adapter); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .func_decl, .data = func_decl_extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncDeclIesKey = struct { @@ -6626,7 +6856,7 @@ pub const GetFuncDeclIesKey = struct { pub fn getFuncDeclIes( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncDeclIesKey, ) Allocator.Error!Index { // Validate input parameters. 
@@ -6639,7 +6869,6 @@ pub fn getFuncDeclIes( const prev_extra_len = ip.extra.items.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.map.ensureUnusedCapacity(gpa, 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + 1 + // inferred_error_set @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @@ -6704,40 +6933,51 @@ pub fn getFuncDeclIes( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + errdefer { + ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + } ip.items.appendAssumeCapacity(.{ .tag = .type_function, .data = func_type_extra_index, }); - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), - }, adapter); - if (!gop.found_existing) { - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ - .error_set_type = @enumFromInt(ip.items.len - 2), - .payload_type = key.bare_return_type, - } }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ - .inferred_error_set_type = @enumFromInt(ip.items.len - 4), - }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ - .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter).found_existing); - return @enumFromInt(ip.items.len - 4); - } - - // An existing function type was found; undo the additions to our two arrays. - ip.items.len -= 4; - ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + }); + defer gop.deinit(); + if (gop == .existing) { + // An existing function type was found; undo the additions to our two arrays. 
+ ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + return gop.existing; + } + + var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + .error_set_type = @enumFromInt(ip.items.len - 2), + .payload_type = key.bare_return_type, + } }); + defer eu_gop.deinit(); + var ies_gop = try ip.getOrPutKey(gpa, tid, .{ + .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + }); + defer ies_gop.deinit(); + var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + .func_type = extraFuncType(ip, func_type_extra_index), + }); + defer ty_gop.deinit(); + const index = gop.set(@enumFromInt(ip.items.len - 4)); + _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); + _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); + _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); + return index; } pub fn getErrorSetType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, names: []const NullTerminatedString, ) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); @@ -6757,16 +6997,15 @@ pub fn getErrorSetType( .names_map = predicted_names_map, }); ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .error_set_type = extraErrorSet(ip, error_set_extra_index), - }, adapter); - errdefer _ = ip.map.pop(); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } try ip.items.append(gpa, .{ @@ -6781,7 +7020,7 @@ pub fn getErrorSetType( addStringsToMap(ip, names_map, names); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncInstanceKey = struct { @@ -6845,14 +7084,13 @@ pub fn getFuncInstance( }); ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, func_extra_index), - }, KeyAdapter{ .intern_pool = ip }); - errdefer _ = ip.map.pop(); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } const func_index: Index = @enumFromInt(ip.items.len); @@ -6863,7 +7101,7 @@ pub fn getFuncInstance( }); errdefer ip.items.len -= 1; - return finishFuncInstance( + return gop.set(try finishFuncInstance( ip, gpa, tid, @@ -6872,7 +7110,7 @@ pub fn getFuncInstance( func_extra_index, arg.alignment, arg.section, - ); + )); } /// This function exists separately than `getFuncInstance` because it needs to @@ -6897,7 +7135,6 @@ pub fn getFuncInstanceIes( const prev_extra_len = ip.extra.items.len; const params_len: u32 = @intCast(arg.param_types.len); - try ip.map.ensureUnusedCapacity(gpa, 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + 1 + // inferred_error_set arg.comptime_args.len + @@ -6970,30 +7207,37 @@ pub fn getFuncInstanceIes( .tag = .type_function, .data = func_type_extra_index, }); + errdefer { + ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + } - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, 
func_extra_index), - }, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { // Hot path: undo the additions to our two arrays. ip.items.len -= 4; ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } // Synchronize the map with items. - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ + var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, - } }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + } }); + defer eu_gop.deinit(); + var ies_gop = try ip.getOrPutKey(gpa, tid, .{ .inferred_error_set_type = func_index, - }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + }); + defer ies_gop.deinit(); + var ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter).found_existing); - return finishFuncInstance( + }); + defer ty_gop.deinit(); + const index = gop.set(try finishFuncInstance( ip, gpa, tid, @@ -7002,7 +7246,11 @@ pub fn getFuncInstanceIes( func_extra_index, arg.alignment, arg.section, - ); + )); + _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); + _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); + _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); + return index; } fn finishFuncInstance( @@ -7135,11 +7383,10 @@ pub const WipEnumType = struct { pub fn getEnumType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: EnumTypeInit, ) Allocator.Error!WipEnumType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .enum_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -7148,10 +7395,9 @@ pub fn getEnumType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - assert(gop.index == ip.items.len); - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; try ip.items.ensureUnusedCapacity(gpa, 1); @@ -7196,7 +7442,7 @@ pub fn getEnumType( const names_start = ip.extra.items.len; ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, @@ -7260,7 +7506,7 @@ pub fn getEnumType( ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); } return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? 
else null, @@ -7288,14 +7534,13 @@ const GeneratedTagEnumTypeInit = struct { pub fn getGeneratedTagEnumType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: GeneratedTagEnumTypeInit, ) Allocator.Error!Index { assert(ip.isUnion(ini.owner_union_ty)); assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - try ip.map.ensureUnusedCapacity(gpa, 1); try ip.items.ensureUnusedCapacity(gpa, 1); const names_map = try ip.addMap(gpa, ini.names.len); @@ -7304,6 +7549,7 @@ pub fn getGeneratedTagEnumType( const fields_len: u32 = @intCast(ini.names.len); + const prev_extra_len = ip.extra.items.len; switch (ini.tag_mode) { .auto => { try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + @@ -7360,17 +7606,17 @@ pub fn getGeneratedTagEnumType( ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); }, } - // Same as above - errdefer @compileError("error path leaks values_map and extra data"); + errdefer ip.extra.items.len = prev_extra_len; + errdefer switch (ini.tag_mode) { + .auto => {}, + .explicit, .nonexhaustive => _ = if (ini.values.len != 0) ip.maps.pop(), + }; - // Capacity for this was ensured earlier - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ .enum_type = .{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = .{ .generated_tag = .{ .union_type = ini.owner_union_ty }, - } }, adapter); - assert(!gop.found_existing); - assert(gop.index == ip.items.len - 1); - return @enumFromInt(gop.index); + } }); + defer gop.deinit(); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const OpaqueTypeInit = struct { @@ -7390,11 +7636,10 @@ pub const OpaqueTypeInit = struct { pub fn getOpaqueType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: OpaqueTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -7403,9 +7648,9 @@ pub fn getOpaqueType( .zir_index = r.zir_index, .type_hash = 0, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; try ip.items.ensureUnusedCapacity(gpa, 1); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, @@ -7431,7 +7676,7 @@ pub fn getOpaqueType( .reified => {}, } return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? 
@@ -7441,9 +7686,19 @@ pub fn getOpaqueType( } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @enumFromInt(index); + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + const map = shard.map.acquire(); + const map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const entry = map.at(map_index); + const index = entry.acquire(); + if (index == .none) return null; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) return index; + } } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -7506,7 +7761,6 @@ pub fn remove(ip: *InternPool, index: Index) void { if (@intFromEnum(index) == ip.items.len - 1) { // Happy case - we can just drop the item without affecting any other indices. ip.items.len -= 1; - _ = ip.map.pop(); } else { // We must preserve the item so that indices following it remain valid. // Thus, we will rewrite the tag to `removed`, leaking the item until @@ -8133,35 +8387,34 @@ fn getCoercedFuncInstance( fn getCoercedFunc( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, func: Index, ty: Index, ) Allocator.Error!Index { const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); try ip.items.ensureUnusedCapacity(gpa, 1); - try ip.map.ensureUnusedCapacity(gpa, 1); const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ .ty = ty, .func = func, }); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncCoerced(ip, extra_index), - }, adapter); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .func_coerced, .data = extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } /// Asserts `val` has an integer type. diff --git a/src/Zcu.zig b/src/Zcu.zig index c4ebc6a36b88..32c9045910ff 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2394,9 +2394,9 @@ pub const CompileError = error{ ComptimeBreak, }; -pub fn init(mod: *Module) !void { +pub fn init(mod: *Module, thread_count: usize) !void { const gpa = mod.gpa; - try mod.intern_pool.init(gpa); + try mod.intern_pool.init(gpa, thread_count); try mod.global_error_set.put(gpa, .empty, {}); } From c8b9364b30adfdd1716b20428a3d934eac75cc87 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 19:58:29 -0400 Subject: [PATCH 064/152] InternPool: use thread-safe hash map for strings --- src/InternPool.zig | 384 ++++++++++++++++++++++++++++++--------------- 1 file changed, 254 insertions(+), 130 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 13fb9be24e89..b6ef6a2e080e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -46,14 +46,6 @@ namespaces_free_list: std.ArrayListUnmanaged(NamespaceIndex) = .{}, /// These are not serialized; it is computed upon deserialization. 
maps: std.ArrayListUnmanaged(FieldMap) = .{}, -/// Used for finding the index inside `string_bytes`. -string_table: std.HashMapUnmanaged( - u32, - void, - std.hash_map.StringIndexContext, - std.hash_map.default_max_load_percentage, -) = .{}, - /// An index into `tracked_insts` gives a reference to a single ZIR instruction which /// persists across incremental updates. tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{}, @@ -358,22 +350,31 @@ const Local = struct { /// node: Garbage.Node, /// header: List.Header, /// data: [capacity]u32, - /// tag: [capacity]Tag, + /// tag: [header.capacity]Tag, items: List, /// node: Garbage.Node, /// header: List.Header, - /// extra: [capacity]u32, + /// extra: [header.capacity]u32, extra: List, + /// node: Garbage.Node, + /// header: List.Header, + /// bytes: [header.capacity]u8, + strings: List, + garbage: Garbage, const List = struct { entries: [*]u32, - const empty: List = .{ - .entries = @constCast(&[_]u32{ 0, 0 })[Header.fields_len..].ptr, - }; + const empty: List = .{ .entries = @constCast(&(extern struct { + header: Header, + entries: [0]u32, + }{ + .header = .{ .len = 0, .capacity = 0 }, + .entries = .{}, + }).entries) }; fn acquire(list: *const List) List { return .{ .entries = @atomicLoad([*]u32, &list.entries, .acquire) }; @@ -402,63 +403,75 @@ const Local = struct { }; const Shard = struct { - aligned: void align(std.atomic.cache_line) = {}, - - mutate_mutex: std.Thread.Mutex.Recursive, - - /// node: Local.Garbage.Node, - /// header: Map.Header, - /// entries: [capacity]Map.Entry, - map: Map, + shared: struct { + map: Map(Index), + string_map: Map(OptionalNullTerminatedString), + } align(std.atomic.cache_line), + mutate: struct { + // TODO: measure cost of sharing unrelated mutate state + map: Mutate align(std.atomic.cache_line), + string_map: Mutate align(std.atomic.cache_line), + }, - const Map = struct { - entries: [*]u32, + const Mutate = struct { + mutex: std.Thread.Mutex.Recursive, + len: u32, - const empty: Map = .{ - .entries = @constCast(&[_]u32{ 0, 1, @intFromEnum(Index.none), 0 })[Header.fields_len..].ptr, + const empty: Mutate = .{ + .mutex = std.Thread.Mutex.Recursive.init, + .len = 0, }; + }; - fn acquire(map: *const Map) Map { - return .{ .entries = @atomicLoad([*]u32, &map.entries, .acquire) }; - } - fn release(map: *Map, new_map: Map) void { - @atomicStore([*]u32, &map.entries, new_map.entries, .release); - } - - const Header = extern struct { - len: u32, - capacity: u32, + fn Map(comptime Value: type) type { + comptime assert(@typeInfo(Value).Enum.tag_type == u32); + _ = @as(Value, .none); // expected .none key + return struct { + /// node: Local.Garbage.Node, + /// header: Header, + /// entries: [header.capacity]Entry, + entries: [*]Entry, + + const empty: @This() = .{ .entries = @constCast(&(extern struct { + header: Header, + entries: [1]Entry, + }{ + .header = .{ .capacity = 1 }, + .entries = .{.{ .value = .none, .hash = undefined }}, + }).entries) }; + + fn acquire(map: *const @This()) @This() { + return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) }; + } + fn release(map: *@This(), new_map: @This()) void { + @atomicStore([*]Entry, &map.entries, new_map.entries, .release); + } - const fields_len: u32 = @typeInfo(Header).Struct.fields.len; + const Header = extern struct { + capacity: u32, - fn mask(head: *const Header) u32 { - assert(std.math.isPowerOfTwo(head.capacity)); - assert(std.math.isPowerOfTwo(Entry.fields_len)); - return (head.capacity - 1) * Entry.fields_len; + fn mask(head: 
*const Header) u32 { + assert(std.math.isPowerOfTwo(head.capacity)); + return head.capacity - 1; + } + }; + fn header(map: @This()) *Header { + return &(@as([*]Header, @ptrCast(map.entries)) - 1)[0]; } - }; - fn header(map: Map) *Header { - return @ptrCast(map.entries - Header.fields_len); - } - const Entry = extern struct { - index: Index, - hash: u32, + const Entry = extern struct { + value: Value, + hash: u32, - const fields_len: u32 = @typeInfo(Entry).Struct.fields.len; - - fn acquire(entry: *const Entry) Index { - return @atomicLoad(Index, &entry.index, .acquire); - } - fn release(entry: *Entry, index: Index) void { - @atomicStore(Index, &entry.index, index, .release); - } + fn acquire(entry: *const Entry) Value { + return @atomicLoad(Value, &entry.value, .acquire); + } + fn release(entry: *Entry, value: Value) void { + @atomicStore(Value, &entry.value, value, .release); + } + }; }; - fn at(map: Map, index: usize) *Entry { - assert(index % Entry.fields_len == 0); - return @ptrCast(&map.entries[index]); - } - }; + } }; const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); @@ -618,9 +631,13 @@ pub const NullTerminatedString = enum(u32) { return @enumFromInt(@intFromEnum(self)); } + fn toOverlongSlice(string: NullTerminatedString, ip: *const InternPool) []const u8 { + return ip.string_bytes.items[@intFromEnum(string)..]; + } + pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 { - const slice = ip.string_bytes.items[@intFromEnum(string)..]; - return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0]; + const overlong_slice = string.toOverlongSlice(ip); + return overlong_slice[0..std.mem.indexOfScalar(u8, overlong_slice, 0).? :0]; } pub fn length(string: NullTerminatedString, ip: *const InternPool) u32 { @@ -628,7 +645,10 @@ pub const NullTerminatedString = enum(u32) { } pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool { - return std.mem.eql(u8, string.toSlice(ip), slice); + const overlong_slice = string.toOverlongSlice(ip); + return overlong_slice.len > slice.len and + std.mem.eql(u8, overlong_slice[0..slice.len], slice) and + overlong_slice[slice.len] == 0; } const Adapter = struct { @@ -4639,14 +4659,21 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { @memset(ip.local, .{ .items = Local.List.empty, .extra = Local.List.empty, + .strings = Local.List.empty, .garbage = .{}, }); ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.shard_shift); @memset(ip.shards, .{ - .mutate_mutex = std.Thread.Mutex.Recursive.init, - .map = Shard.Map.empty, + .shared = .{ + .map = Shard.Map(Index).empty, + .string_map = Shard.Map(OptionalNullTerminatedString).empty, + }, + .mutate = .{ + .map = Shard.Mutate.empty, + .string_map = Shard.Mutate.empty, + }, }); // Reserve string index 0 for an empty string. 
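The `Shard.Map` reads above are lock-free: a writer fully initializes an entry (the hash with a plain store, then the value with a release store) and publishes grown tables the same way, so a reader's acquire load observes either `.none` or a complete entry. A standalone sketch of that reader side, with hypothetical names rather than the actual `Shard.Map` API:

    const Entry = extern struct { value: u32, hash: u32 };

    // Probe a published table for `hash`. `entries_ptr` is the pointer a writer
    // republishes with a release store after fully building a grown table, and
    // `mask` is capacity - 1 (capacity is a power of two). Returns the stored
    // value, or null on hitting an empty slot; 0 stands in for `Index.none`
    // here, and the deep key-equality check is elided.
    fn lookup(entries_ptr: *[*]Entry, mask: u32, hash: u32) ?u32 {
        const entries = @atomicLoad([*]Entry, entries_ptr, .acquire);
        var i = hash;
        while (true) : (i += 1) {
            i &= mask;
            const value = @atomicLoad(u32, &entries[i].value, .acquire);
            if (value == 0) return null;
            if (entries[i].hash == hash) return value;
        }
    }

Only misses fall through to `mutate.map.mutex`; map growth and entry insertion take the lock, readers never do.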
@@ -4697,8 +4724,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); - ip.string_table.deinit(gpa); - ip.tracked_insts.deinit(gpa); ip.src_hash_deps.deinit(gpa); @@ -5363,9 +5388,9 @@ const GetOrPutKey = union(enum) { switch (gop.*) { .existing => unreachable, .new => |info| { - info.shard.map.at(info.map_index).release(index); - info.shard.map.header().len += 1; - info.shard.mutate_mutex.unlock(); + info.shard.shared.map.entries[info.map_index].release(index); + info.shard.mutate.map.len += 1; + info.shard.mutate.map.mutex.unlock(); }, } gop.* = .{ .existing = index }; @@ -5380,7 +5405,7 @@ const GetOrPutKey = union(enum) { fn deinit(gop: *GetOrPutKey) void { switch (gop.*) { .existing => {}, - .new => |info| info.shard.mutate_mutex.unlock(), + .new => |info| info.shard.mutate.map.mutex.unlock(), } gop.* = undefined; } @@ -5394,70 +5419,69 @@ fn getOrPutKey( const full_hash = key.hash64(ip); const hash: u32 = @truncate(full_hash >> 32); const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; - var map = shard.map.acquire(); + var map = shard.shared.map.acquire(); + const Map = @TypeOf(map); var map_mask = map.header().mask(); var map_index = hash; - while (true) : (map_index += Shard.Map.Entry.fields_len) { + while (true) : (map_index += 1) { map_index &= map_mask; - const entry = map.at(map_index); + const entry = &map.entries[map_index]; const index = entry.acquire(); if (index == .none) break; - if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) - return .{ .existing = index }; + if (entry.hash != hash) continue; + if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index }; } - shard.mutate_mutex.lock(); - errdefer shard.mutate_mutex.unlock(); - if (map.entries != shard.map.entries) { - map = shard.map; + shard.mutate.map.mutex.lock(); + errdefer shard.mutate.map.mutex.unlock(); + if (map.entries != shard.shared.map.entries) { + map = shard.shared.map; map_mask = map.header().mask(); map_index = hash; } - while (true) : (map_index += Shard.Map.Entry.fields_len) { + while (true) : (map_index += 1) { map_index &= map_mask; - const entry = map.at(map_index); - const index = entry.index; + const entry = &map.entries[map_index]; + const index = entry.value; if (index == .none) break; - if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) { - defer shard.mutate_mutex.unlock(); + if (entry.hash != hash) continue; + if (ip.indexToKey(index).eql(key, ip)) { + defer shard.mutate.map.mutex.unlock(); return .{ .existing = index }; } } const map_header = map.header().*; - if (map_header.len >= map_header.capacity * 3 / 5) { + if (shard.mutate.map.len >= map_header.capacity * 3 / 5) { const new_map_capacity = map_header.capacity * 2; const new_map_buf = try gpa.alignedAlloc( u8, Local.garbage_align, - @sizeOf(Local.Garbage.Node) + (Shard.Map.Header.fields_len + - new_map_capacity * Shard.Map.Entry.fields_len) * @sizeOf(u32), + @sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) + + new_map_capacity * @sizeOf(Map.Entry), ); const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; ip.local[@intFromEnum(tid)].garbage.prepend(new_node); const new_map_entries = std.mem.bytesAsSlice( - u32, - new_map_buf[@sizeOf(Local.Garbage.Node)..], - )[Shard.Map.Header.fields_len..]; - const new_map: Shard.Map = .{ .entries = new_map_entries.ptr }; - new_map.header().* = .{ - .len = map_header.len, - .capacity = new_map_capacity, - }; - 
@memset(new_map_entries, @intFromEnum(Index.none)); + Map.Entry, + new_map_buf[@sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) ..], + ); + const new_map: Map = .{ .entries = new_map_entries.ptr }; + new_map.header().* = .{ .capacity = new_map_capacity }; + @memset(new_map_entries, .{ .value = .none, .hash = undefined }); const new_map_mask = new_map.header().mask(); map_index = 0; - while (map_index < map_header.capacity * 2) : (map_index += Shard.Map.Entry.fields_len) { - const entry = map.at(map_index); - const index = entry.index; + while (map_index < map_header.capacity) : (map_index += 1) { + const entry = &map.entries[map_index]; + const index = entry.value; if (index == .none) continue; const item_hash = entry.hash; var new_map_index = item_hash; - while (true) : (new_map_index += Shard.Map.Entry.fields_len) { + while (true) : (new_map_index += 1) { new_map_index &= new_map_mask; - const new_entry = new_map.at(new_map_index); - if (new_entry.index != .none) continue; + const new_entry = &new_map.entries[new_map_index]; + if (new_entry.value != .none) continue; new_entry.* = .{ - .index = index, + .value = index, .hash = item_hash, }; break; @@ -5465,13 +5489,13 @@ fn getOrPutKey( } map = new_map; map_index = hash; - while (true) : (map_index += Shard.Map.Entry.fields_len) { + while (true) : (map_index += 1) { map_index &= new_map_mask; - if (map.at(map_index).index == .none) break; + if (map.entries[map_index].value == .none) break; } - shard.map.release(new_map); + shard.shared.map.release(new_map); } - map.at(map_index).hash = hash; + map.entries[map_index].hash = hash; return .{ .new = .{ .shard = shard, .map_index = map_index } }; } @@ -7689,22 +7713,19 @@ pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const full_hash = key.hash64(ip); const hash: u32 = @truncate(full_hash >> 32); const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; - const map = shard.map.acquire(); + const map = shard.shared.map.acquire(); const map_mask = map.header().mask(); var map_index = hash; - while (true) : (map_index += Shard.Map.Entry.fields_len) { + while (true) : (map_index += 1) { map_index &= map_mask; - const entry = map.at(map_index); + const entry = &map.entries[map_index]; const index = entry.acquire(); if (index == .none) return null; - if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) return index; + if (entry.hash != hash) continue; + if (ip.indexToKey(index).eql(key, ip)) return index; } } -pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { - return ip.getIfExists(key).?; -} - fn addStringsToMap( ip: *InternPool, map_index: MapIndex, @@ -8618,7 +8639,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_inferred_error_set => 0, .type_enum_explicit, .type_enum_nonexhaustive => b: { const info = ip.extraData(EnumExplicit, data); - var ints = @typeInfo(EnumExplicit).Struct.fields.len + info.captures_len + info.fields_len; + var ints = @typeInfo(EnumExplicit).Struct.fields.len; + if (info.zir_index == .none) ints += 1; + ints += if (info.captures_len != std.math.maxInt(u32)) + info.captures_len + else + @typeInfo(PackedU64).Struct.fields.len; + ints += info.fields_len; if (info.values_map != .none) ints += info.fields_len; break :b @sizeOf(u32) * ints; }, @@ -9084,7 +9111,6 @@ pub fn getOrPutTrailingString( len: usize, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - _ = tid; const string_bytes = &ip.string_bytes; const str_index: u32 = 
@intCast(string_bytes.items.len - len); if (len > 0 and string_bytes.getLast() == 0) { @@ -9101,25 +9127,123 @@ pub fn getOrPutTrailingString( return @enumFromInt(str_index); }, } - const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ - .bytes = string_bytes, - }, std.hash_map.StringIndexContext{ - .bytes = string_bytes, - }); - if (gop.found_existing) { + const maybe_existing_index = try ip.getOrPutStringValue(gpa, tid, key, @enumFromInt(str_index)); + if (maybe_existing_index.unwrap()) |existing_index| { string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(gop.key_ptr.*); + return @enumFromInt(@intFromEnum(existing_index)); } else { - gop.key_ptr.* = str_index; string_bytes.appendAssumeCapacity(0); return @enumFromInt(str_index); } } -pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { - return if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ - .bytes = &ip.string_bytes, - })) |index| @enumFromInt(index) else .none; +fn getOrPutStringValue( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + key: []const u8, + value: NullTerminatedString, +) Allocator.Error!OptionalNullTerminatedString { + const full_hash = Hash.hash(0, key); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + var map = shard.shared.string_map.acquire(); + const Map = @TypeOf(map); + var map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire().unwrap() orelse break; + if (entry.hash != hash) continue; + if (index.eqlSlice(key, ip)) return index.toOptional(); + } + shard.mutate.string_map.mutex.lock(); + defer shard.mutate.string_map.mutex.unlock(); + if (map.entries != shard.shared.string_map.entries) { + shard.mutate.string_map.len += 1; + map = shard.shared.string_map; + map_mask = map.header().mask(); + map_index = hash; + } + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire().unwrap() orelse break; + if (entry.hash != hash) continue; + if (index.eqlSlice(key, ip)) return index.toOptional(); + } + defer shard.mutate.string_map.len += 1; + const map_header = map.header().*; + if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) { + const entry = &map.entries[map_index]; + entry.hash = hash; + entry.release(value.toOptional()); + return .none; + } + const new_map_capacity = map_header.capacity * 2; + const new_map_buf = try gpa.alignedAlloc( + u8, + Local.garbage_align, + @sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) + + new_map_capacity * @sizeOf(Map.Entry), + ); + const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); + new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; + ip.local[@intFromEnum(tid)].garbage.prepend(new_node); + const new_map_entries = std.mem.bytesAsSlice( + Map.Entry, + new_map_buf[@sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) ..], + ); + const new_map: Map = .{ .entries = new_map_entries.ptr }; + new_map.header().* = .{ .capacity = new_map_capacity }; + @memset(new_map_entries, .{ .value = .none, .hash = undefined }); + const new_map_mask = new_map.header().mask(); + map_index = 0; + while (map_index < map_header.capacity) : (map_index += 1) { + const entry = &map.entries[map_index]; + const index = entry.value.unwrap() orelse continue; + const item_hash = 
entry.hash;
+        var new_map_index = item_hash;
+        while (true) : (new_map_index += 1) {
+            new_map_index &= new_map_mask;
+            const new_entry = &new_map.entries[new_map_index];
+            if (new_entry.value != .none) continue;
+            new_entry.* = .{
+                .value = index.toOptional(),
+                .hash = item_hash,
+            };
+            break;
+        }
+    }
+    map = new_map;
+    map_index = hash;
+    while (true) : (map_index += 1) {
+        map_index &= new_map_mask;
+        if (map.entries[map_index].value == .none) break;
+    }
+    map.entries[map_index] = .{
+        .value = value.toOptional(),
+        .hash = hash,
+    };
+    shard.shared.string_map.release(new_map);
+    return .none;
+}
+
+pub fn getString(ip: *InternPool, key: []const u8) OptionalNullTerminatedString {
+    const full_hash = Hash.hash(0, key);
+    const hash: u32 = @truncate(full_hash >> 32);
+    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
+    const map = shard.shared.string_map.acquire();
+    const map_mask = map.header().mask();
+    var map_index = hash;
+    while (true) : (map_index += 1) {
+        map_index &= map_mask;
+        const entry = &map.entries[map_index];
+        const index = entry.acquire().unwrap() orelse return .none;
+        if (entry.hash != hash) continue;
+        if (index.eqlSlice(key, ip)) return index.toOptional();
+    }
 }
 
 pub fn typeOf(ip: *const InternPool, index: Index) Index {

From 3e1b190fe6955ba051d961494433b8346af2af38 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Wed, 3 Jul 2024 16:35:39 -0400
Subject: [PATCH 065/152] InternPool: replace garbage with an arena

This was just a badly implemented arena anyway.
---
 src/InternPool.zig | 66 ++++++++++++++--------------------------------
 1 file changed, 20 insertions(+), 46 deletions(-)

diff --git a/src/InternPool.zig b/src/InternPool.zig
index b6ef6a2e080e..f0141a9092b0 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -347,23 +347,20 @@ pub const DepEntry = extern struct {
 const Local = struct {
     aligned: void align(std.atomic.cache_line) = {},
 
-    /// node: Garbage.Node,
     /// header: List.Header,
     /// data: [capacity]u32,
     /// tag: [header.capacity]Tag,
     items: List,
 
-    /// node: Garbage.Node,
     /// header: List.Header,
     /// extra: [header.capacity]u32,
     extra: List,
 
-    /// node: Garbage.Node,
     /// header: List.Header,
     /// bytes: [header.capacity]u8,
     strings: List,
 
-    garbage: Garbage,
+    arena: std.heap.ArenaAllocator.State,
 
     const List = struct {
         entries: [*]u32,
@@ -393,13 +390,6 @@ const Local = struct {
             return @ptrCast(list.entries - Header.fields_len);
         }
     };
-
-    const Garbage = std.SinglyLinkedList(struct { buf_len: usize });
-    const garbage_align = @max(@alignOf(Garbage.Node), @alignOf(u32));
-
-    fn freeGarbage(garbage: *const Garbage.Node, gpa: Allocator) void {
-        gpa.free(@as([*]align(Local.garbage_align) const u8, @ptrCast(garbage))[0..garbage.data.buf_len]);
-    }
 };
 
 const Shard = struct {
@@ -427,7 +417,6 @@ const Shard = struct {
         comptime assert(@typeInfo(Value).Enum.tag_type == u32);
         _ = @as(Value, .none); // expected .none key
         return struct {
-            /// node: Local.Garbage.Node,
             /// header: Header,
             /// entries: [header.capacity]Entry,
             entries: [*]Entry,
@@ -440,6 +429,9 @@ const Shard = struct {
                 .entries = .{.{ .value = .none, .hash = undefined }},
             }).entries) };
 
+            const alignment = @max(@alignOf(Header), @alignOf(Entry));
+            const entries_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Entry));
+
             fn acquire(map: *const @This()) @This() {
                 return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) };
             }
@@ -4660,7 +4652,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void {
         .items = Local.List.empty,
         .extra =
Local.List.empty, .strings = Local.List.empty, - .garbage = .{}, + .arena = .{}, }); ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); @@ -4740,13 +4732,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); gpa.free(ip.shards); - for (ip.local) |*local| { - var next = local.garbage.first; - while (next) |cur| { - next = cur.next; - Local.freeGarbage(cur, gpa); - } - } + for (ip.local) |*local| local.arena.promote(gpa).deinit(); gpa.free(ip.local); ip.* = undefined; @@ -5451,23 +5437,17 @@ fn getOrPutKey( } const map_header = map.header().*; if (shard.mutate.map.len >= map_header.capacity * 3 / 5) { + var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); + defer ip.local[@intFromEnum(tid)].arena = arena.state; const new_map_capacity = map_header.capacity * 2; - const new_map_buf = try gpa.alignedAlloc( + const new_map_buf = try arena.allocator().alignedAlloc( u8, - Local.garbage_align, - @sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) + - new_map_capacity * @sizeOf(Map.Entry), + Map.alignment, + Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry), ); - const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); - new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; - ip.local[@intFromEnum(tid)].garbage.prepend(new_node); - const new_map_entries = std.mem.bytesAsSlice( - Map.Entry, - new_map_buf[@sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) ..], - ); - const new_map: Map = .{ .entries = new_map_entries.ptr }; + const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) }; new_map.header().* = .{ .capacity = new_map_capacity }; - @memset(new_map_entries, .{ .value = .none, .hash = undefined }); + @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined }); const new_map_mask = new_map.header().mask(); map_index = 0; while (map_index < map_header.capacity) : (map_index += 1) { @@ -9181,23 +9161,17 @@ fn getOrPutStringValue( entry.release(value.toOptional()); return .none; } + var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); + defer ip.local[@intFromEnum(tid)].arena = arena.state; const new_map_capacity = map_header.capacity * 2; - const new_map_buf = try gpa.alignedAlloc( + const new_map_buf = try arena.allocator().alignedAlloc( u8, - Local.garbage_align, - @sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) + - new_map_capacity * @sizeOf(Map.Entry), - ); - const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); - new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; - ip.local[@intFromEnum(tid)].garbage.prepend(new_node); - const new_map_entries = std.mem.bytesAsSlice( - Map.Entry, - new_map_buf[@sizeOf(Local.Garbage.Node) + @sizeOf(Map.Header) ..], + Map.alignment, + Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry), ); - const new_map: Map = .{ .entries = new_map_entries.ptr }; + const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) }; new_map.header().* = .{ .capacity = new_map_capacity }; - @memset(new_map_entries, .{ .value = .none, .hash = undefined }); + @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined }); const new_map_mask = new_map.header().mask(); map_index = 0; while (map_index < map_header.capacity) : (map_index += 1) { From 8293ff94cf2798a2678b91019979472d34273bdb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 3 Jul 2024 22:37:09 -0400 Subject: [PATCH 066/152] InternPool: implement and use thread-safe list for strings --- lib/std/multi_array_list.zig 
| 2 +- src/Compilation.zig | 6 +- src/InternPool.zig | 460 +++++++++++++++++++++++++---------- src/Value.zig | 17 +- src/Zcu.zig | 41 ++-- src/Zcu/PerThread.zig | 11 +- 6 files changed, 380 insertions(+), 157 deletions(-) diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index d7327f8bee96..cfe77f11b5b8 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -534,7 +534,7 @@ pub fn MultiArrayList(comptime T: type) type { self.sortInternal(a, b, ctx, .unstable); } - fn capacityInBytes(capacity: usize) usize { + pub fn capacityInBytes(capacity: usize) usize { comptime var elem_bytes: usize = 0; inline for (sizes.bytes) |size| elem_bytes += size; return elem_bytes * capacity; diff --git a/src/Compilation.zig b/src/Compilation.zig index 7e10febf0e64..14d109bab372 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2748,7 +2748,7 @@ const Header = extern struct { items_len: u32, extra_len: u32, limbs_len: u32, - string_bytes_len: u32, + //string_bytes_len: u32, tracked_insts_len: u32, src_hash_deps_len: u32, decl_val_deps_len: u32, @@ -2777,7 +2777,7 @@ pub fn saveState(comp: *Compilation) !void { .items_len = @intCast(ip.items.len), .extra_len = @intCast(ip.extra.items.len), .limbs_len = @intCast(ip.limbs.items.len), - .string_bytes_len = @intCast(ip.string_bytes.items.len), + //.string_bytes_len = @intCast(ip.string_bytes.items.len), .tracked_insts_len = @intCast(ip.tracked_insts.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .decl_val_deps_len = @intCast(ip.decl_val_deps.count()), @@ -2794,7 +2794,7 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); - addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); + //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index f0141a9092b0..6b875c2288e8 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,9 +2,11 @@ //! This data structure is self-contained, with the following exceptions: //! * Module.Namespace has a pointer to Module.File -local: []Local = &.{}, -shard_shift: std.math.Log2Int(usize) = 0, +locals: []Local = &.{}, shards: []Shard = &.{}, +tid_width: std.math.Log2Int(u32) = 0, +tid_shift_31: std.math.Log2Int(u32) = 31, +tid_shift_32: std.math.Log2Int(u32) = 31, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, @@ -13,12 +15,6 @@ extra: std.ArrayListUnmanaged(u32) = .{}, /// Use the helper methods instead of accessing this directly in order to not /// violate the above mechanism. limbs: std.ArrayListUnmanaged(u64) = .{}, -/// In order to store references to strings in fewer bytes, we copy all -/// string bytes into here. String bytes can be null. It is up to whomever -/// is referencing the data here whether they want to store both index and length, -/// thus allowing null bytes, or store only index, and use null-termination. The -/// `string_bytes` array is agnostic to either usage. -string_bytes: std.ArrayListUnmanaged(u8) = .{}, /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this SegmentedList. 
This provides four advantages: @@ -345,52 +341,237 @@ pub const DepEntry = extern struct { }; const Local = struct { - aligned: void align(std.atomic.cache_line) = {}, + shared: Shared align(std.atomic.cache_line), + mutate: struct { + arena: std.heap.ArenaAllocator.State, + strings: Mutate, + } align(std.atomic.cache_line), - /// header: List.Header, - /// data: [capacity]u32, - /// tag: [header.capacity]Tag, - items: List, + const Shared = struct { + strings: Strings, + }; - /// header: List.Header, - /// extra: [header.capacity]u32, - extra: List, + const Strings = List(struct { u8 }); - /// header: List.Header, - /// bytes: [header.capacity]u8, - strings: List, + const Mutate = struct { + len: u32, - arena: std.heap.ArenaAllocator.State, + const empty: Mutate = .{ + .len = 0, + }; + }; - const List = struct { - entries: [*]u32, + fn List(comptime Elem: type) type { + assert(@typeInfo(Elem) == .Struct); + return struct { + bytes: [*]align(@alignOf(Elem)) u8, + + const ListSelf = @This(); + const Mutable = struct { + gpa: std.mem.Allocator, + arena: *std.heap.ArenaAllocator.State, + mutate: *Mutate, + list: *ListSelf, + + const fields = std.enums.values(std.meta.FieldEnum(Elem)); + + fn Slice(comptime opts: struct { is_const: bool = false }) type { + const elem_info = @typeInfo(Elem).Struct; + const elem_fields = elem_info.fields; + var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; + for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ + .name = elem_field.name, + .type = @Type(.{ .Pointer = .{ + .size = .Slice, + .is_const = opts.is_const, + .is_volatile = false, + .alignment = 0, + .address_space = .generic, + .child = elem_field.type, + .is_allowzero = false, + .sentinel = null, + } }), + .default_value = null, + .is_comptime = false, + .alignment = 0, + }; + return @Type(.{ .Struct = .{ + .layout = .auto, + .fields = &new_fields, + .decls = &.{}, + .is_tuple = elem_info.is_tuple, + } }); + } - const empty: List = .{ .entries = @constCast(&(extern struct { - header: Header, - entries: [0]u32, - }{ - .header = .{ .len = 0, .capacity = 0 }, - .entries = .{}, - }).entries) }; + pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { + var mutable_view = mutable.view(); + defer mutable.lenPtr().* = @intCast(mutable_view.len); + mutable_view.appendAssumeCapacity(elem); + } - fn acquire(list: *const List) List { - return .{ .entries = @atomicLoad([*]u32, &list.entries, .acquire) }; - } - fn release(list: *List, new_list: List) void { - @atomicStore([*]u32, &list.entries, new_list.entries, .release); - } + pub fn appendSliceAssumeCapacity( + mutable: Mutable, + slice: Slice(.{ .is_const = true }), + ) void { + if (fields.len == 0) return; + const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + const slice_len = @field(slice, @tagName(fields[0])).len; + assert(slice_len < mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + slice_len); + const mutable_view = mutable.view(); + inline for (fields) |field| { + const field_slice = @field(slice, @tagName(field)); + assert(field_slice.len == slice_len); + @memcpy(mutable_view.items(field)[start..][0..slice_len], field_slice); + } + } - const Header = extern struct { - len: u32, - capacity: u32, + pub fn appendNTimes(mutable: Mutable, elem: Elem, len: usize) Allocator.Error!void { + try mutable.ensureUnusedCapacity(len); + mutable.appendNTimesAssumeCapacity(elem, len); + } + + pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void { + 
const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + assert(len <= mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + len); + const mutable_view = mutable.view(); + inline for (fields) |field| { + @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); + } + } + + pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!Slice(.{}) { + try mutable.ensureUnusedCapacity(len); + return mutable.addManyAsSliceAssumeCapacity(len); + } + + pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) Slice(.{}) { + const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + assert(len <= mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + len); + const mutable_view = mutable.view(); + var slice: Slice(.{}) = undefined; + inline for (fields) |field| { + @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; + } + return slice; + } + + pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void { + const mutable_len = mutable.lenPtr(); + assert(len <= mutable_len.*); + mutable_len.* = @intCast(len); + } + + pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void { + try mutable.ensureTotalCapacity(@intCast(mutable.lenPtr().* + unused_capacity)); + } + + pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void { + const old_capacity = mutable.capacityPtr().*; + if (old_capacity >= total_capacity) return; + var new_capacity = old_capacity; + while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2; + try mutable.setCapacity(new_capacity); + } + + fn setCapacity(mutable: Mutable, capacity: u32) Allocator.Error!void { + var arena = mutable.arena.promote(mutable.gpa); + defer mutable.arena.* = arena.state; + const buf = try arena.allocator().alignedAlloc( + u8, + alignment, + bytes_offset + View.capacityInBytes(capacity), + ); + var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; + new_list.header().* = .{ .capacity = capacity }; + const len = mutable.lenPtr().*; + const old_slice = mutable.list.view().slice(); + const new_slice = new_list.view().slice(); + inline for (fields) |field| { + @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); + } + mutable.list.release(new_list); + } + + fn view(mutable: Mutable) View { + return .{ + .bytes = mutable.list.bytes, + .len = mutable.lenPtr().*, + .capacity = mutable.capacityPtr().*, + }; + } + + pub fn lenPtr(mutable: Mutable) *u32 { + return &mutable.mutate.len; + } + + pub fn capacityPtr(mutable: Mutable) *u32 { + return &mutable.list.header().capacity; + } + }; + + const empty: ListSelf = .{ .bytes = @constCast(&(extern struct { + header: Header, + bytes: [0]u8, + }{ + .header = .{ .capacity = 0 }, + .bytes = .{}, + }).bytes) }; - const fields_len = @typeInfo(Header).Struct.fields.len; + const alignment = @max(@alignOf(Header), @alignOf(Elem)); + const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem)); + const View = std.MultiArrayList(Elem); + + fn acquire(list: *const ListSelf) ListSelf { + return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) }; + } + fn release(list: *ListSelf, new_list: ListSelf) void { + @atomicStore([*]align(@alignOf(Elem)) u8, &list.bytes, new_list.bytes, .release); + } + + const Header = extern struct { + capacity: u32, + }; + fn header(list: ListSelf) *Header { + return @ptrFromInt(@intFromPtr(list.bytes) - 
bytes_offset); + } + + fn view(list: ListSelf) View { + const capacity = list.header().capacity; + return .{ + .bytes = list.bytes, + .len = capacity, + .capacity = capacity, + }; + } }; - fn header(list: List) *Header { - return @ptrCast(list.entries - Header.fields_len); - } - }; + } + + /// In order to store references to strings in fewer bytes, we copy all + /// string bytes into here. String bytes can be null. It is up to whomever + /// is referencing the data here whether they want to store both index and length, + /// thus allowing null bytes, or store only index, and use null-termination. The + /// `strings` array is agnostic to either usage. + pub fn getMutableStrings(local: *Local, gpa: std.mem.Allocator) Strings.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.strings, + .list = &local.shared.strings, + }; + } }; +pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { + return &ip.locals[@intFromEnum(tid)]; +} +pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared { + return &ip.locals[@intFromEnum(tid)].shared; +} const Shard = struct { shared: struct { @@ -448,7 +629,7 @@ const Shard = struct { } }; fn header(map: @This()) *Header { - return &(@as([*]Header, @ptrCast(map.entries)) - 1)[0]; + return @ptrFromInt(@intFromPtr(map.entries) - entries_offset); } const Entry = extern struct { @@ -465,6 +646,17 @@ const Shard = struct { }; } }; +fn getShard(ip: *InternPool, tid: Zcu.PerThread.Id) *Shard { + return &ip.shards[@intFromEnum(tid)]; +} + +fn getTidMask(ip: *const InternPool) u32 { + assert(std.math.isPowerOfTwo(ip.shards.len)); + return @intCast(ip.shards.len - 1); +} +fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { + return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width; +} const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); @@ -560,18 +752,18 @@ pub const OptionalNamespaceIndex = enum(u32) { } }; -/// An index into `string_bytes`. +/// An index into `strings`. pub const String = enum(u32) { /// An empty string. 
empty = 0, _, pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 { - return ip.string_bytes.items[@intFromEnum(string)..][0..@intCast(len)]; + return string.toOverlongSlice(ip)[0..@intCast(len)]; } pub fn at(string: String, index: u64, ip: *const InternPool) u8 { - return ip.string_bytes.items[@intCast(@intFromEnum(string) + index)]; + return string.toOverlongSlice(ip)[@intCast(index)]; } pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString { @@ -579,9 +771,32 @@ pub const String = enum(u32) { assert(string.at(len, ip) == 0); return @enumFromInt(@intFromEnum(string)); } + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_32 | unwrapped.index); + } + }; + fn unwrap(string: String, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(string) >> ip.tid_shift_32 & ip.getTidMask()), + .index = @intFromEnum(string) & ip.getIndexMask(u32), + }; + } + + fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { + const unwrapped = string.unwrap(ip); + const strings = ip.getLocalShared(unwrapped.tid).strings.acquire(); + return strings.view().items(.@"0")[unwrapped.index..]; + } }; -/// An index into `string_bytes` which might be `none`. +/// An index into `strings` which might be `none`. pub const OptionalString = enum(u32) { /// This is distinct from `none` - it is a valid index that represents empty string. empty = 0, @@ -597,7 +812,7 @@ pub const OptionalString = enum(u32) { } }; -/// An index into `string_bytes`. +/// An index into `strings`. pub const NullTerminatedString = enum(u32) { /// An empty string. empty = 0, @@ -623,12 +838,8 @@ pub const NullTerminatedString = enum(u32) { return @enumFromInt(@intFromEnum(self)); } - fn toOverlongSlice(string: NullTerminatedString, ip: *const InternPool) []const u8 { - return ip.string_bytes.items[@intFromEnum(string)..]; - } - pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 { - const overlong_slice = string.toOverlongSlice(ip); + const overlong_slice = string.toString().toOverlongSlice(ip); return overlong_slice[0..std.mem.indexOfScalar(u8, overlong_slice, 0).? :0]; } @@ -637,7 +848,7 @@ pub const NullTerminatedString = enum(u32) { } pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool { - const overlong_slice = string.toOverlongSlice(ip); + const overlong_slice = string.toString().toOverlongSlice(ip); return overlong_slice.len > slice.len and std.mem.eql(u8, overlong_slice[0..slice.len], slice) and overlong_slice[slice.len] == 0; @@ -688,12 +899,12 @@ pub const NullTerminatedString = enum(u32) { } else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); } - pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { - return .{ .data = .{ .string = self, .ip = ip } }; + pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { + return .{ .data = .{ .string = string, .ip = ip } }; } }; -/// An index into `string_bytes` which might be `none`. +/// An index into `strings` which might be `none`. 
pub const OptionalNullTerminatedString = enum(u32) { /// This is distinct from `none` - it is a valid index that represents empty string. empty = 0, @@ -4077,7 +4288,7 @@ pub const FuncAnalysis = packed struct(u32) { pub const Bytes = struct { /// The type of the aggregate ty: Index, - /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + /// Index into strings, of len ip.aggregateTypeLen(ty) bytes: String, }; @@ -4647,16 +4858,21 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { errdefer ip.deinit(gpa); assert(ip.items.len == 0); - ip.local = try gpa.alloc(Local, total_threads); - @memset(ip.local, .{ - .items = Local.List.empty, - .extra = Local.List.empty, - .strings = Local.List.empty, - .arena = .{}, + ip.locals = try gpa.alloc(Local, total_threads); + @memset(ip.locals, .{ + .shared = .{ + .strings = Local.Strings.empty, + }, + .mutate = .{ + .arena = .{}, + .strings = Local.Mutate.empty, + }, }); - ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); - ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.shard_shift); + ip.tid_width = @intCast(std.math.log2_int_ceil(usize, total_threads)); + ip.tid_shift_31 = 31 - ip.tid_width; + ip.tid_shift_32 = ip.tid_shift_31 +| 1; + ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.tid_width); @memset(ip.shards, .{ .shared = .{ .map = Shard.Map(Index).empty, @@ -4705,7 +4921,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); - ip.string_bytes.deinit(gpa); ip.decls_free_list.deinit(gpa); ip.allocated_decls.deinit(gpa); @@ -4732,8 +4947,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); gpa.free(ip.shards); - for (ip.local) |*local| local.arena.promote(gpa).deinit(); - gpa.free(ip.local); + for (ip.locals) |*local| local.mutate.arena.promote(gpa).deinit(); + gpa.free(ip.locals); ip.* = undefined; } @@ -5437,8 +5652,9 @@ fn getOrPutKey( } const map_header = map.header().*; if (shard.mutate.map.len >= map_header.capacity * 3 / 5) { - var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); - defer ip.local[@intFromEnum(tid)].arena = arena.state; + const arena_state = &ip.getLocal(tid).mutate.arena; + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; const new_map_capacity = map_header.capacity * 2; const new_map_buf = try arena.allocator().alignedAlloc( u8, @@ -6194,33 +6410,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } if (child == .u8_type) bytes: { - const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(len_including_sentinel + 1)); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const start = strings.lenPtr().*; + try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { - .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes.toSlice(len, ip)), + .bytes => |bytes| strings.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}), .elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) { .undef => { - ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + strings.shrinkRetainingCapacity(start); break :bytes; }, - .int => |int| ip.string_bytes.appendAssumeCapacity( - @intCast(int.storage.u64), - ), + .int => |int| strings.appendAssumeCapacity(.{@intCast(int.storage.u64)}), else => 
unreachable, }, .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(len)), + strings.addManyAsSliceAssumeCapacity(@intCast(len))[0], @intCast(int.storage.u64), ), else => unreachable, }, } - if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( + if (sentinel != .none) strings.appendAssumeCapacity(.{ @intCast(ip.indexToKey(sentinel).int.storage.u64), - ); + }); const string = try ip.getOrPutTrailingString( gpa, tid, @@ -9050,10 +9265,11 @@ pub fn getOrPutString( slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1); - ip.string_bytes.appendSliceAssumeCapacity(slice); - ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, tid, slice.len + 1, embedded_nulls); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + try strings.ensureUnusedCapacity(slice.len + 1); + strings.appendSliceAssumeCapacity(.{slice}); + strings.appendAssumeCapacity(.{0}); + return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), embedded_nulls); } pub fn getOrPutStringFmt( @@ -9064,11 +9280,12 @@ pub fn getOrPutStringFmt( args: anytype, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - // ensure that references to string_bytes in args do not get invalidated - const len: usize = @intCast(std.fmt.count(format, args) + 1); - try ip.string_bytes.ensureUnusedCapacity(gpa, len); - ip.string_bytes.writer(undefined).print(format, args) catch unreachable; - ip.string_bytes.appendAssumeCapacity(0); + // ensure that references to strings in args do not get invalidated + const format_z = format ++ .{0}; + const len: u32 = @intCast(std.fmt.count(format_z, args)); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const slice = try strings.addManyAsSlice(len); + assert((std.fmt.bufPrint(slice[0], format_z, args) catch unreachable).len == len); return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); } @@ -9083,47 +9300,33 @@ pub fn getOrPutStringOpt( return string.toOptional(); } -/// Uses the last len bytes of ip.string_bytes as the key. +/// Uses the last len bytes of strings as the key. 
pub fn getOrPutTrailingString( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - len: usize, + len: u32, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - const string_bytes = &ip.string_bytes; - const str_index: u32 = @intCast(string_bytes.items.len - len); - if (len > 0 and string_bytes.getLast() == 0) { - _ = string_bytes.pop(); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const start: u32 = @intCast(strings.lenPtr().* - len); + if (len > 0 and strings.view().items(.@"0")[strings.lenPtr().* - 1] == 0) { + strings.lenPtr().* -= 1; } else { - try string_bytes.ensureUnusedCapacity(gpa, 1); + try strings.ensureUnusedCapacity(1); } - const key: []const u8 = string_bytes.items[str_index..]; + const key: []const u8 = strings.view().items(.@"0")[start..]; + const value: embedded_nulls.StringType() = + @enumFromInt(@intFromEnum(tid) << ip.tid_shift_32 | start); const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; switch (embedded_nulls) { .no_embedded_nulls => assert(!has_embedded_null), .maybe_embedded_nulls => if (has_embedded_null) { - string_bytes.appendAssumeCapacity(0); - return @enumFromInt(str_index); + strings.appendAssumeCapacity(.{0}); + return value; }, } - const maybe_existing_index = try ip.getOrPutStringValue(gpa, tid, key, @enumFromInt(str_index)); - if (maybe_existing_index.unwrap()) |existing_index| { - string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(@intFromEnum(existing_index)); - } else { - string_bytes.appendAssumeCapacity(0); - return @enumFromInt(str_index); - } -} -fn getOrPutStringValue( - ip: *InternPool, - gpa: Allocator, - tid: Zcu.PerThread.Id, - key: []const u8, - value: NullTerminatedString, -) Allocator.Error!OptionalNullTerminatedString { const full_hash = Hash.hash(0, key); const hash: u32 = @truncate(full_hash >> 32); const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; @@ -9136,7 +9339,9 @@ fn getOrPutStringValue( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (index.eqlSlice(key, ip)) return index.toOptional(); + if (!index.eqlSlice(key, ip)) continue; + strings.shrinkRetainingCapacity(start); + return @enumFromInt(@intFromEnum(index)); } shard.mutate.string_map.mutex.lock(); defer shard.mutate.string_map.mutex.unlock(); @@ -9151,18 +9356,22 @@ fn getOrPutStringValue( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (index.eqlSlice(key, ip)) return index.toOptional(); + if (!index.eqlSlice(key, ip)) continue; + strings.shrinkRetainingCapacity(start); + return @enumFromInt(@intFromEnum(index)); } defer shard.mutate.string_map.len += 1; const map_header = map.header().*; if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) { const entry = &map.entries[map_index]; entry.hash = hash; - entry.release(value.toOptional()); - return .none; + entry.release(@enumFromInt(@intFromEnum(value))); + strings.appendAssumeCapacity(.{0}); + return value; } - var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); - defer ip.local[@intFromEnum(tid)].arena = arena.state; + const arena_state = &ip.getLocal(tid).mutate.arena; + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; const new_map_capacity = map_header.capacity * 2; const new_map_buf = try arena.allocator().alignedAlloc( u8, @@ -9197,11 +9406,12 @@ fn getOrPutStringValue( if 
(map.entries[map_index].value == .none) break; } map.entries[map_index] = .{ - .value = value.toOptional(), + .value = @enumFromInt(@intFromEnum(value)), .hash = hash, }; shard.shared.string_map.release(new_map); - return .none; + strings.appendAssumeCapacity(.{0}); + return value; } pub fn getString(ip: *InternPool, key: []const u8) OptionalNullTerminatedString { diff --git a/src/Value.zig b/src/Value.zig index e47598fe0a36..c3e4b05fcb34 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -65,8 +65,9 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi .elems => return arrayToIpString(val, ty.arrayLen(mod), pt), .repeated_elem => |elem| { const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); - const len: usize = @intCast(ty.arrayLen(mod)); - try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + const len: u32 = @intCast(ty.arrayLen(mod)); + const strings = ip.getLocal(pt.tid).getMutableStrings(mod.gpa); + try strings.appendNTimes(.{byte}, len); return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls); }, } @@ -107,16 +108,18 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const mod = pt.zcu; const gpa = mod.gpa; const ip = &mod.intern_pool; - const len: usize = @intCast(len_u64); - try ip.string_bytes.ensureUnusedCapacity(gpa, len); + const len: u32 = @intCast(len_u64); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const strings_len = strings.lenPtr(); + try strings.ensureUnusedCapacity(len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. - const prev = ip.string_bytes.items.len; + const prev_len = strings_len.*; const elem_val = try val.elemValue(pt, i); - assert(ip.string_bytes.items.len == prev); + assert(strings_len.* == prev_len); const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); - ip.string_bytes.appendAssumeCapacity(byte); + strings.appendAssumeCapacity(.{byte}); } return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls); } diff --git a/src/Zcu.zig b/src/Zcu.zig index 32c9045910ff..04ba7cc3284b 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -693,38 +693,39 @@ pub const Namespace = struct { ) !InternPool.NullTerminatedString { const zcu = pt.zcu; const ip = &zcu.intern_pool; - const count = count: { + + const gpa = zcu.gpa; + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + // Protects reads of interned strings from being reallocated during the call to + // renderFullyQualifiedName. + const slice = try strings.addManyAsSlice(count: { var count: usize = name.length(ip) + 1; var cur_ns = &ns; while (true) { const decl = zcu.declPtr(cur_ns.decl_index); - count += decl.name.length(ip) + 1; cur_ns = zcu.namespacePtr(cur_ns.parent.unwrap() orelse { - count += ns.fileScope(zcu).sub_file_path.len; + count += ns.fileScope(zcu).fullyQualifiedNameLen(); break :count count; }); + count += decl.name.length(ip) + 1; } - }; - - const gpa = zcu.gpa; - const start = ip.string_bytes.items.len; - // Protects reads of interned strings from being reallocated during the call to - // renderFullyQualifiedName. - try ip.string_bytes.ensureUnusedCapacity(gpa, count); - ns.renderFullyQualifiedName(zcu, name, ip.string_bytes.writer(gpa)) catch unreachable; + }); + var fbs = std.io.fixedBufferStream(slice[0]); + ns.renderFullyQualifiedName(zcu, name, fbs.writer()) catch unreachable; + assert(fbs.pos == slice[0].len); // Sanitize the name for nvptx which is more restrictive. 
// TODO This should be handled by the backend, not the frontend. Have a // look at how the C backend does it for inspiration. const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; if (cpu_arch.isNvptx()) { - for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) { + for (slice[0]) |*byte| switch (byte.*) { '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', else => {}, }; } - return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { @@ -859,6 +860,11 @@ pub const File = struct { return &file.tree; } + pub fn fullyQualifiedNameLen(file: File) usize { + const ext = std.fs.path.extension(file.sub_file_path); + return file.sub_file_path.len - ext.len; + } + pub fn renderFullyQualifiedName(file: File, writer: anytype) !void { // Convert all the slashes into dots and truncate the extension. const ext = std.fs.path.extension(file.sub_file_path); @@ -879,9 +885,12 @@ pub const File = struct { pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { const gpa = pt.zcu.gpa; const ip = &pt.zcu.intern_pool; - const start = ip.string_bytes.items.len; - try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa)); - return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const slice = try strings.addManyAsSlice(file.fullyQualifiedNameLen()); + var fbs = std.io.fixedBufferStream(slice[0]); + file.renderFullyQualifiedName(fbs.writer()) catch unreachable; + assert(fbs.pos == slice[0].len); + return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls); } pub fn fullPath(file: File, ally: Allocator) ![]u8 { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 8cf69223450b..2d2be29909f0 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1377,10 +1377,11 @@ fn newEmbedFile( }; const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow; - const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1)); - const actual_read = try file.readAll(bytes[0..size]); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const bytes = try strings.addManyAsSlice(try std.math.add(usize, size, 1)); + const actual_read = try file.readAll(bytes[0][0..size]); if (actual_read != size) return error.UnexpectedEndOfFile; - bytes[size] = 0; + bytes[0][size] = 0; const comp = mod.comp; switch (comp.cache_use) { @@ -1389,7 +1390,7 @@ fn newEmbedFile( errdefer gpa.free(copied_resolved_path); whole.cache_manifest_mutex.lock(); defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat); + try man.addFilePostContents(copied_resolved_path, bytes[0][0..size], stat); }, .incremental => {}, } @@ -1401,7 +1402,7 @@ fn newEmbedFile( } }); const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, @intCast(bytes[0].len), .maybe_embedded_nulls) }, } }); const ptr_ty = (try pt.ptrType(.{ From 92ddb959a7c8877c98363b27c71cd5ae4b9603f4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 4 Jul 2024 03:33:23 -0400 Subject: [PATCH 067/152] 
InternPool: implement and use thread-safe list for items --- src/Compilation.zig | 8 +- src/InternPool.zig | 1478 ++++++++++++++++++++++------------------- src/Sema.zig | 25 +- src/Type.zig | 2 +- src/Zcu.zig | 36 - src/Zcu/PerThread.zig | 47 +- 6 files changed, 859 insertions(+), 737 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 14d109bab372..a54205dddfe3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2745,7 +2745,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { - items_len: u32, + //items_len: u32, extra_len: u32, limbs_len: u32, //string_bytes_len: u32, @@ -2774,7 +2774,7 @@ pub fn saveState(comp: *Compilation) !void { const ip = &zcu.intern_pool; const header: Header = .{ .intern_pool = .{ - .items_len = @intCast(ip.items.len), + //.items_len = @intCast(ip.items.len), .extra_len = @intCast(ip.extra.items.len), .limbs_len = @intCast(ip.limbs.items.len), //.string_bytes_len = @intCast(ip.string_bytes.items.len), @@ -2792,8 +2792,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index 6b875c2288e8..117b2ceef8c7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -8,7 +8,7 @@ tid_width: std.math.Log2Int(u32) = 0, tid_shift_31: std.math.Log2Int(u32) = 31, tid_shift_32: std.math.Log2Int(u32) = 31, -items: std.MultiArrayList(Item) = .{}, +//items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. /// On 64-bit systems, this array is used for big integers and associated metadata. 
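The hunk above removes the single global `items: std.MultiArrayList(Item)`; the hunks that follow give each thread its own list inside `Local`, appended to only by its owner and readable from any thread through an acquire load. A minimal sketch of that single-writer/many-reader shape, under simplifying assumptions (fixed capacity and one concrete element type; the real `Local.List` also handles growth through an arena and MultiArrayList-style per-field access):

    const std = @import("std");

    /// Single-writer, many-reader list: only the owning thread appends,
    /// and `len` is published with a release store so that a reader which
    /// acquires it is guaranteed to see fully initialized elements.
    /// Sketch only: no reallocation, so capacity is fixed up front.
    fn AppendOnlyList(comptime T: type, comptime capacity: usize) type {
        return struct {
            buf: [capacity]T = undefined,
            len: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

            /// Must be called only from the owning thread.
            fn append(list: *@This(), item: T) void {
                const n = list.len.load(.unordered); // single writer: no race
                std.debug.assert(n < capacity);
                list.buf[n] = item;
                list.len.store(n + 1, .release); // publish the new element
            }

            /// Safe to call from any thread.
            fn get(list: *const @This(), i: u32) T {
                std.debug.assert(i < list.len.load(.acquire));
                return list.buf[i];
            }
        };
    }

    test AppendOnlyList {
        var list: AppendOnlyList(u64, 8) = .{};
        list.append(42);
        try std.testing.expectEqual(@as(u64, 42), list.get(0));
    }

Because an append never moves elements out from under a reader in the real implementation, an index handed to another thread stays valid without any locking on the read path.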
@@ -344,10 +344,12 @@ const Local = struct { shared: Shared align(std.atomic.cache_line), mutate: struct { arena: std.heap.ArenaAllocator.State, + items: Mutate, strings: Mutate, } align(std.atomic.cache_line), const Shared = struct { + items: List(Item), strings: Strings, }; @@ -403,6 +405,11 @@ const Local = struct { } }); } + pub fn append(mutable: Mutable, elem: Elem) Allocator.Error!void { + try mutable.ensureUnusedCapacity(1); + mutable.appendAssumeCapacity(elem); + } + pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { var mutable_view = mutable.view(); defer mutable.lenPtr().* = @intCast(mutable_view.len); @@ -417,7 +424,7 @@ const Local = struct { const mutable_len = mutable.lenPtr(); const start = mutable_len.*; const slice_len = @field(slice, @tagName(fields[0])).len; - assert(slice_len < mutable.capacityPtr().* - start); + assert(slice_len <= mutable.capacityPtr().* - start); mutable_len.* = @intCast(start + slice_len); const mutable_view = mutable.view(); inline for (fields) |field| { @@ -552,6 +559,15 @@ const Local = struct { }; } + pub fn getMutableItems(local: *Local, gpa: std.mem.Allocator) List(Item).Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.items, + .list = &local.shared.items, + }; + } + /// In order to store references to strings in fewer bytes, we copy all /// string bytes into here. String bytes can be null. It is up to whomever /// is referencing the data here whether they want to store both index and length, @@ -566,9 +582,11 @@ const Local = struct { }; } }; + pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { return &ip.locals[@intFromEnum(tid)]; } + pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared { return &ip.locals[@intFromEnum(tid)].shared; } @@ -646,6 +664,7 @@ const Shard = struct { }; } }; + fn getShard(ip: *InternPool, tid: Zcu.PerThread.Id) *Shard { return &ip.shards[@intFromEnum(tid)]; } @@ -654,6 +673,7 @@ fn getTidMask(ip: *const InternPool) u32 { assert(std.math.isPowerOfTwo(ip.shards.len)); return @intCast(ip.shards.len - 1); } + fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width; } @@ -791,8 +811,7 @@ pub const String = enum(u32) { fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { const unwrapped = string.unwrap(ip); - const strings = ip.getLocalShared(unwrapped.tid).strings.acquire(); - return strings.view().items(.@"0")[unwrapped.index..]; + return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; } }; @@ -2309,7 +2328,7 @@ pub const LoadedUnionType = struct { }; pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { - const data = ip.items.items(.data)[@intFromEnum(index)]; + const data = index.getData(ip); const type_union = ip.extraDataTrail(Tag.TypeUnion, data); const fields_len = type_union.data.fields_len; @@ -2731,7 +2750,7 @@ pub const LoadedStructType = struct { }; pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); switch (item.tag) { .type_struct => { if (item.data == 0) return .{ @@ -2955,7 +2974,7 @@ const LoadedEnumType = struct { }; pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); const tag_mode: LoadedEnumType.TagMode = switch 
(item.tag) { .type_enum_auto => { const extra = ip.extraDataTrail(EnumAuto, item.data); @@ -3034,9 +3053,9 @@ pub const LoadedOpaqueType = struct { }; pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { - assert(ip.items.items(.tag)[@intFromEnum(index)] == .type_opaque); - const extra_index = ip.items.items(.data)[@intFromEnum(index)]; - const extra = ip.extraDataTrail(Tag.TypeOpaque, extra_index); + const item = index.getItem(ip); + assert(item.tag == .type_opaque); + const extra = ip.extraDataTrail(Tag.TypeOpaque, item.data); const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) 0 else @@ -3211,6 +3230,38 @@ pub const Index = enum(u32) { } }; + pub fn getItem(index: Index, ip: *const InternPool) Item { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().get(unwrapped.index); + } + + pub fn getTag(index: Index, ip: *const InternPool) Tag { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.tag)[unwrapped.index]; + } + + pub fn getData(index: Index, ip: *const InternPool) u32 { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.data)[unwrapped.index]; + } + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Index { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u31)); + return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_31 | unwrapped.index); + } + }; + fn unwrap(index: Index, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_31 & ip.getTidMask()), + .index = @intFromEnum(index) & ip.getIndexMask(u31), + }; + } + /// This function is used in the debugger pretty formatters in tools/ to fetch the /// Tag to encoding mapping to facilitate fancy debug printing for this type. /// TODO merge this with `Tag.Payload`. 
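The `Unwrapped` pair above is the heart of the new addressing scheme: a 32-bit `Index` now encodes which thread created an item in its upper bits and the position within that thread's list in the lower bits, so any thread can follow an index straight to the right `Local` without coordination. The same packing in isolation, assuming a fixed shift rather than the pool's dynamically computed `tid_shift_31`:

    const std = @import("std");

    // Assume at most 8 threads, so 3 tid bits; the real pool derives
    // tid_width from the thread count at init and masks with getTidMask /
    // getIndexMask instead of these fixed constants.
    const tid_width = 3;
    const tid_shift = 31 - tid_width;
    const index_mask = (@as(u32, 1) << tid_shift) - 1;

    fn wrap(tid: u32, index: u32) u32 {
        std.debug.assert(tid < (1 << tid_width));
        std.debug.assert(index <= index_mask);
        return tid << tid_shift | index;
    }

    fn unwrap(wrapped: u32) struct { tid: u32, index: u32 } {
        return .{
            .tid = wrapped >> tid_shift & ((1 << tid_width) - 1),
            .index = wrapped & index_mask,
        };
    }

    test wrap {
        const u = unwrap(wrap(5, 1234));
        try std.testing.expectEqual(@as(u32, 5), u.tid);
        try std.testing.expectEqual(@as(u32, 1234), u.index);
    }

Note that the pool keeps two shifts: `tid_shift_31` for item indexes, as in `Index.unwrap` above, and `tid_shift_32` for string indexes, as in `getOrPutTrailingString` earlier, because the two index spaces have different widths.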
@@ -4856,15 +4907,17 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { errdefer ip.deinit(gpa); - assert(ip.items.len == 0); + assert(ip.locals.len == 0 and ip.shards.len == 0); ip.locals = try gpa.alloc(Local, total_threads); @memset(ip.locals, .{ .shared = .{ + .items = Local.List(Item).empty, .strings = Local.Strings.empty, }, .mutate = .{ .arena = .{}, + .items = Local.Mutate.empty, .strings = Local.Mutate.empty, }, }); @@ -4918,7 +4971,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); @@ -4955,7 +5007,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { pub fn indexToKey(ip: *const InternPool, index: Index) Key { assert(index != .none); - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); const data = item.data; return switch (item.tag) { .removed => unreachable, @@ -5001,8 +5053,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, .type_slice => { - assert(ip.items.items(.tag)[data] == .type_pointer); - var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); + const many_ptr_index: Index = @enumFromInt(data); + const many_ptr_item = many_ptr_index.getItem(ip); + assert(many_ptr_item.tag == .type_pointer); + var ptr_info = ip.extraData(Tag.TypePointer, many_ptr_item.data); ptr_info.flags.size = .Slice; return .{ .ptr_type = ptr_info }; }, @@ -5196,7 +5250,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ptr_elem => { // Avoid `indexToKey` recursion by asserting the tag encoding. const info = ip.extraData(PtrBaseIndex, data); - const index_item = ip.items.get(@intFromEnum(info.index)); + const index_item = info.index.getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{ .base = info.base, @@ -5209,7 +5263,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ptr_field => { // Avoid `indexToKey` recursion by asserting the tag encoding. 
const info = ip.extraData(PtrBaseIndex, data); - const index_item = ip.items.get(@intFromEnum(info.index)); + const index_item = info.index.getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{ .base = info.base, @@ -5326,7 +5380,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .func_coerced => .{ .func = ip.extraFuncCoerced(data) }, .only_possible_value => { const ty: Index = @enumFromInt(data); - const ty_item = ip.items.get(@intFromEnum(ty)); + const ty_item = ty.getItem(ip); return switch (ty_item.tag) { .type_array_big => { const sentinel = @as( @@ -5557,7 +5611,7 @@ fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); - const sub_item = ip.items.get(@intFromEnum(func_coerced.func)); + const sub_item = func_coerced.func.getItem(ip); var func: Key.Func = switch (sub_item.tag) { .func_instance => ip.extraFuncInstance(sub_item.data), .func_decl => ip.extraFuncDecl(sub_item.data), @@ -5581,21 +5635,30 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key const GetOrPutKey = union(enum) { existing: Index, new: struct { + ip: *InternPool, + tid: Zcu.PerThread.Id, shard: *Shard, map_index: u32, }, - fn set(gop: *GetOrPutKey, index: Index) Index { + fn put(gop: *GetOrPutKey) Index { + return gop.putAt(0); + } + fn putAt(gop: *GetOrPutKey, offset: u32) Index { switch (gop.*) { .existing => unreachable, .new => |info| { + const index = Index.Unwrapped.wrap(.{ + .tid = info.tid, + .index = info.ip.getLocal(info.tid).mutate.items.len - 1 - offset, + }, info.ip); info.shard.shared.map.entries[info.map_index].release(index); info.shard.mutate.map.len += 1; info.shard.mutate.map.mutex.unlock(); + gop.* = .{ .existing = index }; + return index; }, } - gop.* = .{ .existing = index }; - return index; } fn assign(gop: *GetOrPutKey, new_gop: GetOrPutKey) void { @@ -5692,21 +5755,27 @@ fn getOrPutKey( shard.shared.map.release(new_map); } map.entries[map_index].hash = hash; - return .{ .new = .{ .shard = shard, .map_index = map_index } }; + return .{ .new = .{ + .ip = ip, + .tid = tid, + .shard = shard, + .map_index = map_index, + } }; } pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { var gop = try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return gop.existing; - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); switch (key) { .int_type => |int_type| { const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = t, .data = int_type.bits, }); @@ -5721,18 +5790,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const ptr_type_index = try ip.get(gpa, tid, new_key); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ + try items.ensureUnusedCapacity(1); + items.appendAssumeCapacity(.{ .tag = .type_slice, .data = @intFromEnum(ptr_type_index), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } var ptr_type_adjusted = ptr_type; if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; - 
ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_pointer, .data = try ip.addExtra(gpa, ptr_type_adjusted), }); @@ -5743,19 +5812,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (std.math.cast(u32, array_type.len)) |len| { if (array_type.sentinel == .none) { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_array_small, .data = try ip.addExtra(gpa, Vector{ .len = len, .child = array_type.child, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } } const length = Array.Length.init(array_type.len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_array_big, .data = try ip.addExtra(gpa, Array{ .len0 = length.a, @@ -5766,7 +5835,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, .vector_type => |vector_type| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_vector, .data = try ip.addExtra(gpa, Vector{ .len = vector_type.len, @@ -5776,20 +5845,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .opt_type => |payload_type| { assert(payload_type != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_optional, .data = @intFromEnum(payload_type), }); }, .anyframe_type => |payload_type| { // payload_type might be none, indicating the type is `anyframe`. - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_anyframe, .data = @intFromEnum(payload_type), }); }, .error_union_type => |error_union_type| { - ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ + items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ .tag = .type_anyerror_union, .data = @intFromEnum(error_union_type.payload_type), } else .{ @@ -5805,7 +5874,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All addStringsToMap(ip, names_map, names); const names_len = error_set_type.names.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_error_set, .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ .names_len = names_len, @@ -5815,26 +5884,26 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); }, .inferred_error_set_type => |ies_index| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_inferred_error_set, .data = @intFromEnum(ies_index), }); }, .simple_type => |simple_type| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .simple_type, .data = @intFromEnum(simple_type), }); }, .simple_value => |simple_value| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .simple_value, .data = @intFromEnum(simple_value), }); }, .undef => |ty| { assert(ty != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .undef, .data = @intFromEnum(ty), }); @@ -5853,7 +5922,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .variable => |variable| { const has_init = variable.init != .none; if (has_init) assert(variable.ty == ip.typeOf(variable.init)); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .variable, .data = try ip.addExtra(gpa, 
Tag.Variable{ .ty = variable.ty, @@ -5873,7 +5942,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .slice => |slice| { assert(ip.indexToKey(slice.ty).ptr_type.flags.size == .Slice); assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .Many); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .ptr_slice, .data = try ip.addExtra(gpa, PtrSlice{ .ty = slice.ty, @@ -5886,7 +5955,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; assert(ptr_type.flags.size != .Slice); - ip.items.appendAssumeCapacity(switch (ptr.base_addr) { + items.appendAssumeCapacity(switch (ptr.base_addr) { .decl => |decl| .{ .tag = .ptr_decl, .data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), @@ -5975,8 +6044,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .storage = .{ .u64 = base_index.index }, } }); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ + try items.ensureUnusedCapacity(1); + items.appendAssumeCapacity(.{ .tag = switch (ptr.base_addr) { .arr_elem => .ptr_elem, .field => .ptr_field, @@ -5984,7 +6053,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, }); }, @@ -5992,7 +6061,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .opt => |opt| { assert(ip.isOptionalType(opt.ty)); assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); - ip.items.appendAssumeCapacity(if (opt.val == .none) .{ + items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, .data = @intFromEnum(opt.ty), } else .{ @@ -6009,7 +6078,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (int.storage) { else => unreachable, .lazy_align => .int_lazy_align, @@ -6020,20 +6089,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_ty = lazy_ty, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, } switch (int.ty) { .u8_type => switch (int.storage) { .big_int => |big_int| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u8, .data = big_int.to(u8) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u8, .data = @as(u8, @intCast(x)), }); @@ -6043,14 +6112,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .u16_type => switch (int.storage) { .big_int => |big_int| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u16, .data = big_int.to(u16) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u16, .data = @as(u16, @intCast(x)), }); @@ -6060,14 +6129,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .u32_type => switch (int.storage) { .big_int => |big_int| { - 
ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u32, .data = big_int.to(u32) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u32, .data = @as(u32, @intCast(x)), }); @@ -6078,14 +6147,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .i32_type => switch (int.storage) { .big_int => |big_int| { const casted = big_int.to(i32) catch unreachable; - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_i32, .data = @as(u32, @bitCast(casted)), }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_i32, .data = @as(u32, @bitCast(@as(i32, @intCast(x)))), }); @@ -6096,7 +6165,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .usize_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_usize, .data = casted, }); @@ -6105,7 +6174,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_usize, .data = casted, }); @@ -6117,14 +6186,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .comptime_int_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_u32, .data = casted, }); break :b; } else |_| {} if (big_int.to(i32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, .data = @as(u32, @bitCast(casted)), }); @@ -6133,14 +6202,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_u32, .data = casted, }); break :b; } if (std.math.cast(i32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, .data = @as(u32, @bitCast(casted)), }); @@ -6154,35 +6223,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_small, .data = try ip.addExtra(gpa, IntSmall{ .ty = int.ty, .value = casted, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; - try addInt(ip, gpa, int.ty, tag, big_int.limbs); + try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs); }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_small, .data = try ip.addExtra(gpa, IntSmall{ .ty = int.ty, .value = casted, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } var buf: [2]Limb = undefined; const big_int = BigIntMutable.init(&buf, x).toConst(); const tag: Tag = if (big_int.positive) .int_positive else .int_negative; - try addInt(ip, gpa, int.ty, tag, big_int.limbs); + try addInt(ip, gpa, tid, int.ty, tag, 
big_int.limbs); }, .lazy_align, .lazy_size => unreachable, } @@ -6190,7 +6259,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .err => |err| { assert(ip.isErrorSetType(err.ty)); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .error_set_error, .data = try ip.addExtra(gpa, err), }); @@ -6198,7 +6267,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .error_union => |error_union| { assert(ip.isErrorUnionType(error_union.ty)); - ip.items.appendAssumeCapacity(switch (error_union.val) { + items.appendAssumeCapacity(switch (error_union.val) { .err_name => |err_name| .{ .tag = .error_union_error, .data = try ip.addExtra(gpa, Key.Error{ @@ -6216,7 +6285,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, - .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .enum_literal => |enum_literal| items.appendAssumeCapacity(.{ .tag = .enum_literal, .data = @intFromEnum(enum_literal), }), @@ -6228,50 +6297,50 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty), else => unreachable, } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .enum_tag, .data = try ip.addExtra(gpa, enum_tag), }); }, - .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .empty_enum_value => |enum_or_union_ty| items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(enum_or_union_ty), }), .float => |float| { switch (float.ty) { - .f16_type => ip.items.appendAssumeCapacity(.{ + .f16_type => items.appendAssumeCapacity(.{ .tag = .float_f16, .data = @as(u16, @bitCast(float.storage.f16)), }), - .f32_type => ip.items.appendAssumeCapacity(.{ + .f32_type => items.appendAssumeCapacity(.{ .tag = .float_f32, .data = @as(u32, @bitCast(float.storage.f32)), }), - .f64_type => ip.items.appendAssumeCapacity(.{ + .f64_type => items.appendAssumeCapacity(.{ .tag = .float_f64, .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), }), - .f80_type => ip.items.appendAssumeCapacity(.{ + .f80_type => items.appendAssumeCapacity(.{ .tag = .float_f80, .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), }), - .f128_type => ip.items.appendAssumeCapacity(.{ + .f128_type => items.appendAssumeCapacity(.{ .tag = .float_f128, .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), }), .c_longdouble_type => switch (float.storage) { - .f80 => |x| ip.items.appendAssumeCapacity(.{ + .f80 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f80, .data = try ip.addExtra(gpa, Float80.pack(x)), }), - inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ + inline .f16, .f32, .f64, .f128 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f128, .data = try ip.addExtra(gpa, Float128.pack(x)), }), }, - .comptime_float_type => ip.items.appendAssumeCapacity(.{ + .comptime_float_type => items.appendAssumeCapacity(.{ .tag = .float_comptime_float, .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), }), @@ -6331,11 +6400,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } if (len == 0) { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } switch (ty_key) { @@ -6364,11 +6433,11 @@ pub 
fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All // This encoding works thanks to the fact that, as we just verified, // the type itself contains a slice of values that can be provided // in the aggregate fields. - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, else => {}, } @@ -6388,7 +6457,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .storage = .{ .u64 = bytes.at(0, ip) }, } }); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); + try items.ensureUnusedCapacity(1); break :elem elem; }, .elems => |elems| elems[0], @@ -6399,14 +6468,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All gpa, @typeInfo(Repeated).Struct.fields.len, ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .repeated, .data = ip.addExtraAssumeCapacity(Repeated{ .ty = aggregate.ty, .elem_val = elem, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } if (child == .u8_type) bytes: { @@ -6442,21 +6511,21 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All @intCast(len_including_sentinel), .maybe_embedded_nulls, ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ .ty = aggregate.ty, .bytes = string, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)), ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .aggregate, .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ .ty = aggregate.ty, @@ -6469,7 +6538,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .un => |un| { assert(un.ty != .none); assert(un.val != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .union_value, .data = try ip.addExtra(gpa, un), }); @@ -6479,7 +6548,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All for (memoized_call.arg_values) |arg| assert(arg != .none); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + memoized_call.arg_values.len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .memoized_call, .data = ip.addExtraAssumeCapacity(MemoizedCall{ .func = memoized_call.func, @@ -6490,7 +6559,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); }, } - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const UnionTypeInit = struct { @@ -6544,6 +6613,8 @@ pub fn getUnionType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; + const items = ip.getLocal(tid).getMutableItems(gpa); + const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len + @@ -6556,7 +6627,7 @@ pub fn getUnionType( // zig fmt: on ini.fields_len + // field types align_elements_len); - try ip.items.ensureUnusedCapacity(gpa, 1); + try 
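A shape that recurs in `get`, in `getAnonStructType` above, and in `getFuncType` below: the payload is optimistically encoded into `extra`, the map is probed, and on a hit the speculative bytes are rolled back (`ip.extra.items.len = prev_extra_len`) so only the winning entry keeps its encoding. Reduced to a sketch with hypothetical names, and with the map keyed directly on a hash where the real code stores an `Index` and compares the decoded keys:

    const std = @import("std");

    /// Optimistic interning: append the encoded payload first, probe the
    /// map using those bytes, and undo the append if an equivalent entry
    /// already exists. `extra` stands in for InternPool.extra.
    fn internPayload(
        gpa: std.mem.Allocator,
        extra: *std.ArrayListUnmanaged(u32),
        map: *std.AutoHashMapUnmanaged(u64, u32),
        payload: []const u32,
    ) !u32 {
        const prev_len: u32 = @intCast(extra.items.len);
        try extra.appendSlice(gpa, payload);
        errdefer extra.items.len = prev_len;

        const hash = std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(payload));
        const gop = try map.getOrPut(gpa, hash);
        if (gop.found_existing) {
            extra.items.len = prev_len; // lost the race: drop our encoding
            return gop.value_ptr.*;
        }
        gop.value_ptr.* = prev_len;
        return prev_len;
    }

`getFuncDeclIes` below has to extend this idea to four mutually referential items: it pre-computes all four wrapped indices before appending anything, then publishes them in order with `putAt(3)` through `putAt(0)`.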
items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{ .flags = .{ @@ -6582,7 +6653,7 @@ pub fn getUnionType( }, }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_union, .data = extra_index, }); @@ -6618,7 +6689,7 @@ pub fn getUnionType( } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").? @@ -6640,8 +6711,8 @@ pub const WipNamespaceType = struct { } return wip.index; } - pub fn cancel(wip: WipNamespaceType, ip: *InternPool) void { - ip.remove(wip.index); + pub fn cancel(wip: WipNamespaceType, ip: *InternPool, tid: Zcu.PerThread.Id) void { + ip.remove(tid, wip.index); } pub const Result = union(enum) { @@ -6692,6 +6763,8 @@ pub fn getStructType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; + const items = ip.getLocal(tid).getMutableItems(gpa); + const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6728,7 +6801,7 @@ pub fn getStructType( .is_reified = ini.key == .reified, }, }); - try ip.items.append(gpa, .{ + try items.append(.{ .tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed, .data = extra_index, }); @@ -6747,7 +6820,7 @@ pub fn getStructType( ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").? @@ -6800,7 +6873,7 @@ pub fn getStructType( .is_reified = ini.key == .reified, }, }); - try ip.items.append(gpa, .{ + try items.append(.{ .tag = .type_struct, .data = extra_index, }); @@ -6836,7 +6909,7 @@ pub fn getStructType( } ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, } }; @@ -6859,6 +6932,8 @@ pub fn getAnonStructType( assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); + const items = ip.getLocal(tid).getMutableItems(gpa); + const prev_extra_len = ip.extra.items.len; const fields_len: u32 = @intCast(ini.types.len); @@ -6866,7 +6941,7 @@ pub fn getAnonStructType( gpa, @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), ); - try ip.items.ensureUnusedCapacity(gpa, 1); + try items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{ .fields_len = fields_len, @@ -6888,11 +6963,11 @@ pub fn getAnonStructType( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, .data = extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } /// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. 
@@ -6930,7 +7005,9 @@ pub fn getFuncType( @intFromBool(key.comptime_bits != 0) + @intFromBool(key.noalias_bits != 0) + params_len); - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ .params_len = params_len, @@ -6962,11 +7039,11 @@ pub fn getFuncType( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_function, .data = func_type_extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub fn getExternFunc( @@ -6981,12 +7058,13 @@ pub fn getExternFunc( const prev_extra_len = ip.extra.items.len; const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); errdefer ip.extra.items.len = prev_extra_len; - try ip.items.append(gpa, .{ + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.append(.{ .tag = .extern_func, .data = extra_index, }); - errdefer ip.items.len -= 1; - return gop.set(@enumFromInt(ip.items.len - 1)); + errdefer ip.items.lenPtr().* -= 1; + return gop.put(); } pub const GetFuncDeclKey = struct { @@ -7013,7 +7091,9 @@ pub fn getFuncDecl( const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ .analysis = .{ @@ -7043,11 +7123,11 @@ pub fn getFuncDecl( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .func_decl, .data = func_decl_extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const GetFuncDeclIesKey = struct { @@ -7095,7 +7175,26 @@ pub fn getFuncDeclIes( @intFromBool(key.comptime_bits != 0) + @intFromBool(key.noalias_bits != 0) + params_len); - try ip.items.ensureUnusedCapacity(gpa, 4); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(4); + + const func_index = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 0, + }, ip); + const error_union_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 1, + }, ip); + const error_set_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 2, + }, ip); + const func_ty = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 3, + }, ip); const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ .analysis = .{ @@ -7107,36 +7206,18 @@ pub fn getFuncDeclIes( .inferred_error_set = true, }, .owner_decl = key.owner_decl, - .ty = @enumFromInt(ip.items.len + 3), + .ty = func_ty, .zir_body_inst = key.zir_body_inst, .lbrace_line = key.lbrace_line, .rbrace_line = key.rbrace_line, .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); - - ip.items.appendAssumeCapacity(.{ - .tag = .func_decl, - .data = func_decl_extra_index, - }); ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); - ip.items.appendAssumeCapacity(.{ - .tag = .type_error_union, - .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ - .error_set_type = @enumFromInt(ip.items.len + 1), - .payload_type = key.bare_return_type, - }), - }); - - ip.items.appendAssumeCapacity(.{ - .tag = .type_inferred_error_set, - .data = @intCast(ip.items.len - 2), - }); - const func_type_extra_index = 
ip.addExtraAssumeCapacity(Tag.TypeFunction{ .params_len = params_len, - .return_type = @enumFromInt(ip.items.len - 2), + .return_type = error_union_type, .flags = .{ .cc = key.cc orelse .Unspecified, .is_var_args = key.is_var_args, @@ -7152,45 +7233,57 @@ pub fn getFuncDeclIes( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + + items.appendSliceAssumeCapacity(.{ + .tag = &.{ + .func_decl, + .type_error_union, + .type_inferred_error_set, + .type_function, + }, + .data = &.{ + func_decl_extra_index, + ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = error_set_type, + .payload_type = key.bare_return_type, + }), + @intFromEnum(func_index), + func_type_extra_index, + }, + }); errdefer { - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; } - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = func_type_extra_index, - }); - - var gop = try ip.getOrPutKey(gpa, tid, .{ + var func_gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), }); - defer gop.deinit(); - if (gop == .existing) { + defer func_gop.deinit(); + if (func_gop == .existing) { // An existing function type was found; undo the additions to our two arrays. - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; - return gop.existing; + return func_gop.existing; } - - var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ - .error_set_type = @enumFromInt(ip.items.len - 2), + var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + .error_set_type = error_set_type, .payload_type = key.bare_return_type, } }); - defer eu_gop.deinit(); - var ies_gop = try ip.getOrPutKey(gpa, tid, .{ - .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + defer error_union_type_gop.deinit(); + var error_set_type_gop = try ip.getOrPutKey(gpa, tid, .{ + .inferred_error_set_type = func_index, }); - defer ies_gop.deinit(); - var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_set_type_gop.deinit(); + var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), }); - defer ty_gop.deinit(); - const index = gop.set(@enumFromInt(ip.items.len - 4)); - _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); - _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); - _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); - return index; + defer func_ty_gop.deinit(); + assert(func_gop.putAt(3) == func_index); + assert(error_union_type_gop.putAt(2) == error_union_type); + assert(error_set_type_gop.putAt(1) == error_set_type); + assert(func_ty_gop.putAt(0) == func_ty); + return func_index; } pub fn getErrorSetType( @@ -7227,11 +7320,12 @@ pub fn getErrorSetType( return gop.existing; } - try ip.items.append(gpa, .{ + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.append(.{ .tag = .type_error_set, .data = error_set_extra_index, }); - errdefer ip.items.len -= 1; + errdefer items.lenPtr().* -= 1; const names_map = try ip.addMap(gpa, names.len); assert(names_map == predicted_names_map); @@ -7239,7 +7333,7 @@ pub fn getErrorSetType( addStringsToMap(ip, names_map, names); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const GetFuncInstanceKey = struct { @@ -7312,15 +7406,14 @@ pub fn getFuncInstance( return gop.existing; } - const func_index: Index 
= @enumFromInt(ip.items.len); - - try ip.items.append(gpa, .{ + const items = ip.getLocal(tid).getMutableItems(gpa); + const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.lenPtr().* }, ip); + try items.append(.{ .tag = .func_instance, .data = func_extra_index, }); - errdefer ip.items.len -= 1; - - return gop.set(try finishFuncInstance( + errdefer items.lenPtr().* -= 1; + try finishFuncInstance( ip, gpa, tid, @@ -7329,7 +7422,8 @@ pub fn getFuncInstance( func_extra_index, arg.alignment, arg.section, - )); + ); + return gop.put(); } /// This function exists separately than `getFuncInstance` because it needs to @@ -7361,12 +7455,26 @@ pub fn getFuncInstanceIes( @typeInfo(Tag.TypeFunction).Struct.fields.len + @intFromBool(arg.noalias_bits != 0) + params_len); - try ip.items.ensureUnusedCapacity(gpa, 4); - const func_index: Index = @enumFromInt(ip.items.len); - const error_union_type: Index = @enumFromInt(ip.items.len + 1); - const error_set_type: Index = @enumFromInt(ip.items.len + 2); - const func_ty: Index = @enumFromInt(ip.items.len + 3); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(4); + + const func_index = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 0, + }, ip); + const error_union_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 1, + }, ip); + const error_set_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 2, + }, ip); + const func_ty = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 3, + }, ip); const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ .analysis = .{ @@ -7406,57 +7514,52 @@ pub fn getFuncInstanceIes( if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); - // TODO: add appendSliceAssumeCapacity to MultiArrayList. - ip.items.appendAssumeCapacity(.{ - .tag = .func_instance, - .data = func_extra_index, - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_error_union, - .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ - .error_set_type = error_set_type, - .payload_type = arg.bare_return_type, - }), - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_inferred_error_set, - .data = @intFromEnum(func_index), - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = func_type_extra_index, + items.appendSliceAssumeCapacity(.{ + .tag = &.{ + .func_instance, + .type_error_union, + .type_inferred_error_set, + .type_function, + }, + .data = &.{ + func_extra_index, + ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = error_set_type, + .payload_type = arg.bare_return_type, + }), + @intFromEnum(func_index), + func_type_extra_index, + }, }); errdefer { - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; } - var gop = try ip.getOrPutKey(gpa, tid, .{ + var func_gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, func_extra_index), }); - defer gop.deinit(); - if (gop == .existing) { + defer func_gop.deinit(); + if (func_gop == .existing) { // Hot path: undo the additions to our two arrays. - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; - return gop.existing; + return func_gop.existing; } - - // Synchronize the map with items. 
- var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, } }); - defer eu_gop.deinit(); - var ies_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_union_type_gop.deinit(); + var error_set_type_gop = try ip.getOrPutKey(gpa, tid, .{ .inferred_error_set_type = func_index, }); - defer ies_gop.deinit(); - var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_set_type_gop.deinit(); + var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), }); - defer ty_gop.deinit(); - const index = gop.set(try finishFuncInstance( + defer func_ty_gop.deinit(); + try finishFuncInstance( ip, gpa, tid, @@ -7465,11 +7568,12 @@ pub fn getFuncInstanceIes( func_extra_index, arg.alignment, arg.section, - )); - _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); - _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); - _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); - return index; + ); + assert(func_gop.putAt(3) == func_index); + assert(error_union_type_gop.putAt(2) == error_union_type); + assert(error_set_type_gop.putAt(1) == error_set_type); + assert(func_ty_gop.putAt(0) == func_ty); + return func_index; } fn finishFuncInstance( @@ -7481,7 +7585,7 @@ fn finishFuncInstance( func_extra_index: u32, alignment: Alignment, section: OptionalNullTerminatedString, -) Allocator.Error!Index { +) Allocator.Error!void { const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); const decl_index = try ip.createDecl(gpa, .{ .name = undefined, @@ -7510,8 +7614,6 @@ fn finishFuncInstance( decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); - - return func_index; } pub const EnumTypeInit = struct { @@ -7589,8 +7691,8 @@ pub const WipEnumType = struct { return null; } - pub fn cancel(wip: WipEnumType, ip: *InternPool) void { - ip.remove(wip.index); + pub fn cancel(wip: WipEnumType, ip: *InternPool, tid: Zcu.PerThread.Id) void { + ip.remove(tid, wip.index); } pub const Result = union(enum) { @@ -7618,7 +7720,8 @@ pub fn getEnumType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -7650,7 +7753,7 @@ pub fn getEnumType( inline else => |x| x.zir_index, }.toOptional(), }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_enum_auto, .data = extra_index, }); @@ -7661,7 +7764,7 @@ pub fn getEnumType( const names_start = ip.extra.items.len; ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? 
else null, @@ -7706,7 +7809,7 @@ pub fn getEnumType( inline else => |x| x.zir_index, }.toOptional(), }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (ini.tag_mode) { .auto => unreachable, .explicit => .type_enum_explicit, @@ -7725,7 +7828,7 @@ pub fn getEnumType( ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, @@ -7760,7 +7863,8 @@ pub fn getGeneratedTagEnumType( assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const names_map = try ip.addMap(gpa, ini.names.len); errdefer _ = ip.maps.pop(); @@ -7774,7 +7878,7 @@ pub fn getGeneratedTagEnumType( try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + 1 + // owner_union fields_len); // field names - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_enum_auto, .data = ip.addExtraAssumeCapacity(EnumAuto{ .decl = ini.decl, @@ -7803,7 +7907,7 @@ pub fn getGeneratedTagEnumType( // We don't clean up the values map on error! errdefer @compileError("error path leaks values_map"); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (ini.tag_mode) { .explicit => .type_enum_explicit, .nonexhaustive => .type_enum_nonexhaustive, @@ -7835,7 +7939,7 @@ pub fn getGeneratedTagEnumType( .generated_tag = .{ .union_type = ini.owner_union_ty }, } }); defer gop.deinit(); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const OpaqueTypeInit = struct { @@ -7870,7 +7974,10 @@ pub fn getOpaqueType( } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, .reified => 0, @@ -7886,7 +7993,7 @@ pub fn getOpaqueType( .reified => std.math.maxInt(u32), }, }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_opaque, .data = extra_index, }); @@ -7895,7 +8002,7 @@ pub fn getOpaqueType( .reified => {}, } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? @@ -7958,37 +8065,50 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex /// This operation only happens under compile error conditions. /// Leak the index until the next garbage collection. /// Invalidates all references to this index. 
-pub fn remove(ip: *InternPool, index: Index) void { +pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { + const unwrapped = index.unwrap(ip); if (@intFromEnum(index) < static_keys.len) { + if (tid != .main or unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); // The item being removed replaced a special index via `InternPool.resolveBuiltinType`. // Restore the original item at this index. + var items = ip.getLocalShared(unwrapped.tid).items.view(); switch (static_keys[@intFromEnum(index)]) { - .simple_type => |s| { - ip.items.set(@intFromEnum(index), .{ - .tag = .simple_type, - .data = @intFromEnum(s), - }); - }, + .simple_type => |s| items.set(@intFromEnum(index), .{ + .tag = .simple_type, + .data = @intFromEnum(s), + }), else => unreachable, } return; } - if (@intFromEnum(index) == ip.items.len - 1) { - // Happy case - we can just drop the item without affecting any other indices. - ip.items.len -= 1; - } else { - // We must preserve the item so that indices following it remain valid. - // Thus, we will rewrite the tag to `removed`, leaking the item until - // next GC but causing `KeyAdapter` to ignore it. - ip.items.set(@intFromEnum(index), .{ .tag = .removed, .data = undefined }); + if (unwrapped.tid == tid) { + const items_len = &ip.getLocal(tid).mutate.items.len; + if (unwrapped.index == items_len.* - 1) { + // Happy case - we can just drop the item without affecting any other indices. + items_len.* -= 1; + return; + } } + + // We must preserve the item so that indices following it remain valid. + // Thus, we will rewrite the tag to `removed`, leaking the item until + // next GC but causing `KeyAdapter` to ignore it. + const items = ip.getLocalShared(unwrapped.tid).items.view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .release); } -fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { +fn addInt( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + ty: Index, + tag: Tag, + limbs: []const Limb, +) !void { const limbs_len: u32 = @intCast(limbs.len); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); - ip.items.appendAssumeCapacity(.{ + ip.getLocal(tid).getMutableItems(gpa).appendAssumeCapacity(.{ .tag = tag, .data = ip.addLimbsExtraAssumeCapacity(Int{ .ty = ty, @@ -8235,13 +8355,13 @@ pub fn childType(ip: *const InternPool, i: Index) Index { } /// Given a slice type, returns the type of the ptr field. -pub fn slicePtrType(ip: *const InternPool, i: Index) Index { - switch (i) { +pub fn slicePtrType(ip: *const InternPool, index: Index) Index { + switch (index) { .slice_const_u8_type => return .manyptr_const_u8_type, .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } - const item = ip.items.get(@intFromEnum(i)); + const item = index.getItem(ip); switch (item.tag) { .type_slice => return @enumFromInt(item.data), else => unreachable, // not a slice type @@ -8249,8 +8369,8 @@ pub fn slicePtrType(ip: *const InternPool, i: Index) Index { } /// Given a slice value, returns the value of the ptr field. 
-pub fn slicePtr(ip: *const InternPool, i: Index) Index { - const item = ip.items.get(@intFromEnum(i)); +pub fn slicePtr(ip: *const InternPool, index: Index) Index { + const item = index.getItem(ip); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, else => unreachable, // not a slice value @@ -8258,8 +8378,8 @@ pub fn slicePtr(ip: *const InternPool, i: Index) Index { } /// Given a slice value, returns the value of the len field. -pub fn sliceLen(ip: *const InternPool, i: Index) Index { - const item = ip.items.get(@intFromEnum(i)); +pub fn sliceLen(ip: *const InternPool, index: Index) Index { + const item = index.getItem(ip); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).len, else => unreachable, // not a slice value @@ -8296,8 +8416,6 @@ pub fn getCoerced( const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; - const tags = ip.items.items(.tag); - switch (val) { .undef => return ip.get(gpa, tid, .{ .undef = new_ty }), .null_value => { @@ -8323,15 +8441,14 @@ pub fn getCoerced( } }), }; }, - else => switch (tags[@intFromEnum(val)]) { + else => switch (val.getTag(ip)) { .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), .func_coerced => { - const extra_index = ip.items.items(.data)[@intFromEnum(val)]; const func: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], ); - switch (tags[@intFromEnum(func)]) { + switch (func.getTag(ip)) { .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), else => unreachable, @@ -8575,10 +8692,8 @@ fn getCoercedFuncDecl( val: Index, new_ty: Index, ) Allocator.Error!Index { - const datas = ip.items.items(.data); - const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], ); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); @@ -8591,10 +8706,8 @@ fn getCoercedFuncInstance( val: Index, new_ty: Index, ) Allocator.Error!Index { - const datas = ip.items.items(.data); - const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], ); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); @@ -8609,7 +8722,9 @@ fn getCoercedFunc( ) Allocator.Error!Index { const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ .ty = ty, @@ -8626,11 +8741,11 @@ fn getCoercedFunc( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .func_coerced, .data = extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } /// Asserts `val` has an integer type. 
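Every accessor in these hunks migrates off direct indexing of the old global `ip.items` list and onto the `Index.getItem`/`getTag`/`getData` methods, which first decode the thread id packed into the index and then read the owning thread's list. The other recurring idiom is that an item's `data` word is an offset into the flat `extra` array, with named fields of an extra record addressed via `std.meta.fieldIndex`. A self-contained miniature of both idioms, under hypothetical `MiniPool`/`PtrSlice` names:

const std = @import("std");

// Hypothetical extra record; real records are serialized field-by-field
// as consecutive u32s in the `extra` array.
const PtrSlice = struct { ptr: u32, len: u32 };

const MiniPool = struct {
    tags: []const Tag,
    datas: []const u32,
    extra: []const u32,

    const Tag = enum { ptr_slice, other };

    fn slicePtr(pool: MiniPool, index: u32) u32 {
        switch (pool.tags[index]) {
            // `datas[index]` points at the record's first field;
            // `fieldIndex` turns the field name into its offset.
            .ptr_slice => return pool.extra[
                pool.datas[index] + std.meta.fieldIndex(PtrSlice, "ptr").?
            ],
            .other => unreachable, // not a slice value
        }
    }
};

test "slicePtr reads the ptr field of the extra record" {
    const pool: MiniPool = .{
        .tags = &.{.ptr_slice},
        .datas = &.{0},
        .extra = &.{ 123, 456 }, // ptr = 123, len = 456
    };
    try std.testing.expectEqual(@as(u32, 123), pool.slicePtr(0));
}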
@@ -8661,11 +8776,9 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, in } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { - assert(val != .none); - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - switch (tags[@intFromEnum(val)]) { - .type_function => return extraFuncType(ip, datas[@intFromEnum(val)]), + const item = val.getItem(ip); + switch (item.tag) { + .type_function => return extraFuncType(ip, item.data), else => return null, } } @@ -8686,7 +8799,7 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { .c_ulonglong_type, .comptime_int_type, => true, - else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { + else => switch (ty.getTag(ip)) { .type_int_signed, .type_int_unsigned, => true, @@ -8762,7 +8875,7 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { /// The is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); assert(item.tag == .variable); ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @intFromEnum(init_index); } @@ -8773,7 +8886,11 @@ pub fn dump(ip: *const InternPool) void { } fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { - const items_size = (1 + 4) * ip.items.len; + var items_len: usize = 0; + for (ip.locals) |*local| { + items_len += local.mutate.items.len; + } + const items_size = (1 + 4) * items_len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); @@ -8790,7 +8907,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { \\ , .{ total_size, - ip.items.len, + items_len, items_size, ip.extra.items.len, extra_size, @@ -8800,217 +8917,221 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { decls_size, }); - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); const TagStats = struct { count: usize = 0, bytes: usize = 0, }; var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); - for (tags, datas) |tag, data| { - const gop = try counts.getOrPut(tag); - if (!gop.found_existing) gop.value_ptr.* = .{}; - gop.value_ptr.count += 1; - gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { - // Note that in this case, we have technically leaked some extra data - // bytes which we do not account for here. 
- .removed => 0, - - .type_int_signed => 0, - .type_int_unsigned => 0, - .type_array_small => @sizeOf(Vector), - .type_array_big => @sizeOf(Array), - .type_vector => @sizeOf(Vector), - .type_pointer => @sizeOf(Tag.TypePointer), - .type_slice => 0, - .type_optional => 0, - .type_anyframe => 0, - .type_error_union => @sizeOf(Key.ErrorUnionType), - .type_anyerror_union => 0, - .type_error_set => b: { - const info = ip.extraData(Tag.ErrorSet, data); - break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); - }, - .type_inferred_error_set => 0, - .type_enum_explicit, .type_enum_nonexhaustive => b: { - const info = ip.extraData(EnumExplicit, data); - var ints = @typeInfo(EnumExplicit).Struct.fields.len; - if (info.zir_index == .none) ints += 1; - ints += if (info.captures_len != std.math.maxInt(u32)) - info.captures_len - else - @typeInfo(PackedU64).Struct.fields.len; - ints += info.fields_len; - if (info.values_map != .none) ints += info.fields_len; - break :b @sizeOf(u32) * ints; - }, - .type_enum_auto => b: { - const info = ip.extraData(EnumAuto, data); - const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; - break :b @sizeOf(u32) * ints; - }, - .type_opaque => b: { - const info = ip.extraData(Tag.TypeOpaque, data); - const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; - break :b @sizeOf(u32) * ints; - }, - .type_struct => b: { - if (data == 0) break :b 0; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); - const info = extra.data; - var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; - if (info.flags.any_captures) { - const captures_len = ip.extra.items[extra.end]; - ints += 1 + captures_len; - } - ints += info.fields_len; // types - if (!info.flags.is_tuple) { - ints += 1; // names_map - ints += info.fields_len; // names - } - if (info.flags.any_default_inits) - ints += info.fields_len; // inits - ints += @intFromBool(info.flags.has_namespace); // namespace - if (info.flags.any_aligned_fields) - ints += (info.fields_len + 3) / 4; // aligns - if (info.flags.any_comptime_fields) - ints += (info.fields_len + 31) / 32; // comptime bits - if (!info.flags.is_extern) - ints += info.fields_len; // runtime order - ints += info.fields_len; // offsets - break :b @sizeOf(u32) * ints; - }, - .type_struct_anon => b: { - const info = ip.extraData(TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); - }, - .type_struct_packed => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + - @intFromBool(extra.data.flags.any_captures) + captures_len + - extra.data.fields_len * 2); - }, - .type_struct_packed_inits => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + - @intFromBool(extra.data.flags.any_captures) + captures_len + - extra.data.fields_len * 3); - }, - .type_tuple_anon => b: { - const info = ip.extraData(TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); - }, - - .type_union => b: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - const 
per_field = @sizeOf(u32); // field type - // 1 byte per field for alignment, rounded up to the nearest 4 bytes - const alignments = if (extra.data.flags.any_aligned_fields) - ((extra.data.fields_len + 3) / 4) * 4 - else - 0; - break :b @sizeOf(Tag.TypeUnion) + - 4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) + - (extra.data.fields_len * per_field) + alignments; - }, + for (ip.locals) |*local| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + ) |tag, data| { + const gop = try counts.getOrPut(tag); + if (!gop.found_existing) gop.value_ptr.* = .{}; + gop.value_ptr.count += 1; + gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { + // Note that in this case, we have technically leaked some extra data + // bytes which we do not account for here. + .removed => 0, + + .type_int_signed => 0, + .type_int_unsigned => 0, + .type_array_small => @sizeOf(Vector), + .type_array_big => @sizeOf(Array), + .type_vector => @sizeOf(Vector), + .type_pointer => @sizeOf(Tag.TypePointer), + .type_slice => 0, + .type_optional => 0, + .type_anyframe => 0, + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_anyerror_union => 0, + .type_error_set => b: { + const info = ip.extraData(Tag.ErrorSet, data); + break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => 0, + .type_enum_explicit, .type_enum_nonexhaustive => b: { + const info = ip.extraData(EnumExplicit, data); + var ints = @typeInfo(EnumExplicit).Struct.fields.len; + if (info.zir_index == .none) ints += 1; + ints += if (info.captures_len != std.math.maxInt(u32)) + info.captures_len + else + @typeInfo(PackedU64).Struct.fields.len; + ints += info.fields_len; + if (info.values_map != .none) ints += info.fields_len; + break :b @sizeOf(u32) * ints; + }, + .type_enum_auto => b: { + const info = ip.extraData(EnumAuto, data); + const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; + break :b @sizeOf(u32) * ints; + }, + .type_opaque => b: { + const info = ip.extraData(Tag.TypeOpaque, data); + const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; + break :b @sizeOf(u32) * ints; + }, + .type_struct => b: { + if (data == 0) break :b 0; + const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const info = extra.data; + var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; + if (info.flags.any_captures) { + const captures_len = ip.extra.items[extra.end]; + ints += 1 + captures_len; + } + ints += info.fields_len; // types + if (!info.flags.is_tuple) { + ints += 1; // names_map + ints += info.fields_len; // names + } + if (info.flags.any_default_inits) + ints += info.fields_len; // inits + ints += @intFromBool(info.flags.has_namespace); // namespace + if (info.flags.any_aligned_fields) + ints += (info.fields_len + 3) / 4; // aligns + if (info.flags.any_comptime_fields) + ints += (info.fields_len + 31) / 32; // comptime bits + if (!info.flags.is_extern) + ints += info.fields_len; // runtime order + ints += info.fields_len; // offsets + break :b @sizeOf(u32) * ints; + }, + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_struct_packed => b: { + const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + break :b 
@sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + + @intFromBool(extra.data.flags.any_captures) + captures_len + + extra.data.fields_len * 2); + }, + .type_struct_packed_inits => b: { + const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + + @intFromBool(extra.data.flags.any_captures) + captures_len + + extra.data.fields_len * 3); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, - .type_function => b: { - const info = ip.extraData(Tag.TypeFunction, data); - break :b @sizeOf(Tag.TypeFunction) + - (@sizeOf(Index) * info.params_len) + - (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + - (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); - }, + .type_union => b: { + const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + const per_field = @sizeOf(u32); // field type + // 1 byte per field for alignment, rounded up to the nearest 4 bytes + const alignments = if (extra.data.flags.any_aligned_fields) + ((extra.data.fields_len + 3) / 4) * 4 + else + 0; + break :b @sizeOf(Tag.TypeUnion) + + 4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) + + (extra.data.fields_len * per_field) + alignments; + }, - .undef => 0, - .simple_type => 0, - .simple_value => 0, - .ptr_decl => @sizeOf(PtrDecl), - .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc), - .ptr_anon_decl => @sizeOf(PtrAnonDecl), - .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), - .ptr_comptime_field => @sizeOf(PtrComptimeField), - .ptr_int => @sizeOf(PtrInt), - .ptr_eu_payload => @sizeOf(PtrBase), - .ptr_opt_payload => @sizeOf(PtrBase), - .ptr_elem => @sizeOf(PtrBaseIndex), - .ptr_field => @sizeOf(PtrBaseIndex), - .ptr_slice => @sizeOf(PtrSlice), - .opt_null => 0, - .opt_payload => @sizeOf(Tag.TypeValue), - .int_u8 => 0, - .int_u16 => 0, - .int_u32 => 0, - .int_i32 => 0, - .int_usize => 0, - .int_comptime_int_u32 => 0, - .int_comptime_int_i32 => 0, - .int_small => @sizeOf(IntSmall), + .type_function => b: { + const info = ip.extraData(Tag.TypeFunction, data); + break :b @sizeOf(Tag.TypeFunction) + + (@sizeOf(Index) * info.params_len) + + (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + + (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); + }, - .int_positive, - .int_negative, - => b: { - const int = ip.limbData(Int, data); - break :b @sizeOf(Int) + int.limbs_len * 8; - }, + .undef => 0, + .simple_type => 0, + .simple_value => 0, + .ptr_decl => @sizeOf(PtrDecl), + .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc), + .ptr_anon_decl => @sizeOf(PtrAnonDecl), + .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), + .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_int => @sizeOf(PtrInt), + .ptr_eu_payload => @sizeOf(PtrBase), + .ptr_opt_payload => @sizeOf(PtrBase), + .ptr_elem => @sizeOf(PtrBaseIndex), + .ptr_field => @sizeOf(PtrBaseIndex), + .ptr_slice => @sizeOf(PtrSlice), + .opt_null => 0, + .opt_payload => @sizeOf(Tag.TypeValue), + .int_u8 => 0, + .int_u16 => 0, + .int_u32 => 0, + .int_i32 => 0, + .int_usize => 0, + .int_comptime_int_u32 => 0, + .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), + + .int_positive, + .int_negative, + => b: { + const int = 
ip.limbData(Int, data); + break :b @sizeOf(Int) + int.limbs_len * 8; + }, - .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), - .error_set_error, .error_union_error => @sizeOf(Key.Error), - .error_union_payload => @sizeOf(Tag.TypeValue), - .enum_literal => 0, - .enum_tag => @sizeOf(Tag.EnumTag), + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(Tag.TypeValue), + .enum_literal => 0, + .enum_tag => @sizeOf(Tag.EnumTag), - .bytes => b: { - const info = ip.extraData(Bytes, data); - const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); - break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); - }, - .aggregate => b: { - const info = ip.extraData(Tag.Aggregate, data); - const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); - break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); - }, - .repeated => @sizeOf(Repeated), - - .float_f16 => 0, - .float_f32 => 0, - .float_f64 => @sizeOf(Float64), - .float_f80 => @sizeOf(Float80), - .float_f128 => @sizeOf(Float128), - .float_c_longdouble_f80 => @sizeOf(Float80), - .float_c_longdouble_f128 => @sizeOf(Float128), - .float_comptime_float => @sizeOf(Float128), - .variable => @sizeOf(Tag.Variable), - .extern_func => @sizeOf(Tag.ExternFunc), - .func_decl => @sizeOf(Tag.FuncDecl), - .func_instance => b: { - const info = ip.extraData(Tag.FuncInstance, data); - const ty = ip.typeOf(info.generic_owner); - const params_len = ip.indexToKey(ty).func_type.param_types.len; - break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; - }, - .func_coerced => @sizeOf(Tag.FuncCoerced), - .only_possible_value => 0, - .union_value => @sizeOf(Key.Union), + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); + }, + .aggregate => b: { + const info = ip.extraData(Tag.Aggregate, data); + const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); + }, + .repeated => @sizeOf(Repeated), + + .float_f16 => 0, + .float_f32 => 0, + .float_f64 => @sizeOf(Float64), + .float_f80 => @sizeOf(Float80), + .float_f128 => @sizeOf(Float128), + .float_c_longdouble_f80 => @sizeOf(Float80), + .float_c_longdouble_f128 => @sizeOf(Float128), + .float_comptime_float => @sizeOf(Float128), + .variable => @sizeOf(Tag.Variable), + .extern_func => @sizeOf(Tag.ExternFunc), + .func_decl => @sizeOf(Tag.FuncDecl), + .func_instance => b: { + const info = ip.extraData(Tag.FuncInstance, data); + const ty = ip.typeOf(info.generic_owner); + const params_len = ip.indexToKey(ty).func_type.param_types.len; + break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; + }, + .func_coerced => @sizeOf(Tag.FuncCoerced), + .only_possible_value => 0, + .union_value => @sizeOf(Key.Union), - .memoized_call => b: { - const info = ip.extraData(MemoizedCall, data); - break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); - }, - }); + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, + }); + } } const SortContext = struct { map: *std.AutoArrayHashMap(Tag, TagStats), @@ -9031,97 +9152,103 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { } fn 
dumpAllFallible(ip: *const InternPool) anyerror!void { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); const w = bw.writer(); - for (tags, datas, 0..) |tag, data, i| { - try w.print("${d} = {s}(", .{ i, @tagName(tag) }); - switch (tag) { - .removed => {}, - - .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), - .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), - - .type_int_signed, - .type_int_unsigned, - .type_array_small, - .type_array_big, - .type_vector, - .type_pointer, - .type_optional, - .type_anyframe, - .type_error_union, - .type_anyerror_union, - .type_error_set, - .type_inferred_error_set, - .type_enum_explicit, - .type_enum_nonexhaustive, - .type_enum_auto, - .type_opaque, - .type_struct, - .type_struct_anon, - .type_struct_packed, - .type_struct_packed_inits, - .type_tuple_anon, - .type_union, - .type_function, - .undef, - .ptr_decl, - .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, - .ptr_comptime_field, - .ptr_int, - .ptr_eu_payload, - .ptr_opt_payload, - .ptr_elem, - .ptr_field, - .ptr_slice, - .opt_payload, - .int_u8, - .int_u16, - .int_u32, - .int_i32, - .int_usize, - .int_comptime_int_u32, - .int_comptime_int_i32, - .int_small, - .int_positive, - .int_negative, - .int_lazy_align, - .int_lazy_size, - .error_set_error, - .error_union_error, - .error_union_payload, - .enum_literal, - .enum_tag, - .bytes, - .aggregate, - .repeated, - .float_f16, - .float_f32, - .float_f64, - .float_f80, - .float_f128, - .float_c_longdouble_f80, - .float_c_longdouble_f128, - .float_comptime_float, - .variable, - .extern_func, - .func_decl, - .func_instance, - .func_coerced, - .union_value, - .memoized_call, - => try w.print("{d}", .{data}), - - .opt_null, - .type_slice, - .only_possible_value, - => try w.print("${d}", .{data}), + for (ip.locals, 0..) 
|*local, tid| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + 0.., + ) |tag, data, index| { + const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip); + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); + switch (tag) { + .removed => {}, + + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), + + .type_int_signed, + .type_int_unsigned, + .type_array_small, + .type_array_big, + .type_vector, + .type_pointer, + .type_optional, + .type_anyframe, + .type_error_union, + .type_anyerror_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_enum_auto, + .type_opaque, + .type_struct, + .type_struct_anon, + .type_struct_packed, + .type_struct_packed_inits, + .type_tuple_anon, + .type_union, + .type_function, + .undef, + .ptr_decl, + .ptr_comptime_alloc, + .ptr_anon_decl, + .ptr_anon_decl_aligned, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + .enum_tag, + .bytes, + .aggregate, + .repeated, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func_decl, + .func_instance, + .func_coerced, + .union_value, + .memoized_call, + => try w.print("{d}", .{data}), + + .opt_null, + .type_slice, + .only_possible_value, + => try w.print("${d}", .{data}), + } + try w.writeAll(")\n"); } - try w.writeAll(")\n"); } try bw.flush(); } @@ -9139,15 +9266,24 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const w = bw.writer(); var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; - const datas = ip.items.items(.data); - for (ip.items.items(.tag), 0..) |tag, i| { - if (tag != .func_instance) continue; - const info = ip.extraData(Tag.FuncInstance, datas[i]); - - const gop = try instances.getOrPut(arena, info.generic_owner); - if (!gop.found_existing) gop.value_ptr.* = .{}; - - try gop.value_ptr.append(arena, @enumFromInt(i)); + for (ip.locals, 0..) 
|*local, tid| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + 0.., + ) |tag, data, index| { + if (tag != .func_instance) continue; + const info = ip.extraData(Tag.FuncInstance, data); + + const gop = try instances.getOrPut(arena, info.generic_owner); + if (!gop.found_existing) gop.value_ptr.* = .{}; + + try gop.value_ptr.append( + arena, + Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip), + ); + } } const SortContext = struct { @@ -9163,7 +9299,7 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const generic_fn_owner_decl = ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { - const func = ip.extraFuncInstance(datas[@intFromEnum(index)]); + const func = ip.extraFuncInstance(index.getData(ip)); const owner_decl = ip.declPtrConst(func.owner_decl); try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { @@ -9518,7 +9654,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization on tags is needed so that indexToKey can call // typeOf without being recursive. - _ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { + _ => switch (index.getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -9551,7 +9687,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef, .opt_null, .only_possible_value, - => @enumFromInt(ip.items.items(.data)[@intFromEnum(index)]), + => @enumFromInt(index.getData(ip)), .simple_value => unreachable, // handled via Index above @@ -9584,7 +9720,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .aggregate, .repeated, => |t| { - const extra_index = ip.items.items(.data)[@intFromEnum(index)]; + const extra_index = index.getData(ip); const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; return @enumFromInt(ip.extra.items[extra_index + field_index]); }, @@ -9602,7 +9738,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // Note these are stored in limbs data, not extra data. .int_positive, .int_negative, - => ip.limbData(Int, ip.items.items(.data)[@intFromEnum(index)]).ty, + => ip.limbData(Int, index.getData(ip)).ty, .enum_literal => .enum_literal_type, .float_f16 => .f16_type, @@ -9651,11 +9787,11 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { } pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { - const item = ip.items.get(@intFromEnum(ty)); + const item = ty.getItem(ip); const child_item = switch (item.tag) { - .type_pointer => ip.items.get(ip.extra.items[ + .type_pointer => @as(Index, @enumFromInt(ip.extra.items[ item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? - ]), + ])).getItem(ip), .type_function => item, else => unreachable, }; @@ -9668,47 +9804,47 @@ pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, - else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { - .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, + else => switch (ty.getTag(ip)) { + .type_error_set => ip.extra.items[ty.getData(ip) + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] 
== 0, else => false, }, }; } pub fn isUndef(ip: *const InternPool, val: Index) bool { - return val == .undef or ip.items.items(.tag)[@intFromEnum(val)] == .undef; + return val == .undef or val.getTag(ip) == .undef; } pub fn isVariable(ip: *const InternPool, val: Index) bool { - return ip.items.items(.tag)[@intFromEnum(val)] == .variable; + return val.getTag(ip) == .variable; } pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { - var base = @intFromEnum(val); + var base = val; while (true) { - switch (ip.items.items(.tag)[base]) { + switch (base.getTag(ip)) { .ptr_decl => return @enumFromInt(ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(PtrDecl, "decl").? + base.getData(ip) + std.meta.fieldIndex(PtrDecl, "decl").? ]), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? - ], - .ptr_slice => base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(PtrSlice, "ptr").? - ], + => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + ]), + .ptr_slice => base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(PtrSlice, "ptr").? + ]), else => return .none, } } } pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag { - var base = @intFromEnum(val); + var base = val; while (true) { - switch (ip.items.items(.tag)[base]) { + switch (base.getTag(ip)) { .ptr_decl => return .decl, .ptr_comptime_alloc => return .comptime_alloc, .ptr_anon_decl, @@ -9720,12 +9856,12 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Ta .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? - ], - inline .ptr_slice => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").? - ], + => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + ]), + inline .ptr_slice => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "ptr").? 
+ ]), else => return null, } } @@ -9834,7 +9970,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .empty_struct => unreachable, .generic_poison => unreachable, - _ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { + _ => switch (index.getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -9941,24 +10077,22 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois }; } -pub fn isFuncBody(ip: *const InternPool, i: Index) bool { - assert(i != .none); - return switch (ip.items.items(.tag)[@intFromEnum(i)]) { +pub fn isFuncBody(ip: *const InternPool, index: Index) bool { + return switch (index.getTag(ip)) { .func_decl, .func_instance, .func_coerced => true, else => false, }; } -pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis { - assert(i != .none); - const item = ip.items.get(@intFromEnum(i)); +pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { + const item = index.getItem(ip); const extra_index = switch (item.tag) { .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, .func_coerced => i: { const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); - const sub_item = ip.items.get(@intFromEnum(func_index)); + const sub_item = func_index.getItem(ip); break :i switch (sub_item.tag) { .func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, @@ -9974,22 +10108,21 @@ pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { return funcAnalysis(ip, i).inferred_error_set; } -pub fn funcZirBodyInst(ip: *const InternPool, i: Index) TrackedInst.Index { - assert(i != .none); - const item = ip.items.get(@intFromEnum(i)); +pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index { + const item = index.getItem(ip); const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; const extra_index = switch (item.tag) { .func_decl => item.data + zir_body_inst_field_index, - .func_instance => b: { + .func_instance => ei: { const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; - const func_decl_index = ip.extra.items[item.data + generic_owner_field_index]; - assert(ip.items.items(.tag)[func_decl_index] == .func_decl); - break :b ip.items.items(.data)[func_decl_index] + zir_body_inst_field_index; + const func_decl_index: Index = @enumFromInt(ip.extra.items[item.data + generic_owner_field_index]); + const func_decl_item = func_decl_index.getItem(ip); + assert(func_decl_item.tag == .func_decl); + break :ei func_decl_item.data + zir_body_inst_field_index; }, .func_coerced => { - const datas = ip.items.items(.data); const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ - datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); return ip.funcZirBodyInst(uncoerced_func_index); }, @@ -9999,15 +10132,14 @@ pub fn funcZirBodyInst(ip: *const InternPool, i: Index) TrackedInst.Index { } pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { - assert(ies_index != .none); - const tags = ip.items.items(.tag); - assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); - const func_index = ip.items.items(.data)[@intFromEnum(ies_index)]; - switch (tags[func_index]) { + const item = ies_index.getItem(ip); + assert(item.tag == .type_inferred_error_set); + const func_index: Index = @enumFromInt(item.data); + switch (func_index.getTag(ip)) { .func_decl, .func_instance => {}, else => unreachable, // assertion failed } - return @enumFromInt(func_index); + return func_index; } /// Returns a mutable pointer to the resolved error set type of an inferred @@ -10026,21 +10158,19 @@ pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { /// error set function. The returned pointer is invalidated when anything is /// added to `ip`. pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); assert(funcHasInferredErrorSet(ip, func_index)); - const func_start = datas[@intFromEnum(func_index)]; - const extra_index = switch (tags[@intFromEnum(func_index)]) { - .func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + const func_item = func_index.getItem(ip); + const extra_index = switch (func_item.tag) { + .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, .func_coerced => i: { const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ - func_start + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); - const uncoerced_func_start = datas[@intFromEnum(uncoerced_func_index)]; - break :i switch (tags[@intFromEnum(uncoerced_func_index)]) { - .func_decl => uncoerced_func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => uncoerced_func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + const uncoerced_func_item = uncoerced_func_index.getItem(ip); + break :i switch (uncoerced_func_item.tag) { + .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, else => unreachable, }; }, @@ -10049,35 +10179,28 @@ pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { return @ptrCast(&ip.extra.items[extra_index]); } -pub fn funcDeclInfo(ip: *const InternPool, i: Index) Key.Func { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - assert(tags[@intFromEnum(i)] == .func_decl); - return extraFuncDecl(ip, datas[@intFromEnum(i)]); +pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { + const item = index.getItem(ip); + assert(item.tag == .func_decl); + return extraFuncDecl(ip, item.data); } -pub fn funcDeclOwner(ip: *const InternPool, i: Index) DeclIndex { - return funcDeclInfo(ip, i).owner_decl; +pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { + return funcDeclInfo(ip, index).owner_decl; } -pub fn funcTypeParamsLen(ip: *const InternPool, i: Index) u32 { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - assert(tags[@intFromEnum(i)] == .type_function); - const start = datas[@intFromEnum(i)]; - return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; +pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 { + const item = index.getItem(ip); + assert(item.tag == .type_function); + return ip.extra.items[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; } -pub fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { - const tags = ip.items.items(.tag); - return switch (tags[@intFromEnum(i)]) { - .func_coerced => { - const datas = ip.items.items(.data); - return @enumFromInt(ip.extra.items[ - datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? - ]); - }, - .func_instance, .func_decl => i, +pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { + return switch (index.getTag(ip)) { + .func_coerced => @enumFromInt(ip.extra.items[ + index.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + ]), + .func_instance, .func_decl => index, else => unreachable, }; } @@ -10085,7 +10208,12 @@ pub fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { /// Having resolved a builtin type to a real struct/union/enum (which is now at `resolverd_index`), /// make `want_index` refer to this type instead. This invalidates `resolved_index`, so must be /// called only when it is guaranteed that no reference to `resolved_index` exists. 
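Per the contract in the doc comment above, the rewrite of `resolveBuiltinType` that follows copies the freshly resolved item over the reserved builtin slot and then retires the duplicate index through the new thread-aware `remove`, which will either pop it (if it was the calling thread's most recent item) or tombstone it. Because the reserved builtin indices all live in the main thread's range, the threaded variant refuses to run anywhere else. Distilled to its core, using the names from the hunk below (not a verbatim copy; the real body also asserts that both indices denote types with matching `zigTypeTag`):

const resolved_item = resolved_index.getItem(ip); // grab tag + data
const want_unwrapped = want_index.unwrap(ip);
if (tid != .main or want_unwrapped.tid != .main)
    @panic("This operation is impossible to be thread-safe");
var want_items = ip.getLocalShared(want_unwrapped.tid).items.view();
want_items.set(want_unwrapped.index, resolved_item); // redirect the slot
ip.remove(tid, resolved_index); // pop or tombstone the duplicate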
-pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: Index) void { +pub fn resolveBuiltinType( + ip: *InternPool, + tid: Zcu.PerThread.Id, + want_index: Index, + resolved_index: Index, +) void { assert(@intFromEnum(want_index) >= @intFromEnum(Index.first_type)); assert(@intFromEnum(want_index) <= @intFromEnum(Index.last_type)); @@ -10097,20 +10225,12 @@ pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: In (ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); // Copy the data - const item = ip.items.get(@intFromEnum(resolved_index)); - ip.items.set(@intFromEnum(want_index), item); - - if (std.debug.runtime_safety) { - // Make the value unreachable - this is a weird value which will make (incorrect) existing - // references easier to spot - ip.items.set(@intFromEnum(resolved_index), .{ - .tag = .simple_value, - .data = @intFromEnum(SimpleValue.@"unreachable"), - }); - } else { - // Here we could add the index to a free-list for reuse, but since - // there is so little garbage created this way it's not worth it. - } + const resolved_item = resolved_index.getItem(ip); + const want_unwrapped = want_index.unwrap(ip); + if (tid != .main or want_unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); + var want_items = ip.getLocalShared(want_unwrapped.tid).items.view(); + want_items.set(want_unwrapped.index, resolved_item); + ip.remove(tid, resolved_index); } pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index { diff --git a/src/Sema.zig b/src/Sema.zig index ee4ac3b70319..34db45795500 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2717,10 +2717,11 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us /// Given an `InternPool.WipNamespaceType` or `InternPool.WipEnumType`, apply /// `sema.builtin_type_target_index` to it if necessary. fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { + const pt = sema.pt; if (sema.builtin_type_target_index == .none) return wip_ty; var new = wip_ty; new.index = sema.builtin_type_target_index; - sema.pt.zcu.intern_pool.resolveBuiltinType(new.index, wip_ty.index); + pt.zcu.intern_pool.resolveBuiltinType(pt.tid, new.index, wip_ty.index); return new; } @@ -2740,7 +2741,7 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(ty); + zcu.intern_pool.remove(pt.tid, ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); return true; @@ -2819,7 +2820,7 @@ fn zirStructDecl( }, .wip => |wip| wip, }); - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3056,7 +3057,7 @@ fn zirEnumDecl( // have finished constructing the type and are in the process of analyzing it. 
var done = false; - errdefer if (!done) wip_ty.cancel(ip); + errdefer if (!done) wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3324,7 +3325,7 @@ fn zirUnionDecl( }, .wip => |wip| wip, }); - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3414,7 +3415,7 @@ fn zirOpaqueDecl( }, .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -21705,7 +21706,7 @@ fn zirReify( .existing => |ty| return Air.internedToRef(ty), .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -21901,7 +21902,7 @@ fn reifyEnum( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); @@ -22052,7 +22053,7 @@ fn reifyUnion( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -22158,7 +22159,7 @@ fn reifyUnion( const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index)); break :tag_ty .{ enum_tag_ty, false }; }; - errdefer if (!has_explicit_tag) ip.remove(enum_tag_ty); // remove generated tag type on error + errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error for (field_types) |field_ty_ip| { const field_ty = Type.fromInterned(field_ty_ip); @@ -22305,7 +22306,7 @@ fn reifyStruct( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (is_tuple) switch (layout) { .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}), @@ -36924,7 +36925,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .none, => unreachable, - _ => switch (ip.items.items(.tag)[@intFromEnum(ty.toIntern())]) { + _ => switch (ty.toIntern().getTag(ip)) { .removed => unreachable, .type_int_signed, // i0 handled above diff --git a/src/Type.zig b/src/Type.zig index 0bb8f3f1448f..ba53535d40f8 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3686,7 +3686,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .empty_struct => unreachable, .generic_poison => unreachable, - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { + else => switch (ty_ip.getTag(ip)) { .type_struct, .type_struct_packed, .type_struct_packed_inits, diff --git a/src/Zcu.zig b/src/Zcu.zig index 04ba7cc3284b..b855e4fcf0f6 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3071,42 +3071,6 @@ pub const SemaDeclResult = packed struct { invalidate_decl_ref: bool, }; -pub fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { - const decl = zcu.declPtr(decl_index); - - assert(decl.has_tv); - assert(decl.owns_tv); - - log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); - - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => @panic("TODO: update fn instance"), - .Type => {}, - else => unreachable, - } - - // We are the owner Decl of a type, and we were marked as outdated. 
That means the *structure* - // of this type changed; not just its namespace. Therefore, we need a new InternPool index. - // - // However, as soon as we make that, the context that created us will require re-analysis anyway - // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction - // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, - // why should we bother implementing it here too when the Sema logic will be hit right after? - // - // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely - // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type - // with a new Decl. - // - // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(decl.val.toIntern()); - decl.analysis = .dependency_failure; - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; -} - pub const ImportFileResult = struct { file: *File, file_index: File.Index, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 2d2be29909f0..1233275a261e 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -574,7 +574,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (decl.val.ip_index != func_index) { try zcu.markDependeeOutdated(.{ .func_ies = func_index }); ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); - ip.remove(func_index); + ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); } @@ -823,7 +823,7 @@ fn getFileRootStruct( .existing => unreachable, // we wouldn't be analysing the file root if this type existed .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (zcu.comp.debug_incremental) { try ip.addDependency( @@ -885,7 +885,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: ip.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = file_root_decl, })); - ip.remove(decl.val.toIntern()); + ip.remove(pt.tid, decl.val.toIntern()); decl.val = undefined; _ = try pt.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); return true; @@ -959,7 +959,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { assert(file.zir_loaded); const struct_ty = try pt.getFileRootStruct(new_decl_index, new_namespace_index, file_index); - errdefer zcu.intern_pool.remove(struct_ty); + errdefer zcu.intern_pool.remove(pt.tid, struct_ty); switch (zcu.comp.cache_use) { .whole => |whole| if (whole.cache_manifest) |man| { @@ -1002,7 +1002,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { if (decl.zir_decl_index == .none and decl.owns_tv) { // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). 
- return zcu.semaAnonOwnerDecl(decl_index); + return pt.semaAnonOwnerDecl(decl_index); } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); @@ -1270,6 +1270,43 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { return result; } +pub fn semaAnonOwnerDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { + const zcu = pt.zcu; + const decl = zcu.declPtr(decl_index); + + assert(decl.has_tv); + assert(decl.owns_tv); + + log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); + + switch (decl.typeOf(zcu).zigTypeTag(zcu)) { + .Fn => @panic("TODO: update fn instance"), + .Type => {}, + else => unreachable, + } + + // We are the owner Decl of a type, and we were marked as outdated. That means the *structure* + // of this type changed; not just its namespace. Therefore, we need a new InternPool index. + // + // However, as soon as we make that, the context that created us will require re-analysis anyway + // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction + // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, + // why should we bother implementing it here too when the Sema logic will be hit right after? + // + // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely + // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type + // with a new Decl. + // + // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.remove(pt.tid, decl.val.toIntern()); + decl.analysis = .dependency_failure; + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; +} + pub fn embedFile( pt: Zcu.PerThread, cur_file: *Zcu.File, From 383cffbfae2d6be5862cbadaf138618c5b37b345 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 5 Jul 2024 13:34:14 -0400 Subject: [PATCH 068/152] InternPool: temporarily disable multi-threaded behavior This reduces the cost of the new data structure until the multi-threaded behavior is actually used. 
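The patch below achieves this with a comptime switch rather than a runtime flag: a `want_multi_threaded` constant is folded together with `builtin.single_threaded` into a single `single_threaded` constant, the shard-width fields collapse to zero-bit `u0`s, and `Thread.Pool` gains a zero-size stub for `ids` so call sites stay identical. A reduced sketch of the zero-bit-field trick, assuming nothing beyond what the hunks introduce (the `Pool` wrapper name is hypothetical):

const std = @import("std");
const builtin = @import("builtin");

// Flip to re-enable sharding; mirrors the constant added below.
const want_multi_threaded = false;
const single_threaded = builtin.single_threaded or !want_multi_threaded;

const Pool = struct {
    // A `u0` occupies no bits, so single-threaded builds carry no shard
    // bookkeeping, while the code that reads the field still compiles.
    tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0,

    fn tidMask(pool: Pool) u32 {
        // Same shape as the patched `getTidMask`.
        return (@as(u32, 1) << pool.tid_width) - 1;
    }
};

test "single-threaded mask covers only tid 0" {
    const pool: Pool = .{};
    try std.testing.expectEqual(@as(u32, 0), pool.tidMask());
}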
--- lib/std/Thread/Pool.zig | 11 +++-- src/InternPool.zig | 95 +++++++++++++++++++++++++---------------- src/Zcu/PerThread.zig | 2 +- 3 files changed, 67 insertions(+), 41 deletions(-) diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 9fb3c3483a67..5972c4111a55 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -8,8 +8,13 @@ cond: std.Thread.Condition = .{}, run_queue: RunQueue = .{}, is_running: bool = true, allocator: std.mem.Allocator, -threads: []std.Thread, -ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void), +threads: if (builtin.single_threaded) [0]std.Thread else []std.Thread, +ids: if (builtin.single_threaded) struct { + inline fn deinit(_: @This(), _: std.mem.Allocator) void {} + fn getIndex(_: @This(), _: std.Thread.Id) usize { + return 0; + } +} else std.AutoArrayHashMapUnmanaged(std.Thread.Id, void), const RunQueue = std.SinglyLinkedList(Runnable); const Runnable = struct { @@ -29,7 +34,7 @@ pub fn init(pool: *Pool, options: Options) !void { pool.* = .{ .allocator = allocator, - .threads = &[_]std.Thread{}, + .threads = if (builtin.single_threaded) .{} else &.{}, .ids = .{}, }; diff --git a/src/InternPool.zig b/src/InternPool.zig index 117b2ceef8c7..aab56c19e69e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4,11 +4,10 @@ locals: []Local = &.{}, shards: []Shard = &.{}, -tid_width: std.math.Log2Int(u32) = 0, -tid_shift_31: std.math.Log2Int(u32) = 31, -tid_shift_32: std.math.Log2Int(u32) = 31, +tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, +tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, +tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, -//items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. /// On 64-bit systems, this array is used for big integers and associated metadata. @@ -92,6 +91,14 @@ free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{}, /// Value is the `Decl` of the struct that represents this `File`. files: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, OptionalDeclIndex) = .{}, +/// Whether a multi-threaded intern pool is useful. +/// Currently `false` until the intern pool is actually accessed +/// from multiple threads to reduce the cost of this data structure. +const want_multi_threaded = false; + +/// Whether a single-threaded intern pool impl is in use. 
+pub const single_threaded = builtin.single_threaded or !want_multi_threaded; + pub const FileIndex = enum(u32) { _, }; @@ -497,19 +504,23 @@ const Local = struct { var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; new_list.header().* = .{ .capacity = capacity }; const len = mutable.lenPtr().*; - const old_slice = mutable.list.view().slice(); - const new_slice = new_list.view().slice(); - inline for (fields) |field| { - @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); + // this cold, quickly predictable, condition enables + // the `MultiArrayList` optimization in `view` + if (len > 0) { + const old_slice = mutable.list.view().slice(); + const new_slice = new_list.view().slice(); + inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); } mutable.list.release(new_list); } fn view(mutable: Mutable) View { + const capacity = mutable.capacityPtr().*; + assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = mutable.list.bytes, .len = mutable.lenPtr().*, - .capacity = mutable.capacityPtr().*, + .capacity = capacity, }; } @@ -550,6 +561,7 @@ const Local = struct { fn view(list: ListSelf) View { const capacity = list.header().capacity; + assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = list.bytes, .len = capacity, @@ -665,13 +677,8 @@ const Shard = struct { } }; -fn getShard(ip: *InternPool, tid: Zcu.PerThread.Id) *Shard { - return &ip.shards[@intFromEnum(tid)]; -} - fn getTidMask(ip: *const InternPool) u32 { - assert(std.math.isPowerOfTwo(ip.shards.len)); - return @intCast(ip.shards.len - 1); + return (@as(u32, 1) << ip.tid_width) - 1; } fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { @@ -809,7 +816,7 @@ pub const String = enum(u32) { }; } - fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { + noinline fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { const unwrapped = string.unwrap(ip); return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; } @@ -3230,19 +3237,35 @@ pub const Index = enum(u32) { } }; - pub fn getItem(index: Index, ip: *const InternPool) Item { - const unwrapped = index.unwrap(ip); - return ip.getLocalShared(unwrapped.tid).items.acquire().view().get(unwrapped.index); + pub inline fn getItem(index: Index, ip: *const InternPool) Item { + const item_ptr = index.itemPtr(ip); + const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); + return .{ .tag = tag, .data = item_ptr.data_ptr.* }; } - pub fn getTag(index: Index, ip: *const InternPool) Tag { - const unwrapped = index.unwrap(ip); - return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.tag)[unwrapped.index]; + pub inline fn getTag(index: Index, ip: *const InternPool) Tag { + const item_ptr = index.itemPtr(ip); + return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); } - pub fn getData(index: Index, ip: *const InternPool) u32 { - const unwrapped = index.unwrap(ip); - return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.data)[unwrapped.index]; + pub inline fn getData(index: Index, ip: *const InternPool) u32 { + return index.getItem(ip).data; + } + + const ItemPtr = struct { + tag_ptr: *Tag, + data_ptr: *u32, + }; + fn itemPtr(index: Index, ip: *const InternPool) ItemPtr { + const unwrapped: Unwrapped = if (single_threaded) .{ + .tid = .main, + .index = @intFromEnum(index), + } else index.unwrap(ip); + const slice = 
ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); + return .{ + .tag_ptr = &slice.items(.tag)[unwrapped.index], + .data_ptr = &slice.items(.data)[unwrapped.index], + }; } const Unwrapped = struct { @@ -4905,11 +4928,12 @@ pub const MemoizedCall = struct { result: Index, }; -pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { +pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { errdefer ip.deinit(gpa); assert(ip.locals.len == 0 and ip.shards.len == 0); - ip.locals = try gpa.alloc(Local, total_threads); + const used_threads = if (single_threaded) 1 else available_threads; + ip.locals = try gpa.alloc(Local, used_threads); @memset(ip.locals, .{ .shared = .{ .items = Local.List(Item).empty, @@ -4922,9 +4946,9 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { }, }); - ip.tid_width = @intCast(std.math.log2_int_ceil(usize, total_threads)); - ip.tid_shift_31 = 31 - ip.tid_width; - ip.tid_shift_32 = ip.tid_shift_31 +| 1; + ip.tid_width = @intCast(std.math.log2_int_ceil(usize, used_threads)); + ip.tid_shift_31 = if (single_threaded) 0 else 31 - ip.tid_width; + ip.tid_shift_32 = if (single_threaded) 0 else ip.tid_shift_31 +| 1; ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.tid_width); @memset(ip.shards, .{ .shared = .{ @@ -7063,7 +7087,7 @@ pub fn getExternFunc( .tag = .extern_func, .data = extra_index, }); - errdefer ip.items.lenPtr().* -= 1; + errdefer items.lenPtr().* -= 1; return gop.put(); } @@ -10146,12 +10170,9 @@ pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { /// error set function. The returned pointer is invalidated when anything is /// added to `ip`. pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { - assert(ies_index != .none); - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); - const func_index = datas[@intFromEnum(ies_index)]; - return funcIesResolved(ip, func_index); + const ies_item = ies_index.getItem(ip); + assert(ies_item.tag == .type_inferred_error_set); + return funcIesResolved(ip, ies_item.data); } /// Returns a mutable pointer to the resolved error set type of an inferred diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 1233275a261e..0396d06b98da 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -3,7 +3,7 @@ zcu: *Zcu, /// Dense, per-thread unique index. tid: Id, -pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ }; +pub const Id = if (InternPool.single_threaded) enum { main } else enum(usize) { main, _ }; pub fn astGenFile( pt: Zcu.PerThread, From 49b25475ad0d224e13d989f9ff860b32fca6315a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 Jul 2024 03:48:32 -0400 Subject: [PATCH 069/152] InternPool: remove usage of data with simple indices This allows them to be atomically replaced. 
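Concretely, the diff below pins every `SimpleType` and `SimpleValue` enum value to the numeric value of its corresponding static `Index` (for example `f16 = @intFromEnum(Index.f16_type)`). That makes the `data` word redundant for simple items (note `simple_type: void` and `simple_value: void` in the tag payload descriptions), so replacing an item at a static index only has to touch the tag, which a single atomic store can do. Under that invariant, recovering the simple type from the index is just an integer recast, roughly:

// Sketch under the alignment invariant this patch establishes; only
// valid when `index` is already known to refer to a simple type.
fn toSimpleType(index: Index) SimpleType {
    return @enumFromInt(@intFromEnum(index));
}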
--- src/InternPool.zig | 153 ++++++++++++++++++++++----------------------- 1 file changed, 74 insertions(+), 79 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index aab56c19e69e..8002b8d2f3cd 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -816,7 +816,7 @@ pub const String = enum(u32) { }; } - noinline fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { + fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { const unwrapped = string.unwrap(ip); return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; } @@ -3237,18 +3237,18 @@ pub const Index = enum(u32) { } }; - pub inline fn getItem(index: Index, ip: *const InternPool) Item { + pub fn getItem(index: Index, ip: *const InternPool) Item { const item_ptr = index.itemPtr(ip); const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); return .{ .tag = tag, .data = item_ptr.data_ptr.* }; } - pub inline fn getTag(index: Index, ip: *const InternPool) Tag { + pub fn getTag(index: Index, ip: *const InternPool) Tag { const item_ptr = index.itemPtr(ip); return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); } - pub inline fn getData(index: Index, ip: *const InternPool) u32 { + pub fn getData(index: Index, ip: *const InternPool) u32 { return index.getItem(ip).data; } @@ -3340,7 +3340,7 @@ pub const Index = enum(u32) { }, type_enum_explicit: DataIsExtraIndexOfEnumExplicit, type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, - simple_type: struct { data: SimpleType }, + simple_type: void, type_opaque: struct { data: *Tag.TypeOpaque }, type_struct: struct { data: *Tag.TypeStruct }, type_struct_anon: DataIsExtraIndexOfTypeStructAnon, @@ -3360,7 +3360,7 @@ pub const Index = enum(u32) { }, undef: DataIsIndex, - simple_value: struct { data: SimpleValue }, + simple_value: void, ptr_decl: struct { data: *PtrDecl }, ptr_comptime_alloc: struct { data: *PtrComptimeAlloc }, ptr_anon_decl: struct { data: *PtrAnonDecl }, @@ -4386,64 +4386,64 @@ pub const TypeStructAnon = struct { /// implement logic that only wants to deal with types because the logic can /// ignore all simple values. Note that technically, types are values. 
pub const SimpleType = enum(u32) { - f16, - f32, - f64, - f80, - f128, - usize, - isize, - c_char, - c_short, - c_ushort, - c_int, - c_uint, - c_long, - c_ulong, - c_longlong, - c_ulonglong, - c_longdouble, - anyopaque, - bool, - void, - type, - anyerror, - comptime_int, - comptime_float, - noreturn, - null, - undefined, - enum_literal, - - atomic_order, - atomic_rmw_op, - calling_convention, - address_space, - float_mode, - reduce_op, - call_modifier, - prefetch_options, - export_options, - extern_options, - type_info, - - adhoc_inferred_error_set, - generic_poison, + f16 = @intFromEnum(Index.f16_type), + f32 = @intFromEnum(Index.f32_type), + f64 = @intFromEnum(Index.f64_type), + f80 = @intFromEnum(Index.f80_type), + f128 = @intFromEnum(Index.f128_type), + usize = @intFromEnum(Index.usize_type), + isize = @intFromEnum(Index.isize_type), + c_char = @intFromEnum(Index.c_char_type), + c_short = @intFromEnum(Index.c_short_type), + c_ushort = @intFromEnum(Index.c_ushort_type), + c_int = @intFromEnum(Index.c_int_type), + c_uint = @intFromEnum(Index.c_uint_type), + c_long = @intFromEnum(Index.c_long_type), + c_ulong = @intFromEnum(Index.c_ulong_type), + c_longlong = @intFromEnum(Index.c_longlong_type), + c_ulonglong = @intFromEnum(Index.c_ulonglong_type), + c_longdouble = @intFromEnum(Index.c_longdouble_type), + anyopaque = @intFromEnum(Index.anyopaque_type), + bool = @intFromEnum(Index.bool_type), + void = @intFromEnum(Index.void_type), + type = @intFromEnum(Index.type_type), + anyerror = @intFromEnum(Index.anyerror_type), + comptime_int = @intFromEnum(Index.comptime_int_type), + comptime_float = @intFromEnum(Index.comptime_float_type), + noreturn = @intFromEnum(Index.noreturn_type), + null = @intFromEnum(Index.null_type), + undefined = @intFromEnum(Index.undefined_type), + enum_literal = @intFromEnum(Index.enum_literal_type), + + atomic_order = @intFromEnum(Index.atomic_order_type), + atomic_rmw_op = @intFromEnum(Index.atomic_rmw_op_type), + calling_convention = @intFromEnum(Index.calling_convention_type), + address_space = @intFromEnum(Index.address_space_type), + float_mode = @intFromEnum(Index.float_mode_type), + reduce_op = @intFromEnum(Index.reduce_op_type), + call_modifier = @intFromEnum(Index.call_modifier_type), + prefetch_options = @intFromEnum(Index.prefetch_options_type), + export_options = @intFromEnum(Index.export_options_type), + extern_options = @intFromEnum(Index.extern_options_type), + type_info = @intFromEnum(Index.type_info_type), + + adhoc_inferred_error_set = @intFromEnum(Index.adhoc_inferred_error_set_type), + generic_poison = @intFromEnum(Index.generic_poison_type), }; pub const SimpleValue = enum(u32) { /// This is untyped `undefined`. - undefined, - void, + undefined = @intFromEnum(Index.undef), + void = @intFromEnum(Index.void_value), /// This is untyped `null`. - null, + null = @intFromEnum(Index.null_value), /// This is the untyped empty struct literal: `.{}` - empty_struct, - true, - false, - @"unreachable", + empty_struct = @intFromEnum(Index.empty_struct), + true = @intFromEnum(Index.bool_true), + false = @intFromEnum(Index.bool_false), + @"unreachable" = @intFromEnum(Index.unreachable_value), - generic_poison, + generic_poison = @intFromEnum(Index.generic_poison), }; /// Stored as a power-of-two, with one special value to indicate none. 
@@ -5063,8 +5063,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple_type => .{ .simple_type = @enumFromInt(data) }, - .simple_value => .{ .simple_value = @enumFromInt(data) }, + .simple_type => .{ .simple_type = @enumFromInt(@intFromEnum(index)) }, + .simple_value => .{ .simple_value = @enumFromInt(@intFromEnum(index)) }, .type_vector => { const vector_info = ip.extraData(Vector, data); @@ -5914,15 +5914,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, .simple_type => |simple_type| { + assert(@intFromEnum(simple_type) == items.lenPtr().*); items.appendAssumeCapacity(.{ .tag = .simple_type, - .data = @intFromEnum(simple_type), + .data = 0, // avoid writing `undefined` bits to a file }); }, .simple_value => |simple_value| { + assert(@intFromEnum(simple_value) == items.lenPtr().*); items.appendAssumeCapacity(.{ .tag = .simple_value, - .data = @intFromEnum(simple_value), + .data = 0, // avoid writing `undefined` bits to a file }); }, .undef => |ty| { @@ -8092,22 +8094,16 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { const unwrapped = index.unwrap(ip); if (@intFromEnum(index) < static_keys.len) { - if (tid != .main or unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); // The item being removed replaced a special index via `InternPool.resolveBuiltinType`. // Restore the original item at this index. - var items = ip.getLocalShared(unwrapped.tid).items.view(); - switch (static_keys[@intFromEnum(index)]) { - .simple_type => |s| items.set(@intFromEnum(index), .{ - .tag = .simple_type, - .data = @intFromEnum(s), - }), - else => unreachable, - } + assert(static_keys[@intFromEnum(index)] == .simple_type); + const items = ip.getLocalShared(unwrapped.tid).items.view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .simple_type, .monotonic); return; } if (unwrapped.tid == tid) { - const items_len = &ip.getLocal(tid).mutate.items.len; + const items_len = &ip.getLocal(unwrapped.tid).mutate.items.len; if (unwrapped.index == items_len.* - 1) { // Happy case - we can just drop the item without affecting any other indices. items_len.* -= 1; @@ -8119,7 +8115,7 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { // Thus, we will rewrite the tag to `removed`, leaking the item until // next GC but causing `KeyAdapter` to ignore it. 
const items = ip.getLocalShared(unwrapped.tid).items.view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .release); + @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .monotonic); } fn addInt( @@ -9697,7 +9693,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .type_enum_auto, .type_enum_explicit, .type_enum_nonexhaustive, - .simple_type, .type_opaque, .type_struct, .type_struct_anon, @@ -9713,7 +9708,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .only_possible_value, => @enumFromInt(index.getData(ip)), - .simple_value => unreachable, // handled via Index above + .simple_type, .simple_value => unreachable, // handled via Index above inline .ptr_decl, .ptr_comptime_alloc, @@ -10246,11 +10241,11 @@ pub fn resolveBuiltinType( (ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); // Copy the data - const resolved_item = resolved_index.getItem(ip); - const want_unwrapped = want_index.unwrap(ip); - if (tid != .main or want_unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); - var want_items = ip.getLocalShared(want_unwrapped.tid).items.view(); - want_items.set(want_unwrapped.index, resolved_item); + const item = resolved_index.getItem(ip); + const unwrapped = want_index.unwrap(ip); + var items = ip.getLocalShared(unwrapped.tid).items.view().slice(); + items.items(.data)[unwrapped.index] = item.data; + @atomicStore(Tag, &items.items(.tag)[unwrapped.index], item.tag, .release); ip.remove(tid, resolved_index); } From bdae01ab047accbbc6dcd014d008f2554aa78696 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 Jul 2024 07:33:09 -0400 Subject: [PATCH 070/152] InternPool: implement and use thread-safe list for extra and limbs --- lib/std/Thread/Pool.zig | 21 +- src/Compilation.zig | 12 +- src/InternPool.zig | 1930 ++++++++++++++++++++++----------------- src/Sema.zig | 2 +- src/Type.zig | 2 +- src/Value.zig | 5 +- src/Zcu/PerThread.zig | 2 +- src/main.zig | 44 +- 8 files changed, 1133 insertions(+), 885 deletions(-) diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 5972c4111a55..d501b665204a 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -21,7 +21,7 @@ const Runnable = struct { runFn: RunProto, }; -const RunProto = *const fn (*Runnable, id: ?usize) void; +const RunProto = *const fn (*Runnable, id: ?u32) void; pub const Options = struct { allocator: std.mem.Allocator, @@ -109,7 +109,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, _: ?usize) void { + fn runFn(runnable: *Runnable, _: ?u32) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); @@ -150,7 +150,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and /// `WaitGroup.finish` after it returns. /// -/// The first argument passed to `func` is a dense `usize` thread id, the rest +/// The first argument passed to `func` is a dense `u32` thread id, the rest /// of the arguments are passed from `args`. Requires the pool to have been /// initialized with `.track_ids = true`. 
/// @@ -172,7 +172,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, id: ?usize) void { + fn runFn(runnable: *Runnable, id: ?u32) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, .{id.?} ++ closure.arguments); @@ -258,7 +258,7 @@ fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); - const id = if (pool.ids.count() > 0) pool.ids.count() else null; + const id: ?u32 = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); while (true) { @@ -280,12 +280,15 @@ fn worker(pool: *Pool) void { } pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { - var id: ?usize = null; + var id: ?u32 = null; while (!wait_group.isDone()) { pool.mutex.lock(); if (pool.run_queue.popFirst()) |run_node| { - id = id orelse pool.ids.getIndex(std.Thread.getCurrentId()); + id = id orelse if (pool.ids.getIndex(std.Thread.getCurrentId())) |index| + @intCast(index) + else + null; pool.mutex.unlock(); run_node.data.runFn(&run_node.data, id); continue; @@ -297,6 +300,6 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { } } -pub fn getIdCount(pool: *Pool) usize { - return 1 + pool.threads.len; +pub fn getIdCount(pool: *Pool) u32 { + return @intCast(1 + pool.threads.len); } diff --git a/src/Compilation.zig b/src/Compilation.zig index a54205dddfe3..74e8222bc3a1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2746,8 +2746,8 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { //items_len: u32, - extra_len: u32, - limbs_len: u32, + //extra_len: u32, + //limbs_len: u32, //string_bytes_len: u32, tracked_insts_len: u32, src_hash_deps_len: u32, @@ -2775,8 +2775,8 @@ pub fn saveState(comp: *Compilation) !void { const header: Header = .{ .intern_pool = .{ //.items_len = @intCast(ip.items.len), - .extra_len = @intCast(ip.extra.items.len), - .limbs_len = @intCast(ip.limbs.items.len), + //.extra_len = @intCast(ip.extra.items.len), + //.limbs_len = @intCast(ip.limbs.items.len), //.string_bytes_len = @intCast(ip.string_bytes.items.len), .tracked_insts_len = @intCast(ip.tracked_insts.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), @@ -2790,8 +2790,8 @@ pub fn saveState(comp: *Compilation) !void { }, }; addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); diff --git a/src/InternPool.zig b/src/InternPool.zig index 8002b8d2f3cd..9f179b601e12 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -8,13 +8,6 @@ tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, 
-extra: std.ArrayListUnmanaged(u32) = .{}, -/// On 32-bit systems, this array is ignored and extra is used for everything. -/// On 64-bit systems, this array is used for big integers and associated metadata. -/// Use the helper methods instead of accessing this directly in order to not -/// violate the above mechanism. -limbs: std.ArrayListUnmanaged(u64) = .{}, - /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this SegmentedList. This provides four advantages: /// * Stable memory so that one thread can access a Decl object while another @@ -352,14 +345,32 @@ const Local = struct { mutate: struct { arena: std.heap.ArenaAllocator.State, items: Mutate, + extra: Mutate, + limbs: Mutate, strings: Mutate, } align(std.atomic.cache_line), const Shared = struct { items: List(Item), + extra: Extra, + limbs: Limbs, strings: Strings, + + pub fn getLimbs(shared: *const Local.Shared) Limbs { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => shared.extra, + @sizeOf(u64) => shared.limbs, + else => @compileError("unsupported host"), + }.acquire(); + } }; + const Extra = List(struct { u32 }); + const Limbs = switch (@sizeOf(Limb)) { + @sizeOf(u32) => Extra, + @sizeOf(u64) => List(struct { u64 }), + else => @compileError("unsupported host"), + }; const Strings = List(struct { u8 }); const Mutate = struct { @@ -384,7 +395,25 @@ const Local = struct { const fields = std.enums.values(std.meta.FieldEnum(Elem)); - fn Slice(comptime opts: struct { is_const: bool = false }) type { + fn PtrArrayElem(comptime len: usize) type { + const elem_info = @typeInfo(Elem).Struct; + const elem_fields = elem_info.fields; + var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; + for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ + .name = elem_field.name, + .type = *[len]elem_field.type, + .default_value = null, + .is_comptime = false, + .alignment = 0, + }; + return @Type(.{ .Struct = .{ + .layout = .auto, + .fields = &new_fields, + .decls = &.{}, + .is_tuple = elem_info.is_tuple, + } }); + } + fn SliceElem(comptime opts: struct { is_const: bool = false }) type { const elem_info = @typeInfo(Elem).Struct; const elem_fields = elem_info.fields; var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; @@ -419,20 +448,19 @@ const Local = struct { pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { var mutable_view = mutable.view(); - defer mutable.lenPtr().* = @intCast(mutable_view.len); + defer mutable.mutate.len = @intCast(mutable_view.len); mutable_view.appendAssumeCapacity(elem); } pub fn appendSliceAssumeCapacity( mutable: Mutable, - slice: Slice(.{ .is_const = true }), + slice: SliceElem(.{ .is_const = true }), ) void { if (fields.len == 0) return; - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; + const start = mutable.mutate.len; const slice_len = @field(slice, @tagName(fields[0])).len; - assert(slice_len <= mutable.capacityPtr().* - start); - mutable_len.* = @intCast(start + slice_len); + assert(slice_len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + slice_len); const mutable_view = mutable.view(); inline for (fields) |field| { const field_slice = @field(slice, @tagName(field)); @@ -447,28 +475,43 @@ const Local = struct { } pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void { - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; - assert(len <= mutable.capacityPtr().* - start); - mutable_len.* = 
@intCast(start + len); + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); const mutable_view = mutable.view(); inline for (fields) |field| { @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); } } - pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!Slice(.{}) { + pub fn addManyAsArray(mutable: Mutable, comptime len: usize) Allocator.Error!PtrArrayElem(len) { + try mutable.ensureUnusedCapacity(len); + return mutable.addManyAsArrayAssumeCapacity(len); + } + + pub fn addManyAsArrayAssumeCapacity(mutable: Mutable, comptime len: usize) PtrArrayElem(len) { + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); + const mutable_view = mutable.view(); + var ptr_array: PtrArrayElem(len) = undefined; + inline for (fields) |field| { + @field(ptr_array, @tagName(field)) = mutable_view.items(field)[start..][0..len]; + } + return ptr_array; + } + + pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!SliceElem(.{}) { try mutable.ensureUnusedCapacity(len); return mutable.addManyAsSliceAssumeCapacity(len); } - pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) Slice(.{}) { - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; - assert(len <= mutable.capacityPtr().* - start); - mutable_len.* = @intCast(start + len); + pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) SliceElem(.{}) { + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); const mutable_view = mutable.view(); - var slice: Slice(.{}) = undefined; + var slice: SliceElem(.{}) = undefined; inline for (fields) |field| { @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; } @@ -476,17 +519,16 @@ const Local = struct { } pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void { - const mutable_len = mutable.lenPtr(); - assert(len <= mutable_len.*); - mutable_len.* = @intCast(len); + assert(len <= mutable.mutate.len); + mutable.mutate.len = @intCast(len); } pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void { - try mutable.ensureTotalCapacity(@intCast(mutable.lenPtr().* + unused_capacity)); + try mutable.ensureTotalCapacity(@intCast(mutable.mutate.len + unused_capacity)); } pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void { - const old_capacity = mutable.capacityPtr().*; + const old_capacity = mutable.list.header().capacity; if (old_capacity >= total_capacity) return; var new_capacity = old_capacity; while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2; @@ -503,7 +545,7 @@ const Local = struct { ); var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; new_list.header().* = .{ .capacity = capacity }; - const len = mutable.lenPtr().*; + const len = mutable.mutate.len; // this cold, quickly predictable, condition enables // the `MultiArrayList` optimization in `view` if (len > 0) { @@ -515,27 +557,19 @@ const Local = struct { } fn view(mutable: Mutable) View { - const capacity = mutable.capacityPtr().*; + const capacity = mutable.list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = mutable.list.bytes, - .len = mutable.lenPtr().*, + .len = mutable.mutate.len, .capacity = capacity, }; 
} - - pub fn lenPtr(mutable: Mutable) *u32 { - return &mutable.mutate.len; - } - - pub fn capacityPtr(mutable: Mutable) *u32 { - return &mutable.list.header().capacity; - } }; const empty: ListSelf = .{ .bytes = @constCast(&(extern struct { header: Header, - bytes: [0]u8, + bytes: [0]u8 align(@alignOf(Elem)), }{ .header = .{ .capacity = 0 }, .bytes = .{}, @@ -580,6 +614,32 @@ const Local = struct { }; } + pub fn getMutableExtra(local: *Local, gpa: std.mem.Allocator) Extra.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.extra, + .list = &local.shared.extra, + }; + } + + /// On 32-bit systems, this array is ignored and extra is used for everything. + /// On 64-bit systems, this array is used for big integers and associated metadata. + /// Use the helper methods instead of accessing this directly in order to not + /// violate the above mechanism. + pub fn getMutableLimbs(local: *Local, gpa: std.mem.Allocator) Limbs.Mutable { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => local.getMutableExtra(gpa), + @sizeOf(u64) => .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.limbs, + .list = &local.shared.limbs, + }, + else => @compileError("unsupported host"), + }; + } + /// In order to store references to strings in fewer bytes, we copy all /// string bytes into here. String bytes can be null. It is up to whomever /// is referencing the data here whether they want to store both index and length, @@ -817,8 +877,9 @@ pub const String = enum(u32) { } fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { - const unwrapped = string.unwrap(ip); - return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; + const unwrapped_string = string.unwrap(ip); + const strings = ip.getLocalShared(unwrapped_string.tid).strings.acquire(); + return strings.view().items(.@"0")[unwrapped_string.index..]; } }; @@ -848,11 +909,15 @@ pub const NullTerminatedString = enum(u32) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -983,10 +1048,15 @@ pub const CaptureValue = packed struct(u32) { }; pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; }; @@ -1272,6 +1342,7 @@ pub const Key = union(enum) { }; pub const Func = struct { + tid: Zcu.PerThread.Id, /// In the case of a generic function, this type will potentially have fewer parameters /// than the generic owner's type, because the comptime parameters will be deleted. ty: Index, @@ -1327,23 +1398,27 @@ pub const Key = union(enum) { /// Returns a pointer that becomes invalid after any additions to the `InternPool`. 
pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis { - return @ptrCast(&ip.extra.items[func.analysis_extra_index]); + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]); } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *TrackedInst.Index { - return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]); + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]); } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { - return &ip.extra.items[func.branch_quota_extra_index]; + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return &extra.view().items(.@"0")[func.branch_quota_extra_index]; } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { + const extra = ip.getLocalShared(func.tid).extra.acquire(); assert(func.analysis(ip).inferred_error_set); - return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]); + return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]); } }; @@ -2186,6 +2261,7 @@ pub const RequiresComptime = enum(u2) { no, yes, unknown, wip }; // minimal hashmap key, this type is a convenience type that contains info // needed by semantic analysis. pub const LoadedUnionType = struct { + tid: Zcu.PerThread.Id, /// The index of the `Tag.TypeUnion` payload. extra_index: u32, /// The Decl that corresponds to the union itself. @@ -2258,7 +2334,7 @@ pub const LoadedUnionType = struct { } }; - pub fn loadTagType(self: LoadedUnionType, ip: *InternPool) LoadedEnumType { + pub fn loadTagType(self: LoadedUnionType, ip: *const InternPool) LoadedEnumType { return ip.loadEnumType(self.enum_tag_ty); } @@ -2271,26 +2347,30 @@ pub const LoadedUnionType = struct { /// when it is mutated, the mutations are observed. /// The returned pointer expires with any addition to the `InternPool`. pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?; - return @ptrCast(&ip.extra.items[self.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } /// The returned pointer expires with any addition to the `InternPool`. pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } /// The returned pointer expires with any addition to the `InternPool`. pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?; - return &ip.extra.items[self.extra_index + field_index]; + return &extra.view().items(.@"0")[self.extra_index + field_index]; } /// The returned pointer expires with any addition to the `InternPool`. 
pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?; - return &ip.extra.items[self.extra_index + field_index]; + return &extra.view().items(.@"0")[self.extra_index + field_index]; } pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool { @@ -2319,7 +2399,7 @@ pub const LoadedUnionType = struct { const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?; const ptr: *TrackedInst.Index.Optional = - @ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]); + @ptrCast(&ip.extra_.items[self.flags_index - flags_field_index + zir_index_field_index]); ptr.* = new_zir_index; } @@ -2335,18 +2415,21 @@ pub const LoadedUnionType = struct { }; pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { - const data = index.getData(ip); - const type_union = ip.extraDataTrail(Tag.TypeUnion, data); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const data = unwrapped_index.getData(ip); + const type_union = extraDataTrail(extra_list, Tag.TypeUnion, data); const fields_len = type_union.data.fields_len; var extra_index = type_union.end; const captures_len = if (type_union.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2356,21 +2439,24 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; - const field_aligns: Alignment.Slice = if (type_union.data.flags.any_aligned_fields) a: { + const field_aligns = if (type_union.data.flags.any_aligned_fields) a: { const a: Alignment.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; break :a a; - } else .{ .start = 0, .len = 0 }; + } else Alignment.Slice.empty; return .{ + .tid = unwrapped_index.tid, .extra_index = data, .decl = type_union.data.decl, .namespace = type_union.data.namespace, @@ -2383,6 +2469,7 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { } pub const LoadedStructType = struct { + tid: Zcu.PerThread.Id, /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload. extra_index: u32, /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`. @@ -2404,12 +2491,16 @@ pub const LoadedStructType = struct { captures: CaptureValue.Slice, pub const ComptimeBits = struct { + tid: Zcu.PerThread.Id, start: u32, /// This is the number of u32 elements, not the number of struct fields. 
len: u32, + pub const empty: ComptimeBits = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 { - return ip.extra.items[this.start..][0..this.len]; + const extra = ip.getLocalShared(this.tid).extra.acquire(); + return extra.view().items(.@"0")[this.start..][0..this.len]; } pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool { @@ -2427,11 +2518,15 @@ pub const LoadedStructType = struct { }; pub const Offsets = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Offsets = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(this: Offsets, ip: *const InternPool) []u32 { - return @ptrCast(ip.extra.items[this.start..][0..this.len]); + const extra = ip.getLocalShared(this.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[this.start..][0..this.len]); } }; @@ -2443,11 +2538,15 @@ pub const LoadedStructType = struct { _, pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -2479,7 +2578,8 @@ pub const LoadedStructType = struct { ip: *InternPool, name: NullTerminatedString, ) ?u32 { - return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name); + const extra = ip.getLocalShared(self.tid).extra.acquire(); + return ip.addFieldName(extra, self.names_map.unwrap().?, self.field_names.start, name); } pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment { @@ -2487,7 +2587,7 @@ pub const LoadedStructType = struct { return s.field_aligns.get(ip)[i]; } - pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index { + pub fn fieldInit(s: LoadedStructType, ip: *InternPool, i: usize) Index { if (s.field_inits.len == 0) return .none; assert(s.haveFieldInits(ip)); return s.field_inits.get(ip)[i]; @@ -2518,18 +2618,20 @@ pub const LoadedStructType = struct { /// The returned pointer expires with any addition to the `InternPool`. /// Asserts the struct is not packed. - pub fn flagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags { + pub fn flagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags { assert(self.layout != .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); } /// The returned pointer expires with any addition to the `InternPool`. /// Asserts that the struct is packed. 
- pub fn packedFlagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags { + pub fn packedFlagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags { assert(self.layout == .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); } pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { @@ -2621,25 +2723,27 @@ pub const LoadedStructType = struct { /// Asserts the struct is not packed. pub fn size(self: LoadedStructType, ip: *InternPool) *u32 { assert(self.layout != .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?; - return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + size_field_index]); } /// The backing integer type of the packed struct. Whether zig chooses /// this type or the user specifies it, it is stored here. This will be /// set to `none` until the layout is resolved. /// Asserts the struct is packed. - pub fn backingIntType(s: LoadedStructType, ip: *const InternPool) *Index { + pub fn backingIntType(s: LoadedStructType, ip: *InternPool) *Index { assert(s.layout == .@"packed"); + const extra = ip.getLocalShared(s.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?; - return @ptrCast(&ip.extra.items[s.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]); } /// Asserts the struct is not packed. 
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { assert(s.layout != .@"packed"); const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?; - ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); + ip.extra_.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); } pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool { @@ -2647,7 +2751,7 @@ pub const LoadedStructType = struct { return types.len == 0 or types[0] != .none; } - pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool { + pub fn haveFieldInits(s: LoadedStructType, ip: *InternPool) bool { return switch (s.layout) { .@"packed" => s.packedFlagsPtr(ip).inits_resolved, .auto, .@"extern" => s.flagsPtr(ip).inits_resolved, @@ -2757,34 +2861,38 @@ pub const LoadedStructType = struct { }; pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { .type_struct => { if (item.data == 0) return .{ + .tid = .main, .extra_index = 0, .decl = .none, .namespace = .none, .zir_index = .none, .layout = .auto, - .field_names = .{ .start = 0, .len = 0 }, - .field_types = .{ .start = 0, .len = 0 }, - .field_inits = .{ .start = 0, .len = 0 }, - .field_aligns = .{ .start = 0, .len = 0 }, - .runtime_order = .{ .start = 0, .len = 0 }, - .comptime_bits = .{ .start = 0, .len = 0 }, - .offsets = .{ .start = 0, .len = 0 }, + .field_names = NullTerminatedString.Slice.empty, + .field_types = Index.Slice.empty, + .field_inits = Index.Slice.empty, + .field_aligns = Alignment.Slice.empty, + .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, + .comptime_bits = LoadedStructType.ComptimeBits.empty, + .offsets = LoadedStructType.Offsets.empty, .names_map = .none, - .captures = .{ .start = 0, .len = 0 }, + .captures = CaptureValue.Slice.empty, }; - const extra = ip.extraDataTrail(Tag.TypeStruct, item.data); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, item.data); const fields_len = extra.data.fields_len; var extra_index = extra.end; const captures_len = if (extra.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2793,49 +2901,75 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; - const names_map: OptionalMapIndex, const names: NullTerminatedString.Slice = if (!extra.data.flags.is_tuple) n: { - const names_map: OptionalMapIndex = @enumFromInt(ip.extra.items[extra_index]); + const names_map: OptionalMapIndex, const names = if (!extra.data.flags.is_tuple) n: { + const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; - const names: NullTerminatedString.Slice = .{ .start = extra_index, .len = fields_len }; + const names: NullTerminatedString.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :n .{ names_map, names }; - } else .{ .none, .{ .start = 
0, .len = 0 } }; + } else .{ .none, NullTerminatedString.Slice.empty }; const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: { - const inits: Index.Slice = .{ .start = extra_index, .len = fields_len }; + const inits: Index.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :i inits; - } else .{ .start = 0, .len = 0 }; + } else Index.Slice.empty; const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: { - const n: NamespaceIndex = @enumFromInt(ip.extra.items[extra_index]); + const n: NamespaceIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; break :n n.toOptional(); } else .none; const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: { - const a: Alignment.Slice = .{ .start = extra_index, .len = fields_len }; + const a: Alignment.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; break :a a; - } else .{ .start = 0, .len = 0 }; + } else Alignment.Slice.empty; const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: { const len = std.math.divCeil(u32, fields_len, 32) catch unreachable; - const c: LoadedStructType.ComptimeBits = .{ .start = extra_index, .len = len }; + const c: LoadedStructType.ComptimeBits = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = len, + }; extra_index += len; break :c c; - } else .{ .start = 0, .len = 0 }; + } else LoadedStructType.ComptimeBits.empty; const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: { - const ro: LoadedStructType.RuntimeOrder.Slice = .{ .start = extra_index, .len = fields_len }; + const ro: LoadedStructType.RuntimeOrder.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :ro ro; - } else .{ .start = 0, .len = 0 }; + } else LoadedStructType.RuntimeOrder.Slice.empty; const offsets: LoadedStructType.Offsets = o: { - const o: LoadedStructType.Offsets = .{ .start = extra_index, .len = fields_len }; + const o: LoadedStructType.Offsets = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :o o; }; return .{ + .tid = unwrapped_index.tid, .extra_index = item.data, .decl = extra.data.decl.toOptional(), .namespace = namespace, @@ -2853,16 +2987,17 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; }, .type_struct_packed, .type_struct_packed_inits => { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, item.data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, item.data); const has_inits = item.tag == .type_struct_packed_inits; const fields_len = extra.data.fields_len; var extra_index = extra.end; const captures_len = if (extra.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2871,24 +3006,28 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; const 
field_names: NullTerminatedString.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; const field_inits: Index.Slice = if (has_inits) inits: { const i: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; break :inits i; - } else .{ .start = 0, .len = 0 }; + } else Index.Slice.empty; return .{ + .tid = unwrapped_index.tid, .extra_index = item.data, .decl = extra.data.decl.toOptional(), .namespace = extra.data.namespace, @@ -2897,10 +3036,10 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .field_names = field_names, .field_types = field_types, .field_inits = field_inits, - .field_aligns = .{ .start = 0, .len = 0 }, - .runtime_order = .{ .start = 0, .len = 0 }, - .comptime_bits = .{ .start = 0, .len = 0 }, - .offsets = .{ .start = 0, .len = 0 }, + .field_aligns = Alignment.Slice.empty, + .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, + .comptime_bits = LoadedStructType.ComptimeBits.empty, + .offsets = LoadedStructType.Offsets.empty, .names_map = extra.data.names_map.toOptional(), .captures = captures, }; @@ -2981,10 +3120,12 @@ const LoadedEnumType = struct { }; pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); const tag_mode: LoadedEnumType.TagMode = switch (item.tag) { .type_enum_auto => { - const extra = ip.extraDataTrail(EnumAuto, item.data); + const extra = extraDataTrail(extra_list, EnumAuto, item.data); var extra_index: u32 = @intCast(extra.end); if (extra.data.zir_index == .none) { extra_index += 1; // owner_union @@ -2998,15 +3139,17 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len, .len = extra.data.fields_len, }, - .values = .{ .start = 0, .len = 0 }, + .values = Index.Slice.empty, .tag_mode = .auto, .names_map = extra.data.names_map, .values_map = .none, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }, @@ -3016,7 +3159,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .type_enum_nonexhaustive => .nonexhaustive, else => unreachable, }; - const extra = ip.extraDataTrail(EnumExplicit, item.data); + const extra = extraDataTrail(extra_list, EnumExplicit, item.data); var extra_index: u32 = @intCast(extra.end); if (extra.data.zir_index == .none) { extra_index += 1; // owner_union @@ -3030,10 +3173,12 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len, .len = extra.data.fields_len, }, .values = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len + extra.data.fields_len, .len = if (extra.data.values_map != .none) extra.data.fields_len else 0, }, @@ -3042,6 +3187,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .values_map = extra.data.values_map, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }, @@ -3060,9 +3206,10 @@ pub const LoadedOpaqueType = struct { }; 
pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .type_opaque); - const extra = ip.extraDataTrail(Tag.TypeOpaque, item.data); + const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, item.data); const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) 0 else @@ -3072,6 +3219,7 @@ pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { .namespace = extra.data.namespace, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = captures_len, }, @@ -3214,11 +3362,15 @@ pub const Index = enum(u32) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []Index { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -3237,37 +3389,6 @@ pub const Index = enum(u32) { } }; - pub fn getItem(index: Index, ip: *const InternPool) Item { - const item_ptr = index.itemPtr(ip); - const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); - return .{ .tag = tag, .data = item_ptr.data_ptr.* }; - } - - pub fn getTag(index: Index, ip: *const InternPool) Tag { - const item_ptr = index.itemPtr(ip); - return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); - } - - pub fn getData(index: Index, ip: *const InternPool) u32 { - return index.getItem(ip).data; - } - - const ItemPtr = struct { - tag_ptr: *Tag, - data_ptr: *u32, - }; - fn itemPtr(index: Index, ip: *const InternPool) ItemPtr { - const unwrapped: Unwrapped = if (single_threaded) .{ - .tid = .main, - .index = @intFromEnum(index), - } else index.unwrap(ip); - const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); - return .{ - .tag_ptr = &slice.items(.tag)[unwrapped.index], - .data_ptr = &slice.items(.data)[unwrapped.index], - }; - } - const Unwrapped = struct { tid: Zcu.PerThread.Id, index: u32, @@ -3277,9 +3398,43 @@ pub const Index = enum(u32) { assert(unwrapped.index <= ip.getIndexMask(u31)); return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_31 | unwrapped.index); } + + pub fn getExtra(unwrapped: Unwrapped, ip: *const InternPool) Local.Extra { + return ip.getLocalShared(unwrapped.tid).extra.acquire(); + } + + pub fn getItem(unwrapped: Unwrapped, ip: *const InternPool) Item { + const item_ptr = unwrapped.itemPtr(ip); + const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); + return .{ .tag = tag, .data = item_ptr.data_ptr.* }; + } + + pub fn getTag(unwrapped: Unwrapped, ip: *const InternPool) Tag { + const item_ptr = unwrapped.itemPtr(ip); + return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); + } + + pub fn getData(unwrapped: Unwrapped, ip: *const InternPool) u32 { + return unwrapped.getItem(ip).data; + } + + const ItemPtr = struct { + tag_ptr: *Tag, + data_ptr: *u32, + }; + fn itemPtr(unwrapped: Unwrapped, ip: *const InternPool) ItemPtr { + const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); + return .{ + .tag_ptr = &slice.items(.tag)[unwrapped.index], + .data_ptr = &slice.items(.data)[unwrapped.index], + }; 
+ } }; - fn unwrap(index: Index, ip: *const InternPool) Unwrapped { - return .{ + pub fn unwrap(index: Index, ip: *const InternPool) Unwrapped { + return if (single_threaded) .{ + .tid = .main, + .index = @intFromEnum(index), + } else .{ .tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_31 & ip.getTidMask()), .index = @intFromEnum(index) & ip.getIndexMask(u31), }; @@ -3643,9 +3798,9 @@ pub const static_keys = [_]Key{ // empty_struct_type .{ .anon_struct_type = .{ - .types = .{ .start = 0, .len = 0 }, - .names = .{ .start = 0, .len = 0 }, - .values = .{ .start = 0, .len = 0 }, + .types = Index.Slice.empty, + .names = NullTerminatedString.Slice.empty, + .values = Index.Slice.empty, } }, .{ .simple_value = .undefined }, @@ -4563,14 +4718,18 @@ pub const Alignment = enum(u6) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, /// This is the number of alignment values, not the number of u32 elements. len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []Alignment { // TODO: implement @ptrCast between slices changing the length - //const bytes: []u8 = @ptrCast(ip.extra.items[slice.start..]); - const bytes: []u8 = std.mem.sliceAsBytes(ip.extra.items[slice.start..]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + //const bytes: []u8 = @ptrCast(extra.view().items(.@"0")[slice.start..]); + const bytes: []u8 = std.mem.sliceAsBytes(extra.view().items(.@"0")[slice.start..]); return @ptrCast(bytes[0..slice.len]); } }; @@ -4837,9 +4996,11 @@ pub const PtrSlice = struct { }; /// Trailing: Limb for every limbs_len -pub const Int = struct { +pub const Int = packed struct { ty: Index, limbs_len: u32, + + const limbs_items_len = @divExact(@sizeOf(Int), @sizeOf(Limb)); }; pub const IntSmall = struct { @@ -4931,17 +5092,22 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { errdefer ip.deinit(gpa); assert(ip.locals.len == 0 and ip.shards.len == 0); + assert(available_threads > 0 and available_threads <= std.math.maxInt(u8)); const used_threads = if (single_threaded) 1 else available_threads; ip.locals = try gpa.alloc(Local, used_threads); @memset(ip.locals, .{ .shared = .{ .items = Local.List(Item).empty, + .extra = Local.Extra.empty, + .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, }, .mutate = .{ .arena = .{}, .items = Local.Mutate.empty, + .extra = Local.Mutate.empty, + .limbs = Local.Mutate.empty, .strings = Local.Mutate.empty, }, }); @@ -4995,9 +5161,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.extra.deinit(gpa); - ip.limbs.deinit(gpa); - ip.decls_free_list.deinit(gpa); ip.allocated_decls.deinit(gpa); @@ -5031,7 +5194,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { pub fn indexToKey(ip: *const InternPool, index: Index) Key { assert(index != .none); - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); const data = item.data; return switch (item.tag) { .removed => unreachable, @@ -5048,7 +5212,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, }, .type_array_big => { - const array_info = ip.extraData(Array, data); + const array_info = extraData(unwrapped_index.getExtra(ip), Array, data); return .{ 
.array_type = .{ .len = array_info.getLength(), .child = array_info.child, @@ -5056,7 +5220,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .type_array_small => { - const array_info = ip.extraData(Vector, data); + const array_info = extraData(unwrapped_index.getExtra(ip), Vector, data); return .{ .array_type = .{ .len = array_info.len, .child = array_info.child, @@ -5067,20 +5231,21 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .simple_value => .{ .simple_value = @enumFromInt(@intFromEnum(index)) }, .type_vector => { - const vector_info = ip.extraData(Vector, data); + const vector_info = extraData(unwrapped_index.getExtra(ip), Vector, data); return .{ .vector_type = .{ .len = vector_info.len, .child = vector_info.child, } }; }, - .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, + .type_pointer => .{ .ptr_type = extraData(unwrapped_index.getExtra(ip), Tag.TypePointer, data) }, .type_slice => { const many_ptr_index: Index = @enumFromInt(data); - const many_ptr_item = many_ptr_index.getItem(ip); + const many_ptr_unwrapped = many_ptr_index.unwrap(ip); + const many_ptr_item = many_ptr_unwrapped.getItem(ip); assert(many_ptr_item.tag == .type_pointer); - var ptr_info = ip.extraData(Tag.TypePointer, many_ptr_item.data); + var ptr_info = extraData(many_ptr_unwrapped.getExtra(ip), Tag.TypePointer, many_ptr_item.data); ptr_info.flags.size = .Slice; return .{ .ptr_type = ptr_info }; }, @@ -5088,18 +5253,18 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_optional => .{ .opt_type = @enumFromInt(data) }, .type_anyframe => .{ .anyframe_type = @enumFromInt(data) }, - .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_union => .{ .error_union_type = extraData(unwrapped_index.getExtra(ip), Key.ErrorUnionType, data) }, .type_anyerror_union => .{ .error_union_type = .{ .error_set_type = .anyerror_type, .payload_type = @enumFromInt(data), } }, - .type_error_set => .{ .error_set_type = ip.extraErrorSet(data) }, + .type_error_set => .{ .error_set_type = extraErrorSet(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .type_inferred_error_set => .{ .inferred_error_set_type = @enumFromInt(data), }, .type_opaque => .{ .opaque_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeOpaque, data); + const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, data); if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, @@ -5109,6 +5274,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, @@ -5117,105 +5283,115 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_struct => .{ .struct_type = ns: { if (data == 0) break :ns .empty_struct; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid 
= unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid = unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, - .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) }, - .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) }, + .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .type_union => .{ .union_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeUnion, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid = unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, .type_enum_auto => .{ .enum_type = ns: { - const extra = ip.extraDataTrail(EnumAuto, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, EnumAuto, data); const zir_index = extra.data.zir_index.unwrap() orelse { assert(extra.data.captures_len == 0); break :ns .{ .generated_tag = .{ - .union_type = @enumFromInt(ip.extra.items[extra.end]), + .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]), } }; }; if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, } }; } }, .type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = ns: { - const extra = ip.extraDataTrail(EnumExplicit, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, EnumExplicit, data); const zir_index = extra.data.zir_index.unwrap() orelse { assert(extra.data.captures_len == 0); 
break :ns .{ .generated_tag = .{ - .union_type = @enumFromInt(ip.extra.items[extra.end]), + .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]), } }; }; if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, } }; } }, - .type_function => .{ .func_type = ip.extraFuncType(data) }, + .type_function => .{ .func_type = extraFuncType(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .undef => .{ .undef = @enumFromInt(data) }, .opt_null => .{ .opt = .{ @@ -5223,40 +5399,40 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = .none, } }, .opt_payload => { - const extra = ip.extraData(Tag.TypeValue, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data); return .{ .opt = .{ .ty = extra.ty, .val = extra.val, } }; }, .ptr_decl => { - const info = ip.extraData(PtrDecl, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrDecl, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .decl = info.decl }, .byte_offset = info.byteOffset() } }; }, .ptr_comptime_alloc => { - const info = ip.extraData(PtrComptimeAlloc, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeAlloc, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } }; }, .ptr_anon_decl => { - const info = ip.extraData(PtrAnonDecl, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDecl, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ .val = info.val, .orig_ty = info.ty, } }, .byte_offset = info.byteOffset() } }; }, .ptr_anon_decl_aligned => { - const info = ip.extraData(PtrAnonDeclAligned, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDeclAligned, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ .val = info.val, .orig_ty = info.orig_ty, } }, .byte_offset = info.byteOffset() } }; }, .ptr_comptime_field => { - const info = ip.extraData(PtrComptimeField, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeField, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_field = info.field_val }, .byte_offset = info.byteOffset() } }; }, .ptr_int => { - const info = ip.extraData(PtrInt, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrInt, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .int, @@ -5264,17 +5440,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .ptr_eu_payload => { - const info = ip.extraData(PtrBase, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .eu_payload = info.base }, .byte_offset = info.byteOffset() } }; }, .ptr_opt_payload => { - const info = ip.extraData(PtrBase, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .opt_payload = info.base }, .byte_offset = info.byteOffset() } }; }, .ptr_elem => { // Avoid `indexToKey` recursion by asserting the tag encoding. 
- const info = ip.extraData(PtrBaseIndex, data); - const index_item = info.index.getItem(ip); + const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data); + const index_item = info.index.unwrap(ip).getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{ .base = info.base, @@ -5286,8 +5462,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .ptr_field => { // Avoid `indexToKey` recursion by asserting the tag encoding. - const info = ip.extraData(PtrBaseIndex, data); - const index_item = info.index.getItem(ip); + const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data); + const index_item = info.index.unwrap(ip).getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{ .base = info.base, @@ -5298,7 +5474,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; }, .ptr_slice => { - const info = ip.extraData(PtrSlice, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrSlice, data); return .{ .slice = .{ .ty = info.ty, .ptr = info.ptr, @@ -5333,17 +5509,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ty = .comptime_int_type, .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, - .int_positive => ip.indexToKeyBigInt(data, true), - .int_negative => ip.indexToKeyBigInt(data, false), + .int_positive => ip.indexToKeyBigInt(unwrapped_index.tid, data, true), + .int_negative => ip.indexToKeyBigInt(unwrapped_index.tid, data, false), .int_small => { - const info = ip.extraData(IntSmall, data); + const info = extraData(unwrapped_index.getExtra(ip), IntSmall, data); return .{ .int = .{ .ty = info.ty, .storage = .{ .u64 = info.value }, } }; }, .int_lazy_align, .int_lazy_size => |tag| { - const info = ip.extraData(IntLazy, data); + const info = extraData(unwrapped_index.getExtra(ip), IntLazy, data); return .{ .int = .{ .ty = info.ty, .storage = switch (tag) { @@ -5363,30 +5539,30 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .float_f64 => .{ .float = .{ .ty = .f64_type, - .storage = .{ .f64 = ip.extraData(Float64, data).get() }, + .storage = .{ .f64 = extraData(unwrapped_index.getExtra(ip), Float64, data).get() }, } }, .float_f80 => .{ .float = .{ .ty = .f80_type, - .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() }, } }, .float_f128 => .{ .float = .{ .ty = .f128_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .float_c_longdouble_f80 => .{ .float = .{ .ty = .c_longdouble_type, - .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() }, } }, .float_c_longdouble_f128 => .{ .float = .{ .ty = .c_longdouble_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .float_comptime_float => .{ .float = .{ .ty = .comptime_float_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .variable => { - const extra = ip.extraData(Tag.Variable, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.Variable, data); return .{ .variable = .{ .ty = extra.ty, .init = extra.init, @@ 
-5398,18 +5574,20 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .is_weak_linkage = extra.flags.is_weak_linkage, } }; }, - .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, - .func_instance => .{ .func = ip.extraFuncInstance(data) }, - .func_decl => .{ .func = ip.extraFuncDecl(data) }, - .func_coerced => .{ .func = ip.extraFuncCoerced(data) }, + .extern_func => .{ .extern_func = extraData(unwrapped_index.getExtra(ip), Tag.ExternFunc, data) }, + .func_instance => .{ .func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .func_decl => .{ .func = extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .func_coerced => .{ .func = ip.extraFuncCoerced(unwrapped_index.getExtra(ip), data) }, .only_possible_value => { const ty: Index = @enumFromInt(data); - const ty_item = ty.getItem(ip); + const ty_unwrapped = ty.unwrap(ip); + const ty_extra = ty_unwrapped.getExtra(ip); + const ty_item = ty_unwrapped.getItem(ip); return switch (ty_item.tag) { .type_array_big => { const sentinel = @as( *const [1]Index, - @ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), + @ptrCast(&ty_extra.view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), ); return .{ .aggregate = .{ .ty = ty, @@ -5437,9 +5615,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { // There is only one possible value precisely due to the // fact that this values slice is fully populated! .type_struct_anon, .type_tuple_anon => { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); + const type_struct_anon = extraDataTrail(ty_extra, TypeStructAnon, ty_item.data); const fields_len = type_struct_anon.data.fields_len; - const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const values = ty_extra.view().items(.@"0")[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = @ptrCast(values) }, @@ -5455,62 +5633,65 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; }, .bytes => { - const extra = ip.extraData(Bytes, data); + const extra = extraData(unwrapped_index.getExtra(ip), Bytes, data); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .bytes = extra.bytes }, } }; }, .aggregate => { - const extra = ip.extraDataTrail(Tag.Aggregate, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.Aggregate, data); const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); - const fields: []const Index = @ptrCast(ip.extra.items[extra.end..][0..len]); + const fields: []const Index = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..len]); return .{ .aggregate = .{ .ty = extra.data.ty, .storage = .{ .elems = fields }, } }; }, .repeated => { - const extra = ip.extraData(Repeated, data); + const extra = extraData(unwrapped_index.getExtra(ip), Repeated, data); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .repeated_elem = extra.elem_val }, } }; }, - .union_value => .{ .un = ip.extraData(Key.Union, data) }, - .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .union_value => .{ .un = extraData(unwrapped_index.getExtra(ip), Key.Union, data) }, + .error_set_error => .{ .err = extraData(unwrapped_index.getExtra(ip), Key.Error, data) }, .error_union_error => { - const extra = ip.extraData(Key.Error, data); + const extra = extraData(unwrapped_index.getExtra(ip), Key.Error, data); 
return .{ .error_union = .{ .ty = extra.ty, .val = .{ .err_name = extra.name }, } }; }, .error_union_payload => { - const extra = ip.extraData(Tag.TypeValue, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data); return .{ .error_union = .{ .ty = extra.ty, .val = .{ .payload = extra.val }, } }; }, .enum_literal => .{ .enum_literal = @enumFromInt(data) }, - .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, + .enum_tag => .{ .enum_tag = extraData(unwrapped_index.getExtra(ip), Tag.EnumTag, data) }, .memoized_call => { - const extra = ip.extraDataTrail(MemoizedCall, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, MemoizedCall, data); return .{ .memoized_call = .{ .func = extra.data.func, - .arg_values = @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len]), + .arg_values = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..extra.data.args_len]), .result = extra.data.result, } }; }, }; } -fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { - const error_set = ip.extraDataTrail(Tag.ErrorSet, extra_index); +fn extraErrorSet(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.ErrorSetType { + const error_set = extraDataTrail(extra, Tag.ErrorSet, extra_index); return .{ .names = .{ + .tid = tid, .start = @intCast(error_set.end), .len = error_set.data.names_len, }, @@ -5518,60 +5699,67 @@ fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { }; } -fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); +fn extraTypeStructAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { + const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); const fields_len = type_struct_anon.data.fields_len; return .{ .types = .{ + .tid = tid, .start = type_struct_anon.end, .len = fields_len, }, .values = .{ + .tid = tid, .start = type_struct_anon.end + fields_len, .len = fields_len, }, .names = .{ + .tid = tid, .start = type_struct_anon.end + fields_len + fields_len, .len = fields_len, }, }; } -fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); +fn extraTypeTupleAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { + const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); const fields_len = type_struct_anon.data.fields_len; return .{ .types = .{ + .tid = tid, .start = type_struct_anon.end, .len = fields_len, }, .values = .{ + .tid = tid, .start = type_struct_anon.end + fields_len, .len = fields_len, }, .names = .{ + .tid = tid, .start = 0, .len = 0, }, }; } -fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { - const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index); - var index: usize = type_function.end; +fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.FuncType { + const type_function = extraDataTrail(extra, Tag.TypeFunction, extra_index); + var trail_index: usize = type_function.end; const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: { - const x = ip.extra.items[index]; - index += 1; + const x = extra.view().items(.@"0")[trail_index]; + trail_index += 1; break :b x; }; const noalias_bits: u32 = if 
(!type_function.data.flags.has_noalias_bits) 0 else b: { - const x = ip.extra.items[index]; - index += 1; + const x = extra.view().items(.@"0")[trail_index]; + trail_index += 1; break :b x; }; return .{ .param_types = .{ - .start = @intCast(index), + .tid = tid, + .start = @intCast(trail_index), .len = type_function.data.params_len, }, .return_type = type_function.data.return_type, @@ -5587,10 +5775,11 @@ fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { }; } -fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { +fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { const P = Tag.FuncDecl; - const func_decl = ip.extraDataTrail(P, extra_index); + const func_decl = extraDataTrail(extra, P, extra_index); return .{ + .tid = tid, .ty = func_decl.data.ty, .uncoerced_ty = func_decl.data.ty, .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, @@ -5604,15 +5793,16 @@ fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { .lbrace_column = func_decl.data.lbrace_column, .rbrace_column = func_decl.data.rbrace_column, .generic_owner = .none, - .comptime_args = .{ .start = 0, .len = 0 }, + .comptime_args = Index.Slice.empty, }; } -fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { +fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { const P = Tag.FuncInstance; - const fi = ip.extraDataTrail(P, extra_index); + const fi = extraDataTrail(extra, P, extra_index); const func_decl = ip.funcDeclInfo(fi.data.generic_owner); return .{ + .tid = tid, .ty = fi.data.ty, .uncoerced_ty = fi.data.ty, .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, @@ -5627,30 +5817,34 @@ fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { .rbrace_column = func_decl.rbrace_column, .generic_owner = fi.data.generic_owner, .comptime_args = .{ + .tid = tid, .start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), .len = ip.funcTypeParamsLen(func_decl.ty), }, }; } -fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { - const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); - const sub_item = func_coerced.func.getItem(ip); +fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32) Key.Func { + const func_coerced = extraData(extra, Tag.FuncCoerced, extra_index); + const func_unwrapped = func_coerced.func.unwrap(ip); + const sub_item = func_unwrapped.getItem(ip); + const func_extra = func_unwrapped.getExtra(ip); var func: Key.Func = switch (sub_item.tag) { - .func_instance => ip.extraFuncInstance(sub_item.data), - .func_decl => ip.extraFuncDecl(sub_item.data), + .func_instance => ip.extraFuncInstance(func_unwrapped.tid, func_extra, sub_item.data), + .func_decl => extraFuncDecl(func_unwrapped.tid, func_extra, sub_item.data), else => unreachable, }; func.ty = func_coerced.ty; return func; } -fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { - const int_info = ip.limbData(Int, limb_index); +fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key { + const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0"); + const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*); return .{ .int = .{ - .ty = int_info.ty, + .ty = int.ty, .storage = .{ .big_int = .{ - .limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), + .limbs = 
limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len], .positive = positive, } }, } }; @@ -5791,7 +5985,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All var gop = try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return gop.existing; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); try items.ensureUnusedCapacity(1); switch (key) { .int_type => |int_type| { @@ -5827,7 +6023,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(.{ .tag = .type_pointer, - .data = try ip.addExtra(gpa, ptr_type_adjusted), + .data = try addExtra(extra, ptr_type_adjusted), }); }, .array_type => |array_type| { @@ -5838,7 +6034,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (array_type.sentinel == .none) { items.appendAssumeCapacity(.{ .tag = .type_array_small, - .data = try ip.addExtra(gpa, Vector{ + .data = try addExtra(extra, Vector{ .len = len, .child = array_type.child, }), @@ -5850,7 +6046,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const length = Array.Length.init(array_type.len); items.appendAssumeCapacity(.{ .tag = .type_array_big, - .data = try ip.addExtra(gpa, Array{ + .data = try addExtra(extra, Array{ .len0 = length.a, .len1 = length.b, .child = array_type.child, @@ -5861,7 +6057,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .vector_type => |vector_type| { items.appendAssumeCapacity(.{ .tag = .type_vector, - .data = try ip.addExtra(gpa, Vector{ + .data = try addExtra(extra, Vector{ .len = vector_type.len, .child = vector_type.child, }), @@ -5887,7 +6083,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .data = @intFromEnum(error_union_type.payload_type), } else .{ .tag = .type_error_union, - .data = try ip.addExtra(gpa, error_union_type), + .data = try addExtra(extra, error_union_type), }); }, .error_set_type => |error_set_type| { @@ -5897,15 +6093,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const names_map = try ip.addMap(gpa, names.len); addStringsToMap(ip, names_map, names); const names_len = error_set_type.names.len; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); + try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); items.appendAssumeCapacity(.{ .tag = .type_error_set, - .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + .data = addExtraAssumeCapacity(extra, Tag.ErrorSet{ .names_len = names_len, .names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); + extra.appendSliceAssumeCapacity(.{@ptrCast(error_set_type.names.get(ip))}); }, .inferred_error_set_type => |ies_index| { items.appendAssumeCapacity(.{ @@ -5914,14 +6110,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, .simple_type => |simple_type| { - assert(@intFromEnum(simple_type) == items.lenPtr().*); + assert(@intFromEnum(simple_type) == items.mutate.len); items.appendAssumeCapacity(.{ .tag = .simple_type, .data = 0, // avoid writing `undefined` bits to a file }); }, .simple_value => |simple_value| { - assert(@intFromEnum(simple_value) == items.lenPtr().*); + assert(@intFromEnum(simple_value) == 
items.mutate.len); items.appendAssumeCapacity(.{ .tag = .simple_value, .data = 0, // avoid writing `undefined` bits to a file @@ -5950,7 +6146,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (has_init) assert(variable.ty == ip.typeOf(variable.init)); items.appendAssumeCapacity(.{ .tag = .variable, - .data = try ip.addExtra(gpa, Tag.Variable{ + .data = try addExtra(extra, Tag.Variable{ .ty = variable.ty, .init = variable.init, .decl = variable.decl, @@ -5970,7 +6166,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .Many); items.appendAssumeCapacity(.{ .tag = .ptr_slice, - .data = try ip.addExtra(gpa, PtrSlice{ + .data = try addExtra(extra, PtrSlice{ .ty = slice.ty, .ptr = slice.ptr, .len = slice.len, @@ -5984,11 +6180,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(switch (ptr.base_addr) { .decl => |decl| .{ .tag = .ptr_decl, - .data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), + .data = try addExtra(extra, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), }, .comptime_alloc => |alloc_index| .{ .tag = .ptr_comptime_alloc, - .data = try ip.addExtra(gpa, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)), + .data = try addExtra(extra, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)), }, .anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: { if (ptr.ty != anon_decl.orig_ty) { @@ -5999,17 +6195,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } break :item .{ .tag = .ptr_anon_decl, - .data = try ip.addExtra(gpa, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)), + .data = try addExtra(extra, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)), }; } else .{ .tag = .ptr_anon_decl_aligned, - .data = try ip.addExtra(gpa, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)), + .data = try addExtra(extra, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)), }, .comptime_field => |field_val| item: { assert(field_val != .none); break :item .{ .tag = .ptr_comptime_field, - .data = try ip.addExtra(gpa, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)), + .data = try addExtra(extra, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)), }; }, .eu_payload, .opt_payload => |base| item: { @@ -6028,12 +6224,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .opt_payload => .ptr_opt_payload, else => unreachable, }, - .data = try ip.addExtra(gpa, PtrBase.init(ptr.ty, base, ptr.byte_offset)), + .data = try addExtra(extra, PtrBase.init(ptr.ty, base, ptr.byte_offset)), }; }, .int => .{ .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)), + .data = try addExtra(extra, PtrInt.init(ptr.ty, ptr.byte_offset)), }, .arr_elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; @@ -6077,7 +6273,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .field => .ptr_field, else => unreachable, }, - .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), + .data = try addExtra(extra, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), }); return gop.put(); }, @@ -6092,7 +6288,7 @@ pub fn 
get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .data = @intFromEnum(opt.ty), } else .{ .tag = .opt_payload, - .data = try ip.addExtra(gpa, Tag.TypeValue{ + .data = try addExtra(extra, Tag.TypeValue{ .ty = opt.ty, .val = opt.val, }), @@ -6110,7 +6306,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_align => .int_lazy_align, .lazy_size => .int_lazy_size, }, - .data = try ip.addExtra(gpa, IntLazy{ + .data = try addExtra(extra, IntLazy{ .ty = int.ty, .lazy_ty = lazy_ty, }), @@ -6251,7 +6447,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (big_int.to(u32)) |casted| { items.appendAssumeCapacity(.{ .tag = .int_small, - .data = try ip.addExtra(gpa, IntSmall{ + .data = try addExtra(extra, IntSmall{ .ty = int.ty, .value = casted, }), @@ -6266,7 +6462,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (std.math.cast(u32, x)) |casted| { items.appendAssumeCapacity(.{ .tag = .int_small, - .data = try ip.addExtra(gpa, IntSmall{ + .data = try addExtra(extra, IntSmall{ .ty = int.ty, .value = casted, }), @@ -6287,7 +6483,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ip.isErrorSetType(err.ty)); items.appendAssumeCapacity(.{ .tag = .error_set_error, - .data = try ip.addExtra(gpa, err), + .data = try addExtra(extra, err), }); }, @@ -6296,14 +6492,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(switch (error_union.val) { .err_name => |err_name| .{ .tag = .error_union_error, - .data = try ip.addExtra(gpa, Key.Error{ + .data = try addExtra(extra, Key.Error{ .ty = error_union.ty, .name = err_name, }), }, .payload => |payload| .{ .tag = .error_union_payload, - .data = try ip.addExtra(gpa, Tag.TypeValue{ + .data = try addExtra(extra, Tag.TypeValue{ .ty = error_union.ty, .val = payload, }), @@ -6325,7 +6521,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } items.appendAssumeCapacity(.{ .tag = .enum_tag, - .data = try ip.addExtra(gpa, enum_tag), + .data = try addExtra(extra, enum_tag), }); }, @@ -6346,29 +6542,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }), .f64_type => items.appendAssumeCapacity(.{ .tag = .float_f64, - .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), + .data = try addExtra(extra, Float64.pack(float.storage.f64)), }), .f80_type => items.appendAssumeCapacity(.{ .tag = .float_f80, - .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), + .data = try addExtra(extra, Float80.pack(float.storage.f80)), }), .f128_type => items.appendAssumeCapacity(.{ .tag = .float_f128, - .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + .data = try addExtra(extra, Float128.pack(float.storage.f128)), }), .c_longdouble_type => switch (float.storage) { .f80 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f80, - .data = try ip.addExtra(gpa, Float80.pack(x)), + .data = try addExtra(extra, Float80.pack(x)), }), inline .f16, .f32, .f64, .f128 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f128, - .data = try ip.addExtra(gpa, Float128.pack(x)), + .data = try addExtra(extra, Float128.pack(x)), }), }, .comptime_float_type => items.appendAssumeCapacity(.{ .tag = .float_comptime_float, - .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + .data = try addExtra(extra, Float128.pack(float.storage.f128)), }), else => 
unreachable, } @@ -6490,13 +6686,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .repeated_elem => |elem| elem, }; - try ip.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Repeated).Struct.fields.len, - ); + try extra.ensureUnusedCapacity(@typeInfo(Repeated).Struct.fields.len); items.appendAssumeCapacity(.{ .tag = .repeated, - .data = ip.addExtraAssumeCapacity(Repeated{ + .data = addExtraAssumeCapacity(extra, Repeated{ .ty = aggregate.ty, .elem_val = elem, }), @@ -6506,9 +6699,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (child == .u8_type) bytes: { const strings = ip.getLocal(tid).getMutableStrings(gpa); - const start = strings.lenPtr().*; + const start = strings.mutate.len; try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1)); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + try extra.ensureUnusedCapacity(@typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| strings.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}), .elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) { @@ -6539,7 +6732,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ); items.appendAssumeCapacity(.{ .tag = .bytes, - .data = ip.addExtraAssumeCapacity(Bytes{ + .data = addExtraAssumeCapacity(extra, Bytes{ .ty = aggregate.ty, .bytes = string, }), @@ -6547,18 +6740,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All return gop.put(); } - try ip.extra.ensureUnusedCapacity( - gpa, + try extra.ensureUnusedCapacity( @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)), ); items.appendAssumeCapacity(.{ .tag = .aggregate, - .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ + .data = addExtraAssumeCapacity(extra, Tag.Aggregate{ .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(aggregate.storage.elems)); - if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel)); + extra.appendSliceAssumeCapacity(.{@ptrCast(aggregate.storage.elems)}); + if (sentinel != .none) extra.appendAssumeCapacity(.{@intFromEnum(sentinel)}); }, .un => |un| { @@ -6566,23 +6758,23 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(un.val != .none); items.appendAssumeCapacity(.{ .tag = .union_value, - .data = try ip.addExtra(gpa, un), + .data = try addExtra(extra, un), }); }, .memoized_call => |memoized_call| { for (memoized_call.arg_values) |arg| assert(arg != .none); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(MemoizedCall).Struct.fields.len + memoized_call.arg_values.len); items.appendAssumeCapacity(.{ .tag = .memoized_call, - .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .data = addExtraAssumeCapacity(extra, MemoizedCall{ .func = memoized_call.func, .args_len = @intCast(memoized_call.arg_values.len), .result = memoized_call.result, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); + extra.appendSliceAssumeCapacity(.{@ptrCast(memoized_call.arg_values)}); }, } return gop.put(); @@ -6639,11 +6831,14 @@ pub fn getUnionType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try 
items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6653,9 +6848,8 @@ pub fn getUnionType( // zig fmt: on ini.fields_len + // field types align_elements_len); - try items.ensureUnusedCapacity(1); - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ .flags = .{ .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, .runtime_tag = ini.flags.runtime_tag, @@ -6686,27 +6880,28 @@ pub fn getUnionType( switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } // field types if (ini.field_types.len > 0) { assert(ini.field_types.len == ini.fields_len); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.field_types)); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.field_types)}); } else { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } // field alignments if (ini.flags.any_aligned_fields) { - ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); + extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len); if (ini.field_aligns.len > 0) { assert(ini.field_aligns.len == ini.fields_len); @memcpy((Alignment.Slice{ - .start = @intCast(ip.extra.items.len - align_elements_len), + .tid = tid, + .start = @intCast(extra.mutate.len - align_elements_len), .len = @intCast(ini.field_aligns.len), }).get(ip), ini.field_aligns); } @@ -6715,6 +6910,7 @@ pub fn getUnionType( } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -6725,13 +6921,15 @@ pub fn getUnionType( } pub const WipNamespaceType = struct { + tid: Zcu.PerThread.Id, index: Index, decl_extra_index: u32, namespace_extra_index: ?u32, pub fn finish(wip: WipNamespaceType, ip: *InternPool, decl: DeclIndex, namespace: OptionalNamespaceIndex) Index { - ip.extra.items[wip.decl_extra_index] = @intFromEnum(decl); + const extra_items = ip.getLocalShared(wip.tid).extra.acquire().view().items(.@"0"); + extra_items[wip.decl_extra_index] = @intFromEnum(decl); if (wip.namespace_extra_index) |i| { - ip.extra.items[i] = @intFromEnum(namespace.unwrap().?); + extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); } @@ -6789,7 +6987,9 @@ pub fn getStructType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, 
ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6802,7 +7002,7 @@ pub fn getStructType( .auto => false, .@"extern" => true, .@"packed" => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6813,7 +7013,7 @@ pub fn getStructType( ini.fields_len + // types ini.fields_len + // names ini.fields_len); // inits - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{ .decl = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, @@ -6833,19 +7033,20 @@ pub fn getStructType( }); switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, .reified => |r| { - _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)); + _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, } - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); if (ini.any_default_inits) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -6860,7 +7061,7 @@ pub fn getStructType( const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6871,7 +7072,7 @@ pub fn getStructType( (ini.fields_len * 5) + // types, names, inits, runtime order, offsets align_elements_len + comptime_elements_len + 2); // names_map + namespace - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStruct{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{ .decl = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, @@ -6905,36 +7106,37 @@ pub fn getStructType( }); switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, .reified => |r| { - _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)); + _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, } - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); if (!ini.is_tuple) { - 
ip.extra.appendAssumeCapacity(@intFromEnum(names_map)); - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); + extra.appendAssumeCapacity(.{@intFromEnum(names_map)}); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); } if (ini.any_default_inits) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } const namespace_extra_index: ?u32 = if (ini.has_namespace) i: { - ip.extra.appendAssumeCapacity(undefined); // set by `finish` - break :i @intCast(ip.extra.items.len - 1); + extra.appendAssumeCapacity(undefined); // set by `finish` + break :i @intCast(extra.mutate.len - 1); } else null; if (ini.any_aligned_fields) { - ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); + extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len); } if (ini.any_comptime_fields) { - ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len); + extra.appendNTimesAssumeCapacity(.{0}, comptime_elements_len); } if (ini.layout == .auto) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(LoadedStructType.RuntimeOrder.unresolved)}, ini.fields_len); } - ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{std.math.maxInt(u32)}, ini.fields_len); return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, @@ -6958,34 +7160,35 @@ pub fn getAnonStructType( assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const fields_len: u32 = @intCast(ini.types.len); - try ip.extra.ensureUnusedCapacity( - gpa, + try items.ensureUnusedCapacity(1); + try extra.ensureUnusedCapacity( @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), ); - try items.ensureUnusedCapacity(1); - const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{ + const extra_index = addExtraAssumeCapacity(extra, TypeStructAnon{ .fields_len = fields_len, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); - errdefer ip.extra.items.len = prev_extra_len; + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.types)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: { + .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(tid, extra.list.*, extra_index) else k: { assert(ini.names.len == ini.types.len); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); - break :k extraTypeStructAnon(ip, extra_index); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); + break :k extraTypeStructAnon(tid, extra.list.*, extra_index); }, }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return 
gop.existing; } @@ -7021,21 +7224,23 @@ pub fn getFuncType( assert(key.return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function type unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeFunction).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeFunction).Struct.fields.len + @intFromBool(key.comptime_bits != 0) + @intFromBool(key.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(1); - - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = key.return_type, .flags = .{ @@ -7051,17 +7256,17 @@ pub fn getFuncType( }, }); - if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); - if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); - errdefer ip.extra.items.len = prev_extra_len; + if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits}); + if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -7081,15 +7286,20 @@ pub fn getExternFunc( var gop = try ip.getOrPutKey(gpa, tid, .{ .extern_func = key }); defer gop.deinit(); if (gop == .existing) return gop.existing; - const prev_extra_len = ip.extra.items.len; - const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); - errdefer ip.extra.items.len = prev_extra_len; - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.append(.{ + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + + const prev_extra_len = extra.mutate.len; + const extra_index = try addExtra(extra, @as(Tag.ExternFunc, key)); + errdefer extra.mutate.len = prev_extra_len; + items.appendAssumeCapacity(.{ .tag = .extern_func, .data = extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; return gop.put(); } @@ -7111,17 +7321,19 @@ pub fn getFuncDecl( tid: Zcu.PerThread.Id, key: GetFuncDeclKey, ) Allocator.Error!Index { + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function type unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. 
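// [editor's sketch] The "add unconditionally, then revert on a hit" strategy
// described in the comment above, reduced to a minimal self-contained Zig
// example. The `Pool` type, its fixed-size backing array, and `getOrPut` are
// hypothetical illustrations, not InternPool's actual API: the real code
// appends to per-thread mutable lists and rolls back `extra.mutate.len` /
// `items.mutate.len` instead of a plain `len` field.
const Pool = struct {
    items: [16]u32 = undefined,
    len: usize = 0,

    /// Returns the index of `item`, appending it only if not already present.
    fn getOrPut(pool: *Pool, item: u32) usize {
        const prev_len = pool.len;
        // Append speculatively so the candidate entry exists at a known index.
        pool.items[prev_len] = item;
        pool.len = prev_len + 1;
        // Ask whether an equivalent entry already existed...
        for (pool.items[0..prev_len], 0..) |existing, i| {
            if (existing != item) continue;
            // ...and if so, revert the length of the mutated array.
            pool.len = prev_len;
            return i;
        }
        // Not found: keep the speculative append.
        return prev_len;
    }
};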
- const prev_extra_len = ip.extra.items.len; - - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); + const prev_extra_len = extra.mutate.len; - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(1); + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len); - const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ .state = if (key.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7138,14 +7350,14 @@ pub fn getFuncDecl( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -7188,13 +7400,18 @@ pub fn getFuncDeclIes( assert(key.bare_return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(4); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function decl unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len + 1 + // inferred_error_set @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @typeInfo(Tag.TypeFunction).Struct.fields.len + @@ -7202,27 +7419,24 @@ pub fn getFuncDeclIes( @intFromBool(key.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(4); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 0, + .index = items.mutate.len + 0, }, ip); const error_union_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 1, + .index = items.mutate.len + 1, }, ip); const error_set_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 2, + .index = items.mutate.len + 2, }, ip); const func_ty = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 3, + .index = items.mutate.len + 3, }, ip); - const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ .state = if (key.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7239,9 +7453,9 @@ pub fn getFuncDeclIes( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); - ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); + extra.appendAssumeCapacity(.{@intFromEnum(Index.none)}); - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = error_union_type, .flags = .{ @@ -7256,9 +7470,9 @@ pub fn getFuncDeclIes( .addrspace_is_generic = 
key.addrspace_is_generic, }, }); - if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); - if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits}); + if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)}); items.appendSliceAssumeCapacity(.{ .tag = &.{ @@ -7269,7 +7483,7 @@ pub fn getFuncDeclIes( }, .data = &.{ func_decl_extra_index, - ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + addExtraAssumeCapacity(extra, Tag.ErrorUnionType{ .error_set_type = error_set_type, .payload_type = key.bare_return_type, }), @@ -7278,18 +7492,18 @@ pub fn getFuncDeclIes( }, }); errdefer { - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; } var func_gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index), }); defer func_gop.deinit(); if (func_gop == .existing) { // An existing function type was found; undo the additions to our two arrays. - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; return func_gop.existing; } var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ @@ -7302,7 +7516,7 @@ pub fn getFuncDeclIes( }); defer error_set_type_gop.deinit(); var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer func_ty_gop.deinit(); assert(func_gop.putAt(3) == func_index); @@ -7320,38 +7534,40 @@ pub fn getErrorSetType( ) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); + // The strategy here is to add the type unconditionally, then to ask if it // already exists, and if so, revert the lengths of the mutated arrays. // This is similar to what `getOrPutTrailingString` does. 
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); - - const prev_extra_len = ip.extra.items.len; - errdefer ip.extra.items.len = prev_extra_len; + const prev_extra_len = extra.mutate.len; + errdefer extra.mutate.len = prev_extra_len; const predicted_names_map: MapIndex = @enumFromInt(ip.maps.items.len); - const error_set_extra_index = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + const error_set_extra_index = addExtraAssumeCapacity(extra, Tag.ErrorSet{ .names_len = @intCast(names.len), .names_map = predicted_names_map, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); - errdefer ip.extra.items.len = prev_extra_len; + extra.appendSliceAssumeCapacity(.{@ptrCast(names)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .error_set_type = extraErrorSet(ip, error_set_extra_index), + .error_set_type = extraErrorSet(tid, extra.list.*, error_set_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } - const items = ip.getLocal(tid).getMutableItems(gpa); try items.append(.{ .tag = .type_error_set, .data = error_set_extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; const names_map = try ip.addMap(gpa, names.len); assert(names_map == predicted_names_map); @@ -7396,16 +7612,20 @@ pub fn getFuncInstance( .is_noinline = arg.is_noinline, }); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len + + arg.comptime_args.len); + const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner))); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + - arg.comptime_args.len); - const prev_extra_len = ip.extra.items.len; - errdefer ip.extra.items.len = prev_extra_len; + const prev_extra_len = extra.mutate.len; + errdefer extra.mutate.len = prev_extra_len; - const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ + const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ .state = if (arg.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7421,28 +7641,28 @@ pub fn getFuncInstance( .branch_quota = 0, .generic_owner = generic_owner, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)}); var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncInstance(ip, func_extra_index), + .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } - const items = ip.getLocal(tid).getMutableItems(gpa); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.lenPtr().* }, ip); + const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.mutate.len }, ip); try items.append(.{ .tag = .func_instance, .data = func_extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; try finishFuncInstance( ip, gpa, tid, + extra, generic_owner, func_index, func_extra_index, @@ -7466,15 +7686,20 @@ pub fn getFuncInstanceIes( assert(arg.bare_return_type != .none); for (arg.param_types) |param_type| assert(param_type != 
.none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try items.ensureUnusedCapacity(4); + const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); // The strategy here is to add the function decl unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(arg.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len + 1 + // inferred_error_set arg.comptime_args.len + @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @@ -7482,27 +7707,24 @@ pub fn getFuncInstanceIes( @intFromBool(arg.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(4); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 0, + .index = items.mutate.len + 0, }, ip); const error_union_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 1, + .index = items.mutate.len + 1, }, ip); const error_set_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 2, + .index = items.mutate.len + 2, }, ip); const func_ty = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 3, + .index = items.mutate.len + 3, }, ip); - const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ + const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ .state = if (arg.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7518,10 +7740,10 @@ pub fn getFuncInstanceIes( .branch_quota = 0, .generic_owner = generic_owner, }); - ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); // resolved error set - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); + extra.appendAssumeCapacity(.{@intFromEnum(Index.none)}); // resolved error set + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)}); - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = error_union_type, .flags = .{ @@ -7537,8 +7759,8 @@ pub fn getFuncInstanceIes( }, }); // no comptime_bits because has_comptime_bits is false - if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); + if (arg.noalias_bits != 0) extra.appendAssumeCapacity(.{arg.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.param_types)}); items.appendSliceAssumeCapacity(.{ .tag = &.{ @@ -7549,7 +7771,7 @@ pub fn getFuncInstanceIes( }, .data = &.{ func_extra_index, - ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + addExtraAssumeCapacity(extra, Tag.ErrorUnionType{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, }), @@ -7558,18 +7780,18 @@ pub fn getFuncInstanceIes( }, }); errdefer { - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; } var func_gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncInstance(ip, func_extra_index), + .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index), }); defer func_gop.deinit(); if (func_gop == 
.existing) { // Hot path: undo the additions to our two arrays. - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; return func_gop.existing; } var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ @@ -7582,13 +7804,14 @@ pub fn getFuncInstanceIes( }); defer error_set_type_gop.deinit(); var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer func_ty_gop.deinit(); try finishFuncInstance( ip, gpa, tid, + extra, generic_owner, func_index, func_extra_index, @@ -7606,6 +7829,7 @@ fn finishFuncInstance( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, + extra: Local.Extra.Mutable, generic_owner: Index, func_index: Index, func_extra_index: u32, @@ -7631,7 +7855,7 @@ fn finishFuncInstance( errdefer ip.destroyDecl(gpa, decl_index); // Populate the owner_decl field which was left undefined until now. - ip.extra.items[ + extra.view().items(.@"0")[ func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").? ] = @intFromEnum(decl_index); @@ -7660,6 +7884,7 @@ pub const EnumTypeInit = struct { }; pub const WipEnumType = struct { + tid: Zcu.PerThread.Id, index: Index, tag_ty_index: u32, decl_index: u32, @@ -7675,9 +7900,11 @@ pub const WipEnumType = struct { decl: DeclIndex, namespace: OptionalNamespaceIndex, ) void { - ip.extra.items[wip.decl_index] = @intFromEnum(decl); + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + const extra_items = extra.view().items(.@"0"); + extra_items[wip.decl_index] = @intFromEnum(decl); if (wip.namespace_index) |i| { - ip.extra.items[i] = @intFromEnum(namespace.unwrap().?); + extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); } @@ -7685,7 +7912,8 @@ pub const WipEnumType = struct { pub fn setTagTy(wip: WipEnumType, ip: *InternPool, tag_ty: Index) void { assert(ip.isIntegerType(tag_ty)); - ip.extra.items[wip.tag_ty_index] = @intFromEnum(tag_ty); + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + extra.view().items(.@"0")[wip.tag_ty_index] = @intFromEnum(tag_ty); } pub const FieldConflict = struct { @@ -7697,23 +7925,26 @@ pub const WipEnumType = struct { /// If the enum is automatically numbered, `value` must be `.none`. /// Otherwise, the type of `value` must be the integer tag type of the enum. 
pub fn nextField(wip: WipEnumType, ip: *InternPool, name: NullTerminatedString, value: Index) ?FieldConflict { - if (ip.addFieldName(wip.names_map, wip.names_start, name)) |conflict| { + const unwrapped_index = wip.index.unwrap(ip); + const extra_list = ip.getLocalShared(unwrapped_index.tid).extra.acquire(); + const extra_items = extra_list.view().items(.@"0"); + if (ip.addFieldName(extra_list, wip.names_map, wip.names_start, name)) |conflict| { return .{ .kind = .name, .prev_field_idx = conflict }; } if (value == .none) { assert(wip.values_map == .none); return null; } - assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[wip.tag_ty_index]))); + assert(ip.typeOf(value) == @as(Index, @enumFromInt(extra_items[wip.tag_ty_index]))); const map = &ip.maps.items[@intFromEnum(wip.values_map.unwrap().?)]; const field_index = map.count(); - const indexes = ip.extra.items[wip.values_start..][0..field_index]; + const indexes = extra_items[wip.values_start..][0..field_index]; const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) }; const gop = map.getOrPutAssumeCapacityAdapted(value, adapter); if (gop.found_existing) { return .{ .kind = .value, .prev_field_idx = @intCast(gop.index) }; } - ip.extra.items[wip.values_start + field_index] = @intFromEnum(value); + extra_items[wip.values_start + field_index] = @intFromEnum(value); return null; } @@ -7746,8 +7977,10 @@ pub fn getEnumType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -7755,7 +7988,7 @@ pub fn getEnumType( switch (ini.tag_mode) { .auto => { assert(!ini.has_values); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -7765,7 +7998,7 @@ pub fn getEnumType( // zig fmt: on ini.fields_len); // field types - const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ + const extra_index = addExtraAssumeCapacity(extra, EnumAuto{ .decl = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), @@ -7784,12 +8017,13 @@ pub fn getEnumType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } - const names_start = ip.extra.items.len; - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); + const names_start = extra.mutate.len; + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); return .{ .wip = .{ + .tid = tid, .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, @@ -7809,7 +8043,7 @@ pub fn getEnumType( _ = ip.maps.pop(); }; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -7820,7 +8054,7 @@ pub fn 
getEnumType( ini.fields_len + // field types ini.fields_len * @intFromBool(ini.has_values)); // field values - const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ + const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{ .decl = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), @@ -7844,16 +8078,17 @@ pub fn getEnumType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } - const names_start = ip.extra.items.len; - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); - const values_start = ip.extra.items.len; + const names_start = extra.mutate.len; + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); + const values_start = extra.mutate.len; if (ini.has_values) { - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, @@ -7889,8 +8124,10 @@ pub fn getGeneratedTagEnumType( assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, ini.names.len); errdefer _ = ip.maps.pop(); @@ -7898,15 +8135,15 @@ pub fn getGeneratedTagEnumType( const fields_len: u32 = @intCast(ini.names.len); - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; switch (ini.tag_mode) { .auto => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len + 1 + // owner_union fields_len); // field names items.appendAssumeCapacity(.{ .tag = .type_enum_auto, - .data = ip.addExtraAssumeCapacity(EnumAuto{ + .data = addExtraAssumeCapacity(extra, EnumAuto{ .decl = ini.decl, .captures_len = 0, .namespace = .none, @@ -7916,11 +8153,11 @@ pub fn getGeneratedTagEnumType( .zir_index = .none, }), }); - ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); + extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); }, .explicit, .nonexhaustive => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len + 1 + // owner_union fields_len + // field names ini.values.len); // field values @@ -7939,7 +8176,7 @@ pub fn getGeneratedTagEnumType( .nonexhaustive => .type_enum_nonexhaustive, .auto => unreachable, }, - .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .data = addExtraAssumeCapacity(extra, EnumExplicit{ .decl = ini.decl, .captures_len = 0, .namespace = .none, @@ -7950,12 +8187,12 @@ pub fn getGeneratedTagEnumType( .zir_index = .none, }), }); - ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty)); - 
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); + extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)}); }, } - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; errdefer switch (ini.tag_mode) { .auto => {}, .explicit, .nonexhaustive => _ = if (ini.values.len != 0) ip.maps.pop(), @@ -8001,14 +8238,16 @@ pub fn getOpaqueType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); try items.ensureUnusedCapacity(1); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, .reified => 0, }); - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeOpaque{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{ .decl = undefined, // set by `finish` .namespace = .none, .zir_index = switch (ini.key) { @@ -8024,10 +8263,11 @@ pub fn getOpaqueType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), .reified => {}, } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -8092,19 +8332,19 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex /// Leak the index until the next garbage collection. /// Invalidates all references to this index. pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { - const unwrapped = index.unwrap(ip); + const unwrapped_index = index.unwrap(ip); if (@intFromEnum(index) < static_keys.len) { // The item being removed replaced a special index via `InternPool.resolveBuiltinType`. // Restore the original item at this index. assert(static_keys[@intFromEnum(index)] == .simple_type); - const items = ip.getLocalShared(unwrapped.tid).items.view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .simple_type, .monotonic); + const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .monotonic); return; } - if (unwrapped.tid == tid) { - const items_len = &ip.getLocal(unwrapped.tid).mutate.items.len; - if (unwrapped.index == items_len.* - 1) { + if (unwrapped_index.tid == tid) { + const items_len = &ip.getLocal(unwrapped_index.tid).mutate.items.len; + if (unwrapped_index.index == items_len.* - 1) { // Happy case - we can just drop the item without affecting any other indices. items_len.* -= 1; return; @@ -8114,8 +8354,8 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { // We must preserve the item so that indices following it remain valid. // Thus, we will rewrite the tag to `removed`, leaking the item until // next GC but causing `KeyAdapter` to ignore it. 
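
[Editor's note: a standalone sketch of the tombstoning described in the comment above, under hypothetical `Tag`/`Item` types that stand in for the actual InternPool layout. Retiring an entry by atomically rewriting its tag keeps every later index valid, and lookups simply skip the dead slot.]

    const std = @import("std");

    const Tag = enum(u8) { int, string, removed };
    const Item = struct { tag: Tag, data: u32 };

    /// Retire the item without shifting its successors: indices held
    /// elsewhere remain valid, and the slot is reclaimed at the next GC.
    fn remove(items: []Item, index: usize) void {
        @atomicStore(Tag, &items[index].tag, .removed, .monotonic);
    }

    fn find(items: []const Item, data: u32) ?usize {
        for (items, 0..) |item, i| {
            if (item.tag == .removed) continue; // skipped, as KeyAdapter does
            if (item.data == data) return i;
        }
        return null;
    }

    test remove {
        var items = [_]Item{ .{ .tag = .int, .data = 5 }, .{ .tag = .string, .data = 9 } };
        remove(&items, 0);
        try std.testing.expectEqual(@as(?usize, null), find(&items, 5));
        try std.testing.expectEqual(@as(?usize, 1), find(&items, 9));
    }
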
- const items = ip.getLocalShared(unwrapped.tid).items.view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .monotonic); + const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .monotonic); } fn addInt( @@ -8126,28 +8366,32 @@ fn addInt( tag: Tag, limbs: []const Limb, ) !void { + const local = ip.getLocal(tid); + const items_list = local.getMutableItems(gpa); + const limbs_list = local.getMutableLimbs(gpa); const limbs_len: u32 = @intCast(limbs.len); - try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); - ip.getLocal(tid).getMutableItems(gpa).appendAssumeCapacity(.{ + try limbs_list.ensureUnusedCapacity(Int.limbs_items_len + limbs_len); + items_list.appendAssumeCapacity(.{ .tag = tag, - .data = ip.addLimbsExtraAssumeCapacity(Int{ - .ty = ty, - .limbs_len = limbs_len, - }), + .data = limbs_list.mutate.len, }); - ip.addLimbsAssumeCapacity(limbs); + limbs_list.addManyAsArrayAssumeCapacity(Int.limbs_items_len)[0].* = @bitCast(Int{ + .ty = ty, + .limbs_len = limbs_len, + }); + limbs_list.appendSliceAssumeCapacity(.{limbs}); } -fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { - const fields = @typeInfo(@TypeOf(extra)).Struct.fields; - try ip.extra.ensureUnusedCapacity(gpa, fields.len); - return ip.addExtraAssumeCapacity(extra); +fn addExtra(extra: Local.Extra.Mutable, item: anytype) Allocator.Error!u32 { + const fields = @typeInfo(@TypeOf(item)).Struct.fields; + try extra.ensureUnusedCapacity(fields.len); + return addExtraAssumeCapacity(extra, item); } -fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const result: u32 = @intCast(ip.extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { - ip.extra.appendAssumeCapacity(switch (field.type) { +fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 { + const result: u32 = extra.mutate.len; + inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| { + extra.appendAssumeCapacity(.{switch (field.type) { Index, DeclIndex, NamespaceIndex, @@ -8162,7 +8406,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { TrackedInst.Index, TrackedInst.Index.Optional, ComptimeAllocIndex, - => @intFromEnum(@field(extra, field.name)), + => @intFromEnum(@field(item, field.name)), u32, i32, @@ -8174,22 +8418,14 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Tag.TypeStruct.Flags, Tag.TypeStructPacked.Flags, Tag.Variable.Flags, - => @bitCast(@field(extra, field.name)), + => @bitCast(@field(item, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), - }); + }}); } return result; } -fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), - @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), - else => @compileError("unsupported host"), - } -} - fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { switch (@sizeOf(Limb)) { @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), @@ -8212,19 +8448,12 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { return result; } -fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), - @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), - else => @compileError("unsupported host"), - } -} - 
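
[Editor's note: the reworked `addExtra`/`addExtraAssumeCapacity` above dispatch on each field's type at comptime to flatten a typed payload into 32-bit words. Below is a self-contained sketch of that encoding, using hypothetical `Index`/`Flags`/`Extra` types in place of the real InternPool payloads.]

    const std = @import("std");

    const Index = enum(u32) { none = std.math.maxInt(u32), _ };
    const Flags = packed struct(u32) { is_pub: bool = false, _unused: u31 = 0 };
    const Extra = struct { ty: Index, flags: Flags, len: u32 };

    /// Encode every 32-bit field of `item` as one word: enums via
    /// @intFromEnum, packed structs via @bitCast, integers verbatim.
    fn addExtra(list: *std.ArrayListUnmanaged(u32), gpa: std.mem.Allocator, item: anytype) !u32 {
        const result: u32 = @intCast(list.items.len);
        inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| {
            try list.append(gpa, switch (field.type) {
                Index => @intFromEnum(@field(item, field.name)),
                Flags => @bitCast(@field(item, field.name)),
                u32 => @field(item, field.name),
                else => @compileError("bad field type: " ++ @typeName(field.type)),
            });
        }
        return result;
    }

    test addExtra {
        var list: std.ArrayListUnmanaged(u32) = .{};
        defer list.deinit(std.testing.allocator);
        const at = try addExtra(&list, std.testing.allocator, Extra{
            .ty = @enumFromInt(7),
            .flags = .{ .is_pub = true },
            .len = 3,
        });
        try std.testing.expectEqual(@as(u32, 0), at);
        try std.testing.expectEqualSlices(u32, &.{ 7, 1, 3 }, list.items);
    }
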
-fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: u32 } { +fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { data: T, end: u32 } { + const extra_items = extra.view().items(.@"0"); var result: T = undefined; const fields = @typeInfo(T).Struct.fields; - inline for (fields, 0..) |field, i| { - const int32 = ip.extra.items[i + index]; + inline for (fields, index..) |field, extra_index| { + const extra_item = extra_items[extra_index]; @field(result, field.name) = switch (field.type) { Index, DeclIndex, @@ -8240,7 +8469,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct TrackedInst.Index, TrackedInst.Index.Optional, ComptimeAllocIndex, - => @enumFromInt(int32), + => @enumFromInt(extra_item), u32, i32, @@ -8252,7 +8481,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct Tag.TypeStructPacked.Flags, Tag.Variable.Flags, FuncAnalysis, - => @bitCast(int32), + => @bitCast(extra_item), else => @compileError("bad field type: " ++ @typeName(field.type)), }; @@ -8263,75 +8492,8 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct }; } -fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { - return extraDataTrail(ip, T, index).data; -} - -/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. -fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => return extraData(ip, T, index), - @sizeOf(u64) => {}, - else => @compileError("unsupported host"), - } - var result: T = undefined; - inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { - const host_int = ip.limbs.items[index + i / 2]; - const int32 = if (i % 2 == 0) - @as(u32, @truncate(host_int)) - else - @as(u32, @truncate(host_int >> 32)); - - @field(result, field.name) = switch (field.type) { - u32 => int32, - Index => @enumFromInt(int32), - else => @compileError("bad field type: " ++ @typeName(field.type)), - }; - } - return result; -} - -/// This function returns the Limb slice that is trailing data after a payload. -fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { - const field_count = @typeInfo(S).Struct.fields.len; - switch (@sizeOf(Limb)) { - @sizeOf(u32) => { - const start = limb_index + field_count; - return ip.extra.items[start..][0..len]; - }, - @sizeOf(u64) => { - const start = limb_index + @divExact(field_count, 2); - return ip.limbs.items[start..][0..len]; - }, - else => @compileError("unsupported host"), - } -} - -const LimbsAsIndexes = struct { - start: u32, - len: u32, -}; - -fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { - const host_slice = switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.items, - @sizeOf(u64) => ip.limbs.items, - else => @compileError("unsupported host"), - }; - // TODO: https://github.com/ziglang/zig/issues/1738 - return .{ - .start = @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))), - .len = @intCast(limbs.len), - }; -} - -/// This function converts Limb array indexes to a primitive slice type. 
-fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { - return switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], - @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], - else => @compileError("unsupported host"), - }; +fn extraData(extra: Local.Extra, comptime T: type, index: u32) T { + return extraDataTrail(extra, T, index).data; } test "basic usage" { @@ -8381,7 +8543,7 @@ pub fn slicePtrType(ip: *const InternPool, index: Index) Index { .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } - const item = index.getItem(ip); + const item = index.unwrap(ip).getItem(ip); switch (item.tag) { .type_slice => return @enumFromInt(item.data), else => unreachable, // not a slice type @@ -8390,18 +8552,20 @@ pub fn slicePtrType(ip: *const InternPool, index: Index) Index { /// Given a slice value, returns the value of the ptr field. pub fn slicePtr(ip: *const InternPool, index: Index) Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { - .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, + .ptr_slice => return extraData(unwrapped_index.getExtra(ip), PtrSlice, item.data).ptr, else => unreachable, // not a slice value } } /// Given a slice value, returns the value of the len field. pub fn sliceLen(ip: *const InternPool, index: Index) Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { - .ptr_slice => return ip.extraData(PtrSlice, item.data).len, + .ptr_slice => return extraData(unwrapped_index.getExtra(ip), PtrSlice, item.data).len, else => unreachable, // not a slice value } } @@ -8461,20 +8625,24 @@ pub fn getCoerced( } }), }; }, - else => switch (val.getTag(ip)) { - .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), - .func_coerced => { - const func: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], - ); - switch (func.getTag(ip)) { - .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), - else => unreachable, - } - }, - else => {}, + else => { + const unwrapped_val = val.unwrap(ip); + const val_item = unwrapped_val.getItem(ip); + switch (val_item.tag) { + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), + .func_coerced => { + const func: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + val_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + ]); + switch (func.unwrap(ip).getTag(ip)) { + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), + else => unreachable, + } + }, + else => {}, + } }, } @@ -8712,9 +8880,10 @@ fn getCoercedFuncDecl( val: Index, new_ty: Index, ) Allocator.Error!Index { - const prev_ty: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], - ); + const unwrapped_val = val.unwrap(ip); + const prev_ty: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").? 
+ ]); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); } @@ -8726,9 +8895,10 @@ fn getCoercedFuncInstance( val: Index, new_ty: Index, ) Allocator.Error!Index { - const prev_ty: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], - ); + const unwrapped_val = val.unwrap(ip); + const prev_ty: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").? + ]); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); } @@ -8740,24 +8910,26 @@ fn getCoercedFunc( func: Index, ty: Index, ) Allocator.Error!Index { - const prev_extra_len = ip.extra.items.len; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); - - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); - const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ + const prev_extra_len = extra.mutate.len; + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).Struct.fields.len); + + const extra_index = addExtraAssumeCapacity(extra, Tag.FuncCoerced{ .ty = ty, .func = func, }); - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncCoerced(ip, extra_index), + .func = ip.extraFuncCoerced(extra.list.*, extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -8771,34 +8943,17 @@ fn getCoercedFunc( /// Asserts `val` has an integer type. /// Assumes `new_ty` is an integer type. pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index { - // The key cannot be passed directly to `get`, otherwise in the case of - // big_int storage, the limbs would be invalidated before they are read. - // Here we pre-reserve the limbs to ensure that the logic in `addInt` will - // not use an invalidated limbs pointer. - const new_storage: Key.Int.Storage = switch (int.storage) { - .u64, .i64, .lazy_align, .lazy_size => int.storage, - .big_int => |big_int| storage: { - const positive = big_int.positive; - const limbs = ip.limbsSliceToIndex(big_int.limbs); - // This line invalidates the limbs slice, but the indexes computed in the - // previous line are still correct. 
- try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - break :storage .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }; - }, - }; return ip.get(gpa, tid, .{ .int = .{ .ty = new_ty, - .storage = new_storage, + .storage = int.storage, } }); } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { - const item = val.getItem(ip); + const unwrapped_val = val.unwrap(ip); + const item = unwrapped_val.getItem(ip); switch (item.tag) { - .type_function => return extraFuncType(ip, item.data), + .type_function => return extraFuncType(unwrapped_val.tid, unwrapped_val.getExtra(ip), item.data), else => return null, } } @@ -8819,7 +8974,7 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { .c_ulonglong_type, .comptime_int_type, => true, - else => switch (ty.getTag(ip)) { + else => switch (ty.unwrap(ip).getTag(ip)) { .type_int_signed, .type_int_unsigned, => true, @@ -8895,9 +9050,11 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { /// This is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .variable); - ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @intFromEnum(init_index); + @atomicStore(u32, &extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release); } pub fn dump(ip: *const InternPool) void { @@ -8907,12 +9064,16 @@ pub fn dump(ip: *const InternPool) void { fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var items_len: usize = 0; + var extra_len: usize = 0; + var limbs_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; + extra_len += local.mutate.extra.len; + limbs_len += local.mutate.limbs.len; } const items_size = (1 + 4) * items_len; - const extra_size = 4 * ip.extra.items.len; - const limbs_size = 8 * ip.limbs.items.len; + const extra_size = 4 * extra_len; + const limbs_size = 8 * limbs_len; const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); // TODO: map overhead size is not taken into account @@ -8929,9 +9090,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { total_size, items_len, items_size, - ip.extra.items.len, + extra_len, extra_size, - ip.limbs.items.len, + limbs_len, limbs_size, ip.allocated_decls.len, decls_size, @@ -8943,7 +9104,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }; var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); for (ip.locals) |*local| { - const items = local.shared.items.view(); + const items = local.shared.items.view().slice(); + const extra_list = local.shared.extra; + const extra_items = extra_list.view().items(.@"0"); for ( items.items(.tag)[0..local.mutate.items.len], items.items(.data)[0..local.mutate.items.len], @@ -8968,12 +9131,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_error_union => @sizeOf(Key.ErrorUnionType), .type_anyerror_union => 0, .type_error_set => b: { - const info = ip.extraData(Tag.ErrorSet, data); + const info = extraData(extra_list, Tag.ErrorSet, data); break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); }, .type_inferred_error_set => 0, 
.type_enum_explicit, .type_enum_nonexhaustive => b: { - const info = ip.extraData(EnumExplicit, data); + const info = extraData(extra_list, EnumExplicit, data); var ints = @typeInfo(EnumExplicit).Struct.fields.len; if (info.zir_index == .none) ints += 1; ints += if (info.captures_len != std.math.maxInt(u32)) @@ -8985,22 +9148,22 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(u32) * ints; }, .type_enum_auto => b: { - const info = ip.extraData(EnumAuto, data); + const info = extraData(extra_list, EnumAuto, data); const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; break :b @sizeOf(u32) * ints; }, .type_opaque => b: { - const info = ip.extraData(Tag.TypeOpaque, data); + const info = extraData(extra_list, Tag.TypeOpaque, data); const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; break :b @sizeOf(u32) * ints; }, .type_struct => b: { if (data == 0) break :b 0; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); const info = extra.data; var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; if (info.flags.any_captures) { - const captures_len = ip.extra.items[extra.end]; + const captures_len = extra_items[extra.end]; ints += 1 + captures_len; } ints += info.fields_len; // types @@ -9021,13 +9184,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(u32) * ints; }, .type_struct_anon => b: { - const info = ip.extraData(TypeStructAnon, data); + const info = extraData(extra_list, TypeStructAnon, data); break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); }, .type_struct_packed => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + @@ -9035,9 +9198,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra.data.fields_len * 2); }, .type_struct_packed_inits => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + @@ -9045,14 +9208,14 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra.data.fields_len * 3); }, .type_tuple_anon => b: { - const info = ip.extraData(TypeStructAnon, data); + const info = extraData(extra_list, TypeStructAnon, data); break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); }, .type_union => b: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const extra = extraDataTrail(extra_list, Tag.TypeUnion, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; const per_field = @sizeOf(u32); // field type @@ -9067,7 +9230,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }, .type_function => b: { - const info = ip.extraData(Tag.TypeFunction, data); + const info = extraData(extra_list, Tag.TypeFunction, data); break :b @sizeOf(Tag.TypeFunction) + (@sizeOf(Index) * info.params_len) + 
(@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + @@ -9102,8 +9265,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .int_positive, .int_negative, => b: { - const int = ip.limbData(Int, data); - break :b @sizeOf(Int) + int.limbs_len * 8; + const limbs_list = local.shared.getLimbs(); + const int: Int = @bitCast(limbs_list.view().items(.@"0")[data..][0..Int.limbs_items_len].*); + break :b @sizeOf(Int) + int.limbs_len * @sizeOf(Limb); }, .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), @@ -9114,12 +9278,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .enum_tag => @sizeOf(Tag.EnumTag), .bytes => b: { - const info = ip.extraData(Bytes, data); + const info = extraData(extra_list, Bytes, data); const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); }, .aggregate => b: { - const info = ip.extraData(Tag.Aggregate, data); + const info = extraData(extra_list, Tag.Aggregate, data); const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); }, @@ -9137,7 +9301,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .extern_func => @sizeOf(Tag.ExternFunc), .func_decl => @sizeOf(Tag.FuncDecl), .func_instance => b: { - const info = ip.extraData(Tag.FuncInstance, data); + const info = extraData(extra_list, Tag.FuncInstance, data); const ty = ip.typeOf(info.generic_owner); const params_len = ip.indexToKey(ty).func_type.param_types.len; break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; @@ -9147,7 +9311,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .union_value => @sizeOf(Key.Union), .memoized_call => b: { - const info = ip.extraData(MemoizedCall, data); + const info = extraData(extra_list, MemoizedCall, data); break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); }, }); @@ -9287,14 +9451,15 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; for (ip.locals, 0..) 
|*local, tid| { - const items = local.shared.items.view(); + const items = local.shared.items.view().slice(); + const extra_list = local.shared.extra; for ( items.items(.tag)[0..local.mutate.items.len], items.items(.data)[0..local.mutate.items.len], 0.., ) |tag, data, index| { if (tag != .func_instance) continue; - const info = ip.extraData(Tag.FuncInstance, data); + const info = extraData(extra_list, Tag.FuncInstance, data); const gop = try instances.getOrPut(arena, info.generic_owner); if (!gop.found_existing) gop.value_ptr.* = .{}; @@ -9319,7 +9484,8 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const generic_fn_owner_decl = ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { - const func = ip.extraFuncInstance(index.getData(ip)); + const unwrapped_index = index.unwrap(ip); + const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip)); const owner_decl = ip.declPtrConst(func.owner_decl); try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { @@ -9465,9 +9631,9 @@ pub fn getOrPutTrailingString( comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { const strings = ip.getLocal(tid).getMutableStrings(gpa); - const start: u32 = @intCast(strings.lenPtr().* - len); - if (len > 0 and strings.view().items(.@"0")[strings.lenPtr().* - 1] == 0) { - strings.lenPtr().* -= 1; + const start: u32 = @intCast(strings.mutate.len - len); + if (len > 0 and strings.view().items(.@"0")[strings.mutate.len - 1] == 0) { + strings.mutate.len -= 1; } else { try strings.ensureUnusedCapacity(1); } @@ -9674,105 +9840,112 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization on tags is needed so that indexToKey can call // typeOf without being recursive. 
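
[Editor's note: to make the recursion-avoidance described in the comment above concrete, because every payload stores its `ty` in a fixed field position, `typeOf` can read that single word straight out of the flat extra array instead of decoding the whole payload through `indexToKey`. A small sketch under hypothetical types; `PtrSlice` stands in for any payload with a leading `ty` field.]

    const std = @import("std");

    const Index = enum(u32) { none = std.math.maxInt(u32), _ };
    const PtrSlice = struct { ty: Index, ptr: Index, len: Index };

    /// Fetch only the `ty` word of a payload at offset `data` in the flat
    /// u32 array, using the field's compile-time index within the struct.
    fn payloadType(extra_items: []const u32, data: u32) Index {
        return @enumFromInt(extra_items[data + std.meta.fieldIndex(PtrSlice, "ty").?]);
    }

    test payloadType {
        const extra = [_]u32{ 7, 42, 3 }; // ty, ptr, len as raw words
        try std.testing.expectEqual(@as(Index, @enumFromInt(7)), payloadType(&extra, 0));
    }
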
- _ => switch (index.getTag(ip)) { - .removed => unreachable, + _ => { + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + return switch (item.tag) { + .removed => unreachable, - .type_int_signed, - .type_int_unsigned, - .type_array_big, - .type_array_small, - .type_vector, - .type_pointer, - .type_slice, - .type_optional, - .type_anyframe, - .type_error_union, - .type_anyerror_union, - .type_error_set, - .type_inferred_error_set, - .type_enum_auto, - .type_enum_explicit, - .type_enum_nonexhaustive, - .type_opaque, - .type_struct, - .type_struct_anon, - .type_struct_packed, - .type_struct_packed_inits, - .type_tuple_anon, - .type_union, - .type_function, - => .type_type, + .type_int_signed, + .type_int_unsigned, + .type_array_big, + .type_array_small, + .type_vector, + .type_pointer, + .type_slice, + .type_optional, + .type_anyframe, + .type_error_union, + .type_anyerror_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_opaque, + .type_struct, + .type_struct_anon, + .type_struct_packed, + .type_struct_packed_inits, + .type_tuple_anon, + .type_union, + .type_function, + => .type_type, - .undef, - .opt_null, - .only_possible_value, - => @enumFromInt(index.getData(ip)), + .undef, + .opt_null, + .only_possible_value, + => @enumFromInt(item.data), - .simple_type, .simple_value => unreachable, // handled via Index above + .simple_type, .simple_value => unreachable, // handled via Index above - inline .ptr_decl, - .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, - .ptr_comptime_field, - .ptr_int, - .ptr_eu_payload, - .ptr_opt_payload, - .ptr_elem, - .ptr_field, - .ptr_slice, - .opt_payload, - .error_union_payload, - .int_small, - .int_lazy_align, - .int_lazy_size, - .error_set_error, - .error_union_error, - .enum_tag, - .variable, - .extern_func, - .func_decl, - .func_instance, - .func_coerced, - .union_value, - .bytes, - .aggregate, - .repeated, - => |t| { - const extra_index = index.getData(ip); - const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; - return @enumFromInt(ip.extra.items[extra_index + field_index]); - }, + inline .ptr_decl, + .ptr_comptime_alloc, + .ptr_anon_decl, + .ptr_anon_decl_aligned, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .error_union_payload, + .int_small, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .enum_tag, + .variable, + .extern_func, + .func_decl, + .func_instance, + .func_coerced, + .union_value, + .bytes, + .aggregate, + .repeated, + => |t| { + const extra_list = unwrapped_index.getExtra(ip); + return @enumFromInt(extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(t.Payload(), "ty").?]); + }, - .int_u8 => .u8_type, - .int_u16 => .u16_type, - .int_u32 => .u32_type, - .int_i32 => .i32_type, - .int_usize => .usize_type, + .int_u8 => .u8_type, + .int_u16 => .u16_type, + .int_u32 => .u32_type, + .int_i32 => .i32_type, + .int_usize => .usize_type, - .int_comptime_int_u32, - .int_comptime_int_i32, - => .comptime_int_type, + .int_comptime_int_u32, + .int_comptime_int_i32, + => .comptime_int_type, - // Note these are stored in limbs data, not extra data. - .int_positive, - .int_negative, - => ip.limbData(Int, index.getData(ip)).ty, + // Note these are stored in limbs data, not extra data. 
+ .int_positive, + .int_negative, + => { + const limbs_list = ip.getLocalShared(unwrapped_index.tid).getLimbs(); + const int: Int = @bitCast(limbs_list.view().items(.@"0")[item.data..][0..Int.limbs_items_len].*); + return int.ty; + }, - .enum_literal => .enum_literal_type, - .float_f16 => .f16_type, - .float_f32 => .f32_type, - .float_f64 => .f64_type, - .float_f80 => .f80_type, - .float_f128 => .f128_type, + .enum_literal => .enum_literal_type, + .float_f16 => .f16_type, + .float_f32 => .f32_type, + .float_f64 => .f64_type, + .float_f80 => .f80_type, + .float_f128 => .f128_type, - .float_c_longdouble_f80, - .float_c_longdouble_f128, - => .c_longdouble_type, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + => .c_longdouble_type, - .float_comptime_float => .comptime_float_type, + .float_comptime_float => .comptime_float_type, - .memoized_call => unreachable, + .memoized_call => unreachable, + }; }, .none => unreachable, @@ -9806,54 +9979,67 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { } pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { - const item = ty.getItem(ip); - const child_item = switch (item.tag) { - .type_pointer => @as(Index, @enumFromInt(ip.extra.items[ - item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? - ])).getItem(ip), - .type_function => item, + const unwrapped_ty = ty.unwrap(ip); + const ty_extra = unwrapped_ty.getExtra(ip); + const ty_item = unwrapped_ty.getItem(ip); + const child_extra, const child_item = switch (ty_item.tag) { + .type_pointer => child: { + const child_index: Index = @enumFromInt(ty_extra.view().items(.@"0")[ + ty_item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? + ]); + const unwrapped_child = child_index.unwrap(ip); + break :child .{ unwrapped_child.getExtra(ip), unwrapped_child.getItem(ip) }; + }, + .type_function => .{ ty_extra, ty_item }, else => unreachable, }; assert(child_item.tag == .type_function); - return @enumFromInt(ip.extra.items[ + return @enumFromInt(child_extra.view().items(.@"0")[ child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").? ]); } pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { - return switch (ty) { - .noreturn_type => true, - else => switch (ty.getTag(ip)) { - .type_error_set => ip.extra.items[ty.getData(ip) + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, - else => false, + switch (ty) { + .noreturn_type => return true, + else => { + const unwrapped_ty = ty.unwrap(ip); + const ty_item = unwrapped_ty.getItem(ip); + return switch (ty_item.tag) { + .type_error_set => unwrapped_ty.getExtra(ip).view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, + else => false, + }; }, - }; + } } pub fn isUndef(ip: *const InternPool, val: Index) bool { - return val == .undef or val.getTag(ip) == .undef; + return val == .undef or val.unwrap(ip).getTag(ip) == .undef; } pub fn isVariable(ip: *const InternPool, val: Index) bool { - return val.getTag(ip) == .variable; + return val.unwrap(ip).getTag(ip) == .variable; } pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { var base = val; while (true) { - switch (base.getTag(ip)) { - .ptr_decl => return @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(PtrDecl, "decl").? 
+ const unwrapped_base = base.unwrap(ip); + const base_item = unwrapped_base.getItem(ip); + const base_extra_items = unwrapped_base.getExtra(ip).view().items(.@"0"); + switch (base_item.tag) { + .ptr_decl => return @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(PtrDecl, "decl").? ]), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + => |tag| base = @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "base").? ]), - .ptr_slice => base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(PtrSlice, "ptr").? + .ptr_slice => base = @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(PtrSlice, "ptr").? ]), else => return .none, } @@ -9863,7 +10049,9 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag { var base = val; while (true) { - switch (base.getTag(ip)) { + const unwrapped_base = base.unwrap(ip); + const base_item = unwrapped_base.getItem(ip); + switch (base_item.tag) { .ptr_decl => return .decl, .ptr_comptime_alloc => return .comptime_alloc, .ptr_anon_decl, @@ -9875,11 +10063,11 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Ta .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "base").? ]), - inline .ptr_slice => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "ptr").? + inline .ptr_slice => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "ptr").? 
]), else => return null, } @@ -9989,7 +10177,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .empty_struct => unreachable, .generic_poison => unreachable, - _ => switch (index.getTag(ip)) { + _ => switch (index.unwrap(ip).getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -10097,30 +10285,35 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois } pub fn isFuncBody(ip: *const InternPool, index: Index) bool { - return switch (index.getTag(ip)) { + return switch (index.unwrap(ip).getTag(ip)) { .func_decl, .func_instance, .func_coerced => true, else => false, }; } pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); const extra_index = switch (item.tag) { .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, - .func_coerced => i: { + .func_coerced => { const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; - const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); - const sub_item = func_index.getItem(ip); - break :i switch (sub_item.tag) { - .func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, - .func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, - else => unreachable, - }; + const func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]); + const unwrapped_func = func_index.unwrap(ip); + const func_item = unwrapped_func.getItem(ip); + return @ptrCast(&unwrapped_func.getExtra(ip).view().items(.@"0")[ + switch (func_item.tag) { + .func_decl => func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, + .func_instance => func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, + else => unreachable, + } + ]); }, else => unreachable, }; - return @ptrCast(&ip.extra.items[extra_index]); + return @ptrCast(&extra.view().items(.@"0")[extra_index]); } pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { @@ -10128,33 +10321,36 @@ pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { } pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + const item_extra = unwrapped_index.getExtra(ip); const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; - const extra_index = switch (item.tag) { - .func_decl => item.data + zir_body_inst_field_index, - .func_instance => ei: { + switch (item.tag) { + .func_decl => return @enumFromInt(item_extra.view().items(.@"0")[item.data + zir_body_inst_field_index]), + .func_instance => { const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; - const func_decl_index: Index = @enumFromInt(ip.extra.items[item.data + generic_owner_field_index]); - const func_decl_item = func_decl_index.getItem(ip); + const func_decl_index: Index = @enumFromInt(item_extra.view().items(.@"0")[item.data + generic_owner_field_index]); + const unwrapped_func_decl = func_decl_index.unwrap(ip); + const func_decl_item = unwrapped_func_decl.getItem(ip); + const func_decl_extra = unwrapped_func_decl.getExtra(ip); assert(func_decl_item.tag == .func_decl); - break :ei 
func_decl_item.data + zir_body_inst_field_index; + return @enumFromInt(func_decl_extra.view().items(.@"0")[func_decl_item.data + zir_body_inst_field_index]); }, .func_coerced => { - const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ + const uncoerced_func_index: Index = @enumFromInt(item_extra.view().items(.@"0")[ item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? ]); return ip.funcZirBodyInst(uncoerced_func_index); }, else => unreachable, - }; - return @enumFromInt(ip.extra.items[extra_index]); + } } pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { - const item = ies_index.getItem(ip); + const item = ies_index.unwrap(ip).getItem(ip); assert(item.tag == .type_inferred_error_set); const func_index: Index = @enumFromInt(item.data); - switch (func_index.getTag(ip)) { + switch (func_index.unwrap(ip).getTag(ip)) { .func_decl, .func_instance => {}, else => unreachable, // assertion failed } @@ -10175,30 +10371,36 @@ pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { /// added to `ip`. pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { assert(funcHasInferredErrorSet(ip, func_index)); - const func_item = func_index.getItem(ip); + const unwrapped_func = func_index.unwrap(ip); + const func_extra = unwrapped_func.getExtra(ip); + const func_item = unwrapped_func.getItem(ip); const extra_index = switch (func_item.tag) { .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, - .func_coerced => i: { - const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ + .func_coerced => { + const uncoerced_func_index: Index = @enumFromInt(func_extra.view().items(.@"0")[ func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); - const uncoerced_func_item = uncoerced_func_index.getItem(ip); - break :i switch (uncoerced_func_item.tag) { - .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, - else => unreachable, - }; + const unwrapped_uncoerced_func = uncoerced_func_index.unwrap(ip); + const uncoerced_func_item = unwrapped_uncoerced_func.getItem(ip); + return @ptrCast(&unwrapped_uncoerced_func.getExtra(ip).view().items(.@"0")[ + switch (uncoerced_func_item.tag) { + .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, + else => unreachable, + } + ]); }, else => unreachable, }; - return @ptrCast(&ip.extra.items[extra_index]); + return @ptrCast(&func_extra.view().items(.@"0")[extra_index]); } pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .func_decl); - return extraFuncDecl(ip, item.data); + return extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), item.data); } pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { @@ -10206,15 +10408,19 @@ pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { } pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .type_function); - return ip.extra.items[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; + return extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; } pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { - return switch (index.getTag(ip)) { - .func_coerced => @enumFromInt(ip.extra.items[ - index.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + return switch (item.tag) { + .func_coerced => @enumFromInt(unwrapped_index.getExtra(ip).view().items(.@"0")[ + item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? ]), .func_instance, .func_decl => index, else => unreachable, @@ -10241,11 +10447,11 @@ pub fn resolveBuiltinType( (ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); // Copy the data - const item = resolved_index.getItem(ip); - const unwrapped = want_index.unwrap(ip); - var items = ip.getLocalShared(unwrapped.tid).items.view().slice(); - items.items(.data)[unwrapped.index] = item.data; - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], item.tag, .release); + const item = resolved_index.unwrap(ip).getItem(ip); + const unwrapped_index = want_index.unwrap(ip); + var items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view().slice(); + items.items(.data)[unwrapped_index.index] = item.data; + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], item.tag, .release); ip.remove(tid, resolved_index); } @@ -10268,17 +10474,19 @@ pub fn structDecl(ip: *const InternPool, i: Index) OptionalDeclIndex { /// Returns the already-existing field with the same name, if any. 
pub fn addFieldName( ip: *InternPool, + extra: Local.Extra, names_map: MapIndex, names_start: u32, name: NullTerminatedString, ) ?u32 { + const extra_items = extra.view().items(.@"0"); const map = &ip.maps.items[@intFromEnum(names_map)]; const field_index = map.count(); - const strings = ip.extra.items[names_start..][0..field_index]; + const strings = extra_items[names_start..][0..field_index]; const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) }; const gop = map.getOrPutAssumeCapacityAdapted(name, adapter); if (gop.found_existing) return @intCast(gop.index); - ip.extra.items[names_start + field_index] = @intFromEnum(name); + extra_items[names_start + field_index] = @intFromEnum(name); return null; } diff --git a/src/Sema.zig b/src/Sema.zig index 34db45795500..f1c61fdd2ace 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36925,7 +36925,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .none, => unreachable, - _ => switch (ty.toIntern().getTag(ip)) { + _ => switch (ty.toIntern().unwrap(ip).getTag(ip)) { .removed => unreachable, .type_int_signed, // i0 handled above diff --git a/src/Type.zig b/src/Type.zig index ba53535d40f8..57ac2310d5c0 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3686,7 +3686,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .empty_struct => unreachable, .generic_poison => unreachable, - else => switch (ty_ip.getTag(ip)) { + else => switch (ty_ip.unwrap(ip).getTag(ip)) { .type_struct, .type_struct_packed, .type_struct_packed_inits, diff --git a/src/Value.zig b/src/Value.zig index c3e4b05fcb34..f114a2c7fa1b 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -110,14 +110,13 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const ip = &mod.intern_pool; const len: u32 = @intCast(len_u64); const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); - const strings_len = strings.lenPtr(); try strings.ensureUnusedCapacity(len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. - const prev_len = strings_len.*; + const prev_len = strings.mutate.len; const elem_val = try val.elemValue(pt, i); - assert(strings_len.* == prev_len); + assert(strings.mutate.len == prev_len); const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); strings.appendAssumeCapacity(.{byte}); } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 0396d06b98da..b0fc35b55229 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -3,7 +3,7 @@ zcu: *Zcu, /// Dense, per-thread unique index. 
tid: Id, -pub const Id = if (InternPool.single_threaded) enum { main } else enum(usize) { main, _ }; +pub const Id = if (InternPool.single_threaded) enum { main } else enum(u8) { main, _ }; pub fn astGenFile( pt: Zcu.PerThread, diff --git a/src/main.zig b/src/main.zig index 9fd9087b639d..c7bbb9883cf4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -403,6 +403,7 @@ const usage_build_generic = \\General Options: \\ -h, --help Print this help and exit \\ --color [auto|off|on] Enable or disable colored error messages + \\ -j Limit concurrent jobs (default is to use all CPU cores) \\ -femit-bin[=path] (default) Output machine code \\ -fno-emit-bin Do not output machine code \\ -femit-asm[=path] Output .s (assembly code) @@ -1004,6 +1005,7 @@ fn buildOutputType( .on else .auto; + var n_jobs: ?u32 = null; switch (arg_mode) { .build, .translate_c, .zig_test, .run => { @@ -1141,6 +1143,17 @@ fn buildOutputType( color = std.meta.stringToEnum(Color, next_arg) orelse { fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg}); }; + } else if (mem.startsWith(u8, arg, "-j")) { + const str = arg["-j".len..]; + const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| { + fatal("unable to parse jobs count '{s}': {s}", .{ + str, @errorName(err), + }); + }; + if (num < 1) { + fatal("number of jobs must be at least 1\n", .{}); + } + n_jobs = num; } else if (mem.eql(u8, arg, "--subsystem")) { subsystem = try parseSubSystem(args_iter.nextOrFatal()); } else if (mem.eql(u8, arg, "-O")) { @@ -3092,7 +3105,11 @@ fn buildOutputType( defer emit_implib_resolved.deinit(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); var cleanup_local_cache_dir: ?fs.Dir = null; @@ -4644,6 +4661,7 @@ const usage_build = \\ all Print the build summary in its entirety \\ failures (Default) Only print failed steps \\ none Do not print the build summary + \\ -j Limit concurrent jobs (default is to use all CPU cores) \\ --build-file [file] Override path to build.zig \\ --cache-dir [path] Override path to local Zig cache directory \\ --global-cache-dir [path] Override path to global Zig cache directory @@ -4718,6 +4736,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { try child_argv.append("-Z" ++ results_tmp_file_nonce); var color: Color = .auto; + var n_jobs: ?u32 = null; { var i: usize = 0; @@ -4811,6 +4830,17 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }; try child_argv.appendSlice(&.{ arg, args[i] }); continue; + } else if (mem.startsWith(u8, arg, "-j")) { + const str = arg["-j".len..]; + const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| { + fatal("unable to parse jobs count '{s}': {s}", .{ + str, @errorName(err), + }); + }; + if (num < 1) { + fatal("number of jobs must be at least 1\n", .{}); + } + n_jobs = num; } else if (mem.eql(u8, arg, "--seed")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; @@ -4895,7 +4925,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = 
@min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); // Dummy http client that is not actually used when only_core_functionality is enabled. @@ -5329,7 +5363,11 @@ fn jitCmd( defer global_cache_directory.handle.close(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = @min(@max(std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); var child_argv: std.ArrayListUnmanaged([]const u8) = .{}; From 166402c16bddccc364b9108a9e69af3a0dd6f1ab Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 Jul 2024 13:18:53 -0400 Subject: [PATCH 071/152] bootstrap: fix build --- lib/std/Thread.zig | 7 ++++++- src/link.zig | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 5bfaef6127e1..25261053ee6f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -280,12 +280,13 @@ pub fn getCurrentId() Id { pub const CpuCountError = error{ PermissionDenied, SystemResources, + Unsupported, Unexpected, }; /// Returns the platforms view on the number of logical CPU cores available. pub fn getCpuCount() CpuCountError!usize { - return Impl.getCpuCount(); + return try Impl.getCpuCount(); } /// Configuration options for hints on how to spawn threads. @@ -782,6 +783,10 @@ const WasiThreadImpl = struct { return tls_thread_id; } + fn getCpuCount() error{Unsupported}!noreturn { + return error.Unsupported; + } + fn getHandle(self: Impl) ThreadHandle { return self.thread.tid.load(.seq_cst); } diff --git a/src/link.zig b/src/link.zig index f407ad2f4c33..03ee3185ab6a 100644 --- a/src/link.zig +++ b/src/link.zig @@ -538,7 +538,7 @@ pub const File = struct { pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void { if (build_options.only_c) { assert(base.tag == .c); - return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node); + return @as(*C, @fieldParentPtr("base", base)).flush(arena, tid, prog_node); } const comp = base.comp; if (comp.clang_preprocessor_mode == .yes or comp.clang_preprocessor_mode == .pch) { From 1abc904075ee37b059777869ab144854e4db0711 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 Jul 2024 23:23:17 -0400 Subject: [PATCH 072/152] InternPool: start documenting new thread-safe fields --- src/InternPool.zig | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/InternPool.zig b/src/InternPool.zig index 9f179b601e12..8d72c20e2ebf 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,10 +2,16 @@ //! This data structure is self-contained, with the following exceptions: //! * Module.Namespace has a pointer to Module.File +/// One item per thread, indexed by `tid`, which is dense and unique per thread. locals: []Local = &.{}, +/// Length must be a power of two and represents the number of simultaneous +/// writers that can mutate any single sharded data structure. shards: []Shard = &.{}, +/// Cached number of active bits in a `tid`. tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, +/// Cached shift amount to put a `tid` in the top bits of a 31-bit value. tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, +/// Cached shift amount to put a `tid` in the top bits of a 32-bit value. 
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, /// Rather than allocating Decl objects with an Allocator, we instead allocate @@ -341,7 +347,11 @@ pub const DepEntry = extern struct { }; const Local = struct { + /// These fields can be accessed from any thread by calling `acquire`. + /// They are only modified by the owning thread. shared: Shared align(std.atomic.cache_line), + /// This state is fully local to the owning thread and does not require any + /// atomic access. mutate: struct { arena: std.heap.ArenaAllocator.State, items: Mutate, @@ -579,6 +589,7 @@ const Local = struct { const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem)); const View = std.MultiArrayList(Elem); + /// Must be called when accessing from another thread. fn acquire(list: *const ListSelf) ListSelf { return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) }; } @@ -703,6 +714,7 @@ const Shard = struct { const alignment = @max(@alignOf(Header), @alignOf(Entry)); const entries_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Entry)); + /// Must be called unless the mutate mutex is locked. fn acquire(map: *const @This()) @This() { return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) }; } From 14192019ffcce6a3c60c8e761ca4ccedc2f027b0 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 Jul 2024 23:45:13 -0400 Subject: [PATCH 073/152] InternPool: fix dumping of simple types --- src/InternPool.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 8d72c20e2ebf..c327172abfb9 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -9362,8 +9362,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { switch (tag) { .removed => {}, - .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), - .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}), .type_int_signed, .type_int_unsigned, From c36e2bb9802ab4317980a98ea518483010fe2c80 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 Jul 2024 07:02:42 -0400 Subject: [PATCH 074/152] InternPool: fix multi-thread build --- src/InternPool.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index c327172abfb9..1c501cb28ee2 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -878,7 +878,7 @@ pub const String = enum(u32) { fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String { assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); assert(unwrapped.index <= ip.getIndexMask(u32)); - return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_32 | unwrapped.index); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | unwrapped.index); } }; fn unwrap(string: String, ip: *const InternPool) Unwrapped { @@ -3408,7 +3408,7 @@ pub const Index = enum(u32) { fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Index { assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); assert(unwrapped.index <= ip.getIndexMask(u31)); - return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_31 | unwrapped.index); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_31 | unwrapped.index); } pub fn 
getExtra(unwrapped: Unwrapped, ip: *const InternPool) Local.Extra { @@ -9651,7 +9651,7 @@ pub fn getOrPutTrailingString( } const key: []const u8 = strings.view().items(.@"0")[start..]; const value: embedded_nulls.StringType() = - @enumFromInt(@intFromEnum(tid) << ip.tid_shift_32 | start); + @enumFromInt(@as(u32, @intFromEnum(tid)) << ip.tid_shift_32 | start); const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; switch (embedded_nulls) { .no_embedded_nulls => assert(!has_embedded_null), From 65ced4a33436fa762de75e22a986ae08a8c0d9cc Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 Jul 2024 09:05:30 -0400 Subject: [PATCH 075/152] Compilation: put supported codegen backends on a separate thread (There are no supported backends.) --- lib/std/Progress.zig | 2 +- lib/std/Thread/Pool.zig | 23 +++---- src/Compilation.zig | 137 +++++++++++++++++++++++++++++++------ src/Compilation/Config.zig | 8 +-- src/Zcu.zig | 14 ++-- src/Zcu/PerThread.zig | 4 +- src/target.zig | 50 ++++++++++---- 7 files changed, 173 insertions(+), 65 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 160894aae9b1..2028e95dd550 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -282,7 +282,7 @@ pub const Node = struct { } fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node { - assert(parent != .unused); + assert(parent == .none or @intFromEnum(parent) < node_storage_buffer_len); const storage = storageByIndex(free_index); storage.* = .{ diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index d501b665204a..179f2f8521c4 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -21,11 +21,11 @@ const Runnable = struct { runFn: RunProto, }; -const RunProto = *const fn (*Runnable, id: ?u32) void; +const RunProto = *const fn (*Runnable, id: ?usize) void; pub const Options = struct { allocator: std.mem.Allocator, - n_jobs: ?u32 = null, + n_jobs: ?usize = null, track_ids: bool = false, }; @@ -109,7 +109,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, _: ?u32) void { + fn runFn(runnable: *Runnable, _: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); @@ -150,7 +150,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and /// `WaitGroup.finish` after it returns. /// -/// The first argument passed to `func` is a dense `u32` thread id, the rest +/// The first argument passed to `func` is a dense `usize` thread id, the rest /// of the arguments are passed from `args`. Requires the pool to have been /// initialized with `.track_ids = true`. 
/// @@ -172,7 +172,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, id: ?u32) void { + fn runFn(runnable: *Runnable, id: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, .{id.?} ++ closure.arguments); @@ -191,7 +191,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar pool.mutex.lock(); const closure = pool.allocator.create(Closure) catch { - const id = pool.ids.getIndex(std.Thread.getCurrentId()); + const id: ?usize = pool.ids.getIndex(std.Thread.getCurrentId()); pool.mutex.unlock(); @call(.auto, func, .{id.?} ++ args); wait_group.finish(); @@ -258,7 +258,7 @@ fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); - const id: ?u32 = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; + const id: ?usize = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); while (true) { @@ -280,15 +280,12 @@ fn worker(pool: *Pool) void { } pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { - var id: ?u32 = null; + var id: ?usize = null; while (!wait_group.isDone()) { pool.mutex.lock(); if (pool.run_queue.popFirst()) |run_node| { - id = id orelse if (pool.ids.getIndex(std.Thread.getCurrentId())) |index| - @intCast(index) - else - null; + id = id orelse pool.ids.getIndex(std.Thread.getCurrentId()); pool.mutex.unlock(); run_node.data.runFn(&run_node.data, id); continue; @@ -300,6 +297,6 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { } } -pub fn getIdCount(pool: *Pool) u32 { +pub fn getIdCount(pool: *Pool) usize { return @intCast(1 + pool.threads.len); } diff --git a/src/Compilation.zig b/src/Compilation.zig index 74e8222bc3a1..118e325ed7a9 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -103,6 +103,14 @@ lld_errors: std.ArrayListUnmanaged(LldError) = .{}, work_queue: std.fifo.LinearFifo(Job, .Dynamic), +codegen_work: if (InternPool.single_threaded) void else struct { + mutex: std.Thread.Mutex, + cond: std.Thread.Condition, + queue: std.fifo.LinearFifo(CodegenJob, .Dynamic), + job_error: ?JobError, + done: bool, +}, + /// These jobs are to invoke the Clang compiler to create an object file, which /// gets linked with the Compilation. c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic), @@ -362,6 +370,16 @@ const Job = union(enum) { windows_import_lib: usize, }; +const CodegenJob = union(enum) { + decl: InternPool.DeclIndex, + func: struct { + func: InternPool.Index, + /// This `Air` is owned by the `Job` and allocated with `gpa`. + /// It must be deinited when the job is processed. + air: Air, + }, +}; + pub const CObject = struct { /// Relative to cwd. Owned by arena. 
src: CSourceFile, @@ -1429,6 +1447,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .emit_llvm_ir = options.emit_llvm_ir, .emit_llvm_bc = options.emit_llvm_bc, .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), + .codegen_work = if (InternPool.single_threaded) {} else .{ + .mutex = .{}, + .cond = .{}, + .queue = std.fifo.LinearFifo(CodegenJob, .Dynamic).init(gpa), + .job_error = null, + .done = false, + }, .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), @@ -3310,7 +3335,21 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { pub fn performAllTheWork( comp: *Compilation, main_progress_node: std.Progress.Node, -) error{ TimerUnsupported, OutOfMemory }!void { +) JobError!void { + defer if (comp.module) |mod| { + mod.sema_prog_node.end(); + mod.sema_prog_node = std.Progress.Node.none; + mod.codegen_prog_node.end(); + mod.codegen_prog_node = std.Progress.Node.none; + }; + try comp.performAllTheWorkInner(main_progress_node); + if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error; +} + +fn performAllTheWorkInner( + comp: *Compilation, + main_progress_node: std.Progress.Node, +) JobError!void { // Here we queue up all the AstGen tasks first, followed by C object compilation. // We wait until the AstGen tasks are all completed before proceeding to the // (at least for now) single-threaded main work queue. However, C object compilation @@ -3410,16 +3449,20 @@ pub fn performAllTheWork( mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); } - defer if (comp.module) |mod| { - mod.sema_prog_node.end(); - mod.sema_prog_node = undefined; - mod.codegen_prog_node.end(); - mod.codegen_prog_node = undefined; + + if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&comp.work_queue_wait_group, codegenThread, .{comp}); + defer if (!InternPool.single_threaded) { + { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + comp.codegen_work.done = true; + } + comp.codegen_work.cond.signal(); }; while (true) { if (comp.work_queue.readItem()) |work_item| { - try processOneJob(0, comp, work_item, main_progress_node); + try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, work_item, main_progress_node); continue; } if (comp.module) |zcu| { @@ -3447,11 +3490,12 @@ pub fn performAllTheWork( } } -fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { +const JobError = Allocator.Error; + +fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void { switch (job) { .codegen_decl => |decl_index| { - const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - const decl = pt.zcu.declPtr(decl_index); + const decl = comp.module.?.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3461,26 +3505,20 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre .sema_failure, .codegen_failure, .dependency_failure, - => return, + => {}, .complete => { - const named_frame = tracy.namedFrame("codegen_decl"); - defer named_frame.end(); - assert(decl.has_tv); - - try pt.linkerUpdateDecl(decl_index); - return; 
+ try comp.queueCodegenJob(tid, .{ .decl = decl_index }); }, } }, .codegen_func => |func| { - const named_frame = tracy.namedFrame("codegen_func"); - defer named_frame.end(); - - const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; // This call takes ownership of `func.air`. - try pt.linkerUpdateFunc(func.func, func.air); + try comp.queueCodegenJob(tid, .{ .func = .{ + .func = func.func, + .air = func.air, + } }); }, .analyze_func => |func| { const named_frame = tracy.namedFrame("analyze_func"); @@ -3772,6 +3810,61 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre } } +fn queueCodegenJob(comp: *Compilation, tid: usize, codegen_job: CodegenJob) !void { + if (InternPool.single_threaded or + !comp.module.?.backendSupportsFeature(.separate_thread)) + return processOneCodegenJob(tid, comp, codegen_job); + + { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + try comp.codegen_work.queue.writeItem(codegen_job); + } + comp.codegen_work.cond.signal(); +} + +fn codegenThread(tid: usize, comp: *Compilation) void { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + + while (true) { + if (comp.codegen_work.queue.readItem()) |codegen_job| { + comp.codegen_work.mutex.unlock(); + defer comp.codegen_work.mutex.lock(); + + processOneCodegenJob(tid, comp, codegen_job) catch |job_error| { + comp.codegen_work.job_error = job_error; + break; + }; + continue; + } + + if (comp.codegen_work.done) break; + + comp.codegen_work.cond.wait(&comp.codegen_work.mutex); + } +} + +fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob) JobError!void { + switch (codegen_job) { + .decl => |decl_index| { + const named_frame = tracy.namedFrame("codegen_decl"); + defer named_frame.end(); + + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + try pt.linkerUpdateDecl(decl_index); + }, + .func => |func| { + const named_frame = tracy.namedFrame("codegen_func"); + defer named_frame.end(); + + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + // This call takes ownership of `func.air`. + try pt.linkerUpdateFunc(func.func, func.air); + }, + } +} + fn workerDocsCopy(comp: *Compilation) void { docsCopyFallible(comp) catch |err| { return comp.lockAndSetMiscFailure( diff --git a/src/Compilation/Config.zig b/src/Compilation/Config.zig index 2de218425284..6e28f5028c02 100644 --- a/src/Compilation/Config.zig +++ b/src/Compilation/Config.zig @@ -440,12 +440,8 @@ pub fn resolve(options: Options) ResolveError!Config { }; }; - const backend_supports_error_tracing = target_util.backendSupportsFeature( - target.cpu.arch, - target.ofmt, - use_llvm, - .error_return_trace, - ); + const backend = target_util.zigBackend(target, use_llvm); + const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, .error_return_trace); const root_error_tracing = b: { if (options.root_error_tracing) |x| break :b x; diff --git a/src/Zcu.zig b/src/Zcu.zig index b855e4fcf0f6..2f87bcca0f0e 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -64,8 +64,8 @@ root_mod: *Package.Module, /// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests. 
main_mod: *Package.Module, std_mod: *Package.Module, -sema_prog_node: std.Progress.Node = undefined, -codegen_prog_node: std.Progress.Node = undefined, +sema_prog_node: std.Progress.Node = std.Progress.Node.none, +codegen_prog_node: std.Progress.Node = std.Progress.Node.none, /// Used by AstGen worker to load and store ZIR cache. global_zir_cache: Compilation.Directory, @@ -3557,13 +3557,13 @@ pub const Feature = enum { /// to generate better machine code in the backends. All backends should migrate to /// enabling this feature. safety_checked_instructions, + /// If the backend supports running from another thread. + separate_thread, }; -pub fn backendSupportsFeature(zcu: Module, feature: Feature) bool { - const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; - const ofmt = zcu.root_mod.resolved_target.result.ofmt; - const use_llvm = zcu.comp.config.use_llvm; - return target_util.backendSupportsFeature(cpu_arch, ofmt, use_llvm, feature); +pub fn backendSupportsFeature(zcu: Module, comptime feature: Feature) bool { + const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm); + return target_util.backendSupportsFeature(backend, feature); } pub const AtomicPtrAlignmentError = error{ diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b0fc35b55229..f8a3104dc0be 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2129,7 +2129,7 @@ pub fn populateTestFunctions( zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); defer { zcu.sema_prog_node.end(); - zcu.sema_prog_node = undefined; + zcu.sema_prog_node = std.Progress.Node.none; } try pt.ensureDeclAnalyzed(decl_index); } @@ -2238,7 +2238,7 @@ pub fn populateTestFunctions( zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); defer { zcu.codegen_prog_node.end(); - zcu.codegen_prog_node = undefined; + zcu.codegen_prog_node = std.Progress.Node.none; } try pt.linkerUpdateDecl(decl_index); diff --git a/src/target.zig b/src/target.zig index a253c1fa0b81..2accc100b8e9 100644 --- a/src/target.zig +++ b/src/target.zig @@ -537,20 +537,42 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken }; } -pub fn backendSupportsFeature( - cpu_arch: std.Target.Cpu.Arch, - ofmt: std.Target.ObjectFormat, - use_llvm: bool, - feature: Feature, -) bool { +pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool { return switch (feature) { - .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64, - .panic_unwrap_error => ofmt == .c or use_llvm, - .safety_check_formatted => ofmt == .c or use_llvm, - .error_return_trace => use_llvm, - .is_named_enum_value => use_llvm, - .error_set_has_value => use_llvm or cpu_arch.isWasm(), - .field_reordering => ofmt == .c or use_llvm, - .safety_checked_instructions => use_llvm, + .panic_fn => switch (backend) { + .stage2_c, .stage2_llvm, .stage2_x86_64, .stage2_riscv64 => true, + else => false, + }, + .panic_unwrap_error => switch (backend) { + .stage2_c, .stage2_llvm => true, + else => false, + }, + .safety_check_formatted => switch (backend) { + .stage2_c, .stage2_llvm => true, + else => false, + }, + .error_return_trace => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .is_named_enum_value => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .error_set_has_value => switch (backend) { + .stage2_llvm, .stage2_wasm => true, + else => false, + }, + .field_reordering => switch (backend) 
{ + .stage2_c, .stage2_llvm => true, + else => false, + }, + .safety_checked_instructions => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .separate_thread => switch (backend) { + else => false, + }, }; } From 854e86c5676de82bc46b5c13a0c9c807596e438d Mon Sep 17 00:00:00 2001 From: PauloCampana Date: Sun, 7 Jul 2024 20:34:10 -0300 Subject: [PATCH 076/152] build_runner: fix oob access --- lib/compiler/build_runner.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 76fb9f22f8f3..79585086b317 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -1208,7 +1208,7 @@ fn nextArg(args: [][:0]const u8, idx: *usize) ?[:0]const u8 { fn nextArgOrFatal(args: [][:0]const u8, idx: *usize) [:0]const u8 { return nextArg(args, idx) orelse { - std.debug.print("expected argument after '{s}'\n access the help menu with 'zig build -h'\n", .{args[idx.*]}); + std.debug.print("expected argument after '{s}'\n access the help menu with 'zig build -h'\n", .{args[idx.* - 1]}); process.exit(1); }; } From 47846bc17c54d50886b702b5994c00ca8670c82b Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 10:31:59 +0100 Subject: [PATCH 077/152] Zcu: fix passing exported decls with compile errors to the backend --- src/Zcu/PerThread.zig | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index f8a3104dc0be..990317149285 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2098,6 +2098,25 @@ fn processExportsInner( gop.value_ptr.* = export_idx; } } + + switch (exported) { + .decl_index => |idx| if (failed: { + const decl = zcu.declPtr(idx); + if (decl.analysis != .complete) break :failed true; + // Check if has owned function + if (!decl.owns_tv) break :failed false; + if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false; + // Check if owned function failed + const a = zcu.funcInfo(decl.val.toIntern()).analysis(&zcu.intern_pool); + break :failed a.state != .success; + }) { + // This `Decl` is failed, so was never sent to codegen. + // TODO: we should probably tell the backend to delete any old exports of this `Decl`? + return; + }, + .value => {}, + } + if (zcu.comp.bin_file) |lf| { try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices)); } else if (zcu.llvm_object) |llvm_object| { From 1b34ae19beffcaa988de636c05264b3e9357a66f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 9 Jul 2024 12:51:48 -0400 Subject: [PATCH 078/152] debug: prevent segfaults on linux --- lib/std/debug.zig | 116 +++++++++++++++++++++++++++++++++---------- lib/std/os/linux.zig | 8 +-- 2 files changed, 93 insertions(+), 31 deletions(-) diff --git a/lib/std/debug.zig b/lib/std/debug.zig index fb6609b8b18c..5fc27a02f403 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -570,6 +570,7 @@ pub const StackIterator = struct { first_address: ?usize, // Last known value of the frame pointer register. fp: usize, + ma: MemoryAccessor = MemoryAccessor.init, // When DebugInfo and a register context is available, this iterator can unwind // stacks with frames that don't use a frame pointer (ie. 
-fomit-frame-pointer), @@ -616,16 +617,16 @@ pub const StackIterator = struct { } } - pub fn deinit(self: *StackIterator) void { - if (have_ucontext and self.unwind_state != null) self.unwind_state.?.dwarf_context.deinit(); + pub fn deinit(it: *StackIterator) void { + if (have_ucontext and it.unwind_state != null) it.unwind_state.?.dwarf_context.deinit(); } - pub fn getLastError(self: *StackIterator) ?struct { + pub fn getLastError(it: *StackIterator) ?struct { err: UnwindError, address: usize, } { if (!have_ucontext) return null; - if (self.unwind_state) |*unwind_state| { + if (it.unwind_state) |*unwind_state| { if (unwind_state.last_error) |err| { unwind_state.last_error = null; return .{ @@ -662,14 +663,14 @@ pub const StackIterator = struct { else @sizeOf(usize); - pub fn next(self: *StackIterator) ?usize { - var address = self.next_internal() orelse return null; + pub fn next(it: *StackIterator) ?usize { + var address = it.next_internal() orelse return null; - if (self.first_address) |first_address| { + if (it.first_address) |first_address| { while (address != first_address) { - address = self.next_internal() orelse return null; + address = it.next_internal() orelse return null; } - self.first_address = null; + it.first_address = null; } return address; @@ -718,8 +719,74 @@ pub const StackIterator = struct { } } - fn next_unwind(self: *StackIterator) !usize { - const unwind_state = &self.unwind_state.?; + pub const MemoryAccessor = struct { + var cached_pid: posix.pid_t = -1; + + mem: switch (native_os) { + .linux => File, + else => void, + }, + + pub const init: MemoryAccessor = .{ + .mem = switch (native_os) { + .linux => .{ .handle = -1 }, + else => {}, + }, + }; + + fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool { + switch (native_os) { + .linux => while (true) switch (ma.mem.handle) { + -2 => break, + -1 => { + const linux = std.os.linux; + const pid = switch (@atomicLoad(posix.pid_t, &cached_pid, .monotonic)) { + -1 => pid: { + const pid = linux.getpid(); + @atomicStore(posix.pid_t, &cached_pid, pid, .monotonic); + break :pid pid; + }, + else => |pid| pid, + }; + const bytes_read = linux.process_vm_readv( + pid, + &.{.{ .base = buf.ptr, .len = buf.len }}, + &.{.{ .base = @ptrFromInt(address), .len = buf.len }}, + 0, + ); + switch (linux.E.init(bytes_read)) { + .SUCCESS => return bytes_read == buf.len, + .FAULT => return false, + .INVAL, .PERM, .SRCH => unreachable, // own pid is always valid + .NOMEM, .NOSYS => {}, + else => unreachable, // unexpected + } + var path_buf: [ + std.fmt.count("/proc/{d}/mem", .{math.minInt(posix.pid_t)}) + ]u8 = undefined; + const path = std.fmt.bufPrint(&path_buf, "/proc/{d}/mem", .{pid}) catch + unreachable; + ma.mem = std.fs.openFileAbsolute(path, .{}) catch { + ma.mem.handle = -2; + break; + }; + }, + else => return (ma.mem.pread(buf, address) catch return false) == buf.len, + }, + else => {}, + } + if (!isValidMemory(address)) return false; + @memcpy(buf, @as([*]const u8, @ptrFromInt(address))); + return true; + } + pub fn load(ma: *MemoryAccessor, comptime Type: type, address: usize) ?Type { + var result: Type = undefined; + return if (ma.read(address, std.mem.asBytes(&result))) result else null; + } + }; + + fn next_unwind(it: *StackIterator) !usize { + const unwind_state = &it.unwind_state.?; const module = try unwind_state.debug_info.getModuleForAddress(unwind_state.dwarf_context.pc); switch (native_os) { .macos, .ios, .watchos, .tvos, .visionos => { @@ -741,13 +808,13 @@ pub const StackIterator = struct { } else return 
error.MissingDebugInfo; } - fn next_internal(self: *StackIterator) ?usize { + fn next_internal(it: *StackIterator) ?usize { if (have_ucontext) { - if (self.unwind_state) |*unwind_state| { + if (it.unwind_state) |*unwind_state| { if (!unwind_state.failed) { if (unwind_state.dwarf_context.pc == 0) return null; - defer self.fp = unwind_state.dwarf_context.getFp() catch 0; - if (self.next_unwind()) |return_address| { + defer it.fp = unwind_state.dwarf_context.getFp() catch 0; + if (it.next_unwind()) |return_address| { return return_address; } else |err| { unwind_state.last_error = err; @@ -763,29 +830,24 @@ pub const StackIterator = struct { const fp = if (comptime native_arch.isSPARC()) // On SPARC the offset is positive. (!) - math.add(usize, self.fp, fp_offset) catch return null + math.add(usize, it.fp, fp_offset) catch return null else - math.sub(usize, self.fp, fp_offset) catch return null; + math.sub(usize, it.fp, fp_offset) catch return null; // Sanity check. - if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp)) + if (fp == 0 or !mem.isAligned(fp, @alignOf(usize))) return null; + const new_fp = math.add(usize, it.ma.load(usize, fp) orelse return null, fp_bias) catch return null; - const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null; - // Sanity check: the stack grows down thus all the parent frames must be // be at addresses that are greater (or equal) than the previous one. // A zero frame pointer often signals this is the last frame, that case // is gracefully handled by the next call to next_internal. - if (new_fp != 0 and new_fp < self.fp) + if (new_fp != 0 and new_fp < it.fp) return null; + const new_pc = it.ma.load(usize, math.add(usize, fp, pc_offset) catch return null) orelse return null; - const new_pc = @as( - *const usize, - @ptrFromInt(math.add(usize, fp, pc_offset) catch return null), - ).*; - - self.fp = new_fp; + it.fp = new_fp; return new_pc; } diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 3deac7a2f6ef..53343f92e0d0 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -1519,15 +1519,15 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize { } pub fn setsid() pid_t { - return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.setsid))))); + return @bitCast(@as(u32, @truncate(syscall0(.setsid)))); } pub fn getpid() pid_t { - return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid))))); + return @bitCast(@as(u32, @truncate(syscall0(.getpid)))); } pub fn gettid() pid_t { - return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid))))); + return @bitCast(@as(u32, @truncate(syscall0(.gettid)))); } pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize { @@ -2116,7 +2116,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u ); } -pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize { +pub fn process_vm_readv(pid: pid_t, local: []const iovec, remote: []const iovec_const, flags: usize) usize { return syscall6( .process_vm_readv, @as(usize, @bitCast(@as(isize, pid))), From 2511830442fd96d04f578f2c4251b1994f08c994 Mon Sep 17 00:00:00 2001 From: Ian Johnson Date: Sun, 7 Jul 2024 22:12:37 -0400 Subject: [PATCH 079/152] Autodoc: only group structs under "namespaces" The old heuristic of checking only for the number of fields has the downside of classifying all opaque types, such as `std.c.FILE`, as "namespaces" rather than "types". 
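For illustration, here is a minimal sketch of how the new rule classifies a few declarations (these names are hypothetical, not taken from the patch): only a field-less `struct` is a "namespace"; any other container, including opaque types, is grouped under "types".

```zig
const Math = struct {
    pub fn add(a: u32, b: u32) u32 {
        return a + b;
    }
}; // struct with no fields: still a "namespace"

const Point = struct { x: f32, y: f32 }; // struct with fields: a "type"
const Color = enum { red, green, blue }; // non-struct container: a "type"
const FILE = opaque {}; // previously misfiled as a "namespace": now a "type"
```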
--- lib/docs/main.js | 30 +++++++++++----------- lib/docs/wasm/Decl.zig | 4 +-- lib/docs/wasm/Walk.zig | 57 ++++++++++++++++++++++++++---------------- lib/docs/wasm/main.zig | 9 +------ 4 files changed, 53 insertions(+), 47 deletions(-) diff --git a/lib/docs/main.js b/lib/docs/main.js index d9888eac6898..49c277b9f512 100644 --- a/lib/docs/main.js +++ b/lib/docs/main.js @@ -1,14 +1,15 @@ (function() { const CAT_namespace = 0; - const CAT_global_variable = 1; - const CAT_function = 2; - const CAT_primitive = 3; - const CAT_error_set = 4; - const CAT_global_const = 5; - const CAT_alias = 6; - const CAT_type = 7; - const CAT_type_type = 8; - const CAT_type_function = 9; + const CAT_container = 1; + const CAT_global_variable = 2; + const CAT_function = 3; + const CAT_primitive = 4; + const CAT_error_set = 5; + const CAT_global_const = 6; + const CAT_alias = 7; + const CAT_type = 8; + const CAT_type_type = 9; + const CAT_type_function = 10; const domDocTestsCode = document.getElementById("docTestsCode"); const domFnErrorsAnyError = document.getElementById("fnErrorsAnyError"); @@ -184,6 +185,7 @@ const category = wasm_exports.categorize_decl(decl_index, 0); switch (category) { case CAT_namespace: + case CAT_container: return renderNamespacePage(decl_index); case CAT_global_variable: case CAT_primitive: @@ -426,16 +428,12 @@ while (true) { const member_category = wasm_exports.categorize_decl(member, 0); switch (member_category) { - case CAT_namespace: - if (wasm_exports.decl_field_count(member) > 0) { - typesList.push({original: original, member: member}); - } else { - namespacesList.push({original: original, member: member}); - } - continue member_loop; case CAT_namespace: namespacesList.push({original: original, member: member}); continue member_loop; + case CAT_container: + typesList.push({original: original, member: member}); + continue member_loop; case CAT_global_variable: varsList.push(member); continue member_loop; diff --git a/lib/docs/wasm/Decl.zig b/lib/docs/wasm/Decl.zig index 1df7790582f7..0260ce02850d 100644 --- a/lib/docs/wasm/Decl.zig +++ b/lib/docs/wasm/Decl.zig @@ -115,7 +115,7 @@ pub fn categorize(decl: *const Decl) Walk.Category { pub fn get_child(decl: *const Decl, name: []const u8) ?Decl.Index { switch (decl.categorize()) { .alias => |aliasee| return aliasee.get().get_child(name), - .namespace => |node| { + .namespace, .container => |node| { const file = decl.file.get(); const scope = file.scopes.get(node) orelse return null; const child_node = scope.get_child(name) orelse return null; @@ -128,7 +128,7 @@ pub fn get_child(decl: *const Decl, name: []const u8) ?Decl.Index { /// Looks up a decl by name accessible in `decl`'s namespace. pub fn lookup(decl: *const Decl, name: []const u8) ?Decl.Index { const namespace_node = switch (decl.categorize()) { - .namespace => |node| node, + .namespace, .container => |node| node, else => decl.parent.get().ast_node, }; const file = decl.file.get(); diff --git a/lib/docs/wasm/Walk.zig b/lib/docs/wasm/Walk.zig index 3f7ccc4bac3b..a22da861a8ac 100644 --- a/lib/docs/wasm/Walk.zig +++ b/lib/docs/wasm/Walk.zig @@ -7,7 +7,10 @@ file: File.Index, /// keep in sync with "CAT_" constants in main.js pub const Category = union(enum(u8)) { + /// A struct type used only to group declarations. namespace: Ast.Node.Index, + /// A container type (struct, union, enum, opaque). + container: Ast.Node.Index, global_variable: Ast.Node.Index, /// A function that has not been detected as returning a type. 
function: Ast.Node.Index, @@ -45,13 +48,6 @@ pub const File = struct { return file.node_decls.get(decl_node) orelse return .none; } - pub fn field_count(file: *const File, node: Ast.Node.Index) u32 { - const scope = file.scopes.get(node) orelse return 0; - if (scope.tag != .namespace) return 0; - const namespace: *Scope.Namespace = @alignCast(@fieldParentPtr("base", scope)); - return namespace.field_count; - } - pub const Index = enum(u32) { _, @@ -87,7 +83,18 @@ pub const File = struct { const node_tags = ast.nodes.items(.tag); const token_tags = ast.tokens.items(.tag); switch (node_tags[node]) { - .root => return .{ .namespace = node }, + .root => { + for (ast.rootDecls()) |member| { + switch (node_tags[member]) { + .container_field_init, + .container_field_align, + .container_field, + => return .{ .container = node }, + else => {}, + } + } + return .{ .namespace = node }; + }, .global_var_decl, .local_var_decl, @@ -122,7 +129,7 @@ pub const File = struct { full: Ast.full.FnProto, ) Category { return switch (categorize_expr(file_index, full.ast.return_type)) { - .namespace, .error_set, .type_type => .{ .type_function = node }, + .namespace, .container, .error_set, .type_type => .{ .type_function = node }, else => .{ .function = node }, }; } @@ -140,6 +147,7 @@ pub const File = struct { const node_tags = ast.nodes.items(.tag); const node_datas = ast.nodes.items(.data); const main_tokens = ast.nodes.items(.main_token); + const token_tags = ast.tokens.items(.tag); //log.debug("categorize_expr tag {s}", .{@tagName(node_tags[node])}); return switch (node_tags[node]) { .container_decl, @@ -154,7 +162,23 @@ pub const File = struct { .tagged_union_enum_tag_trailing, .tagged_union_two, .tagged_union_two_trailing, - => .{ .namespace = node }, + => { + var buf: [2]Ast.Node.Index = undefined; + const container_decl = ast.fullContainerDecl(&buf, node).?; + if (token_tags[container_decl.ast.main_token] != .keyword_struct) { + return .{ .container = node }; + } + for (container_decl.ast.members) |member| { + switch (node_tags[member]) { + .container_field_init, + .container_field_align, + .container_field, + => return .{ .container = node }, + else => {}, + } + } + return .{ .namespace = node }; + }, .error_set_decl, .merge_error_sets, @@ -240,6 +264,7 @@ pub const File = struct { return .{ .error_set = node }; } else if (then_cat == .type or else_cat == .type or then_cat == .namespace or else_cat == .namespace or + then_cat == .container or else_cat == .container or then_cat == .error_set or else_cat == .error_set or then_cat == .type_function or else_cat == .type_function) { @@ -346,7 +371,7 @@ pub const File = struct { any_type = true; all_type_type = false; }, - .type, .namespace, .type_function => { + .type, .namespace, .container, .type_function => { any_type = true; all_error_set = false; all_type_type = false; @@ -431,7 +456,6 @@ pub const Scope = struct { names: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{}, doctests: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{}, decl_index: Decl.Index, - field_count: u32, }; fn getNamespaceDecl(start_scope: *Scope) Decl.Index { @@ -500,7 +524,6 @@ fn struct_decl( namespace.* = .{ .parent = scope, .decl_index = parent_decl, - .field_count = 0, }; try w.file.get().scopes.putNoClobber(gpa, node, &namespace.base); try w.scanDecls(namespace, container_decl.ast.members); @@ -1061,14 +1084,6 @@ fn scanDecls(w: *Walk, namespace: *Scope.Namespace, members: []const Ast.Node.In continue; }, - .container_field_init, - .container_field_align, - 
.container_field, - => { - namespace.field_count += 1; - continue; - }, - else => continue, }; diff --git a/lib/docs/wasm/main.zig b/lib/docs/wasm/main.zig index 7108b1bd8427..f5ce02d7d669 100644 --- a/lib/docs/wasm/main.zig +++ b/lib/docs/wasm/main.zig @@ -274,13 +274,6 @@ export fn fn_error_set_decl(decl_index: Decl.Index, node: Ast.Node.Index) Decl.I }; } -export fn decl_field_count(decl_index: Decl.Index) u32 { - switch (decl_index.get().categorize()) { - .namespace => |node| return decl_index.get().file.get().field_count(node), - else => return 0, - } -} - fn decl_error_set_fallible(decl_index: Decl.Index) Oom![]ErrorIdentifier { error_set_result.clearRetainingCapacity(); try addErrorsFromDecl(decl_index, &error_set_result); @@ -583,7 +576,7 @@ export fn decl_category_name(decl_index: Decl.Index) String { const ast = decl.file.get_ast(); const token_tags = ast.tokens.items(.tag); const name = switch (decl.categorize()) { - .namespace => |node| { + .namespace, .container => |node| { const node_tags = ast.nodes.items(.tag); if (node_tags[decl.ast_node] == .root) return String.init("struct"); From c1e7eb738934399737b3a8452ad9b68bb26805d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Anic=CC=81?= Date: Mon, 8 Jul 2024 23:37:44 +0200 Subject: [PATCH 080/152] crypto.Certificate: case insensitive host name check This makes comparing host name with dns name from certificate case insensitive. I found a few domains (from the [cloudflare](https://radar.cloudflare.com/domains) list of top domains) for which tls.Client fails to connect. Error is: ```zig error: TlsInitializationFailed Code/zig/lib/std/crypto/Certificate.zig:336:9: 0x1177b1f in verifyHostName (http_get_std) return error.CertificateHostMismatch; Code/zig/lib/std/crypto/tls23/handshake_client.zig:461:25: 0x11752bd in parseServerCertificate (http_get_std) try subject.verifyHostName(opt.host); ``` In its certificate this domains have host names which are not strictly lower case. This is what checkHostName is comparing: |host_name | dns_name | |------------------------------------------------| |ey.com | EY.COM | |truist.com | Truist.com | |wscampanhas.bradesco | WSCAMPANHAS.BRADESCO | |dell.com | Dell.com | From [RFC2818](https://datatracker.ietf.org/doc/html/rfc2818#section-2.4): > Matching is performed using the matching rules specified by [RFC2459]. From [RFC2459](https://datatracker.ietf.org/doc/html/rfc2459#section-4.2.1.7): > When comparing URIs, conforming implementations > MUST compare the scheme and host without regard to case, but assume > the remainder of the scheme-specific-part is case sensitive. 
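Taken in isolation, the fix swaps exact byte equality for ASCII case-insensitive equality when comparing names, as this minimal sketch using only `std` shows:

```zig
const std = @import("std");

test "host name comparison is case-insensitive" {
    // Exact comparison rejects the mixed-case names observed above...
    try std.testing.expect(!std.mem.eql(u8, "truist.com", "Truist.com"));
    // ...while the RFC-mandated comparison accepts them.
    try std.testing.expect(std.ascii.eqlIgnoreCase("truist.com", "Truist.com"));
}
```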
Testing with: ``` const std = @import("std"); pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = gpa.allocator(); const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); if (args.len > 1) { const domain = args[1]; var client: std.http.Client = .{ .allocator = allocator }; defer client.deinit(); // Add https:// prefix if needed const url = brk: { const scheme = "https://"; if (domain.len >= scheme.len and std.mem.eql(u8, domain[0..scheme.len], scheme)) break :brk domain; var url_buf: [128]u8 = undefined; break :brk try std.fmt.bufPrint(&url_buf, "https://{s}", .{domain}); }; const uri = try std.Uri.parse(url); var server_header_buffer: [16 * 1024]u8 = undefined; var req = try client.open(.GET, uri, .{ .server_header_buffer = &server_header_buffer }); defer req.deinit(); try req.send(); try req.wait(); } } ``` `$ zig run example/main.zig -- truist.com ` --- lib/std/crypto/Certificate.zig | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig index 1e3bb9ca0b4e..9ddd587a190a 100644 --- a/lib/std/crypto/Certificate.zig +++ b/lib/std/crypto/Certificate.zig @@ -345,7 +345,7 @@ pub const Parsed = struct { // component or component fragment. E.g., *.a.com matches foo.a.com but // not bar.foo.a.com. f*.com matches foo.com but not bar.com. fn checkHostName(host_name: []const u8, dns_name: []const u8) bool { - if (mem.eql(u8, dns_name, host_name)) { + if (std.ascii.eqlIgnoreCase(dns_name, host_name)) { return true; // exact match } @@ -362,7 +362,7 @@ pub const Parsed = struct { // If not a wildcard and they dont // match then there is no match. - if (mem.eql(u8, dns.?, "*") == false and mem.eql(u8, dns.?, host.?) == false) { + if (mem.eql(u8, dns.?, "*") == false and std.ascii.eqlIgnoreCase(dns.?, host.?) 
== false) { return false; } }; @@ -381,6 +381,9 @@ test "Parsed.checkHostName" { try expectEqual(false, Parsed.checkHostName("foo.bar.ziglang.org", "*.ziglang.org")); try expectEqual(false, Parsed.checkHostName("ziglang.org", "zig*.org")); try expectEqual(false, Parsed.checkHostName("lang.org", "zig*.org")); + // host name check should be case insensitive + try expectEqual(true, Parsed.checkHostName("ziglang.org", "Ziglang.org")); + try expectEqual(true, Parsed.checkHostName("bar.ziglang.org", "*.Ziglang.ORG")); } pub const ParseError = der.Element.ParseElementError || ParseVersionError || ParseTimeError || ParseEnumError || ParseBitStringError; From 0cc42d090fefcdd10ab64f6ae484404a6c6a710e Mon Sep 17 00:00:00 2001 From: Krzysztof Wolicki Date: Tue, 9 Jul 2024 22:36:38 +0200 Subject: [PATCH 081/152] std.fs.Dir: Rename OpenDirOptions to OpenOptions (#20542) * std.fs.Dir: Rename OpenDirOptions to OpenOptions https://ziglang.org/documentation/master/#Avoid-Redundant-Names-in-Fully-Qualified-Namespaces * std.fs.Dir: Add deprecated alias `OpenDirOptions` --- lib/std/Build/Cache/Path.zig | 2 +- lib/std/fs.zig | 6 +++--- lib/std/fs/Dir.zig | 19 +++++++++++-------- lib/std/posix.zig | 2 +- lib/std/testing.zig | 2 +- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index d7266da9b0cc..48bb8c32beab 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -58,7 +58,7 @@ pub fn openFile( return p.root_dir.handle.openFile(joined_path, flags); } -pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenDirOptions) !fs.Dir { +pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenOptions) !fs.Dir { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ diff --git a/lib/std/fs.zig b/lib/std/fs.zig index e56d68e40728..8dc79f9ef4df 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -278,18 +278,18 @@ pub fn defaultWasiCwd() std.os.wasi.fd_t { /// On Windows, `absolute_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/). /// On WASI, `absolute_path` should be encoded as valid UTF-8. /// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding. -pub fn openDirAbsolute(absolute_path: []const u8, flags: Dir.OpenDirOptions) File.OpenError!Dir { +pub fn openDirAbsolute(absolute_path: []const u8, flags: Dir.OpenOptions) File.OpenError!Dir { assert(path.isAbsolute(absolute_path)); return cwd().openDir(absolute_path, flags); } /// Same as `openDirAbsolute` but the path parameter is null-terminated. -pub fn openDirAbsoluteZ(absolute_path_c: [*:0]const u8, flags: Dir.OpenDirOptions) File.OpenError!Dir { +pub fn openDirAbsoluteZ(absolute_path_c: [*:0]const u8, flags: Dir.OpenOptions) File.OpenError!Dir { assert(path.isAbsoluteZ(absolute_path_c)); return cwd().openDirZ(absolute_path_c, flags); } /// Same as `openDirAbsolute` but the path parameter is null-terminated. 
-pub fn openDirAbsoluteW(absolute_path_c: [*:0]const u16, flags: Dir.OpenDirOptions) File.OpenError!Dir { +pub fn openDirAbsoluteW(absolute_path_c: [*:0]const u16, flags: Dir.OpenOptions) File.OpenError!Dir { assert(path.isAbsoluteWindowsW(absolute_path_c)); return cwd().openDirW(absolute_path_c, flags); } diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index 597c158d631b..c7a436bc73af 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -740,7 +740,7 @@ pub const Walker = struct { /// Recursively iterates over a directory. /// -/// `self` must have been opened with `OpenDirOptions{.iterate = true}`. +/// `self` must have been opened with `OpenOptions{.iterate = true}`. /// /// `Walker.deinit` releases allocated memory and directory handles. /// @@ -1233,7 +1233,7 @@ fn makeOpenPathAccessMaskW(self: Dir, sub_path: []const u8, access_mask: u32, no /// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/). /// On WASI, `sub_path` should be encoded as valid UTF-8. /// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding. -pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenDirOptions) (MakeError || OpenError || StatFileError)!Dir { +pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenOptions) (MakeError || OpenError || StatFileError)!Dir { return switch (native_os) { .windows => { const w = windows; @@ -1393,7 +1393,10 @@ pub fn setAsCwd(self: Dir) !void { try posix.fchdir(self.fd); } -pub const OpenDirOptions = struct { +/// Deprecated: use `OpenOptions` +pub const OpenDirOptions = OpenOptions; + +pub const OpenOptions = struct { /// `true` means the opened directory can be used as the `Dir` parameter /// for functions which operate based on an open directory handle. When `false`, /// such operations are Illegal Behavior. @@ -1415,7 +1418,7 @@ pub const OpenDirOptions = struct { /// On WASI, `sub_path` should be encoded as valid UTF-8. /// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding. /// Asserts that the path parameter has no null bytes. -pub fn openDir(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError!Dir { +pub fn openDir(self: Dir, sub_path: []const u8, args: OpenOptions) OpenError!Dir { switch (native_os) { .windows => { const sub_path_w = try windows.sliceToPrefixedFileW(self.fd, sub_path); @@ -1473,7 +1476,7 @@ pub fn openDir(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError! } /// Same as `openDir` except the parameter is null-terminated. -pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenDirOptions) OpenError!Dir { +pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenOptions) OpenError!Dir { switch (native_os) { .windows => { const sub_path_w = try windows.cStrToPrefixedFileW(self.fd, sub_path_c); @@ -1530,7 +1533,7 @@ pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenDirOptions) Open /// Same as `openDir` except the path parameter is WTF-16 LE encoded, NT-prefixed. /// This function asserts the target OS is Windows. 
-pub fn openDirW(self: Dir, sub_path_w: [*:0]const u16, args: OpenDirOptions) OpenError!Dir { +pub fn openDirW(self: Dir, sub_path_w: [*:0]const u16, args: OpenOptions) OpenError!Dir { const w = windows; // TODO remove some of these flags if args.access_sub_paths is false const base_flags = w.STANDARD_RIGHTS_READ | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA | @@ -2650,7 +2653,7 @@ pub const ChmodError = File.ChmodError; /// The process must have the correct privileges in order to do this /// successfully, or must have the effective user ID matching the owner /// of the directory. Additionally, the directory must have been opened -/// with `OpenDirOptions{ .iterate = true }`. +/// with `OpenOptions{ .iterate = true }`. pub fn chmod(self: Dir, new_mode: File.Mode) ChmodError!void { const file: File = .{ .handle = self.fd }; try file.chmod(new_mode); @@ -2660,7 +2663,7 @@ pub fn chmod(self: Dir, new_mode: File.Mode) ChmodError!void { /// The process must have the correct privileges in order to do this /// successfully. The group may be changed by the owner of the directory to /// any group of which the owner is a member. Additionally, the directory -/// must have been opened with `OpenDirOptions{ .iterate = true }`. If the +/// must have been opened with `OpenOptions{ .iterate = true }`. If the /// owner or group is specified as `null`, the ID is not changed. pub fn chown(self: Dir, owner: ?File.Uid, group: ?File.Gid) ChownError!void { const file: File = .{ .handle = self.fd }; diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 57eae4bbd66f..e2af96c48749 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -493,7 +493,7 @@ pub fn fchown(fd: fd_t, owner: ?uid_t, group: ?gid_t) FChownError!void { switch (errno(res)) { .SUCCESS => return, .INTR => continue, - .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `OpenDirOptions{ .iterate = true }` + .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `Dir.OpenOptions{ .iterate = true }` .FAULT => unreachable, .INVAL => unreachable, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 341161a64ba1..190182db70fb 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -558,7 +558,7 @@ pub const TmpDir = struct { } }; -pub fn tmpDir(opts: std.fs.Dir.OpenDirOptions) TmpDir { +pub fn tmpDir(opts: std.fs.Dir.OpenOptions) TmpDir { var random_bytes: [TmpDir.random_bytes_count]u8 = undefined; std.crypto.random.bytes(&random_bytes); var sub_path: [TmpDir.sub_path_len]u8 = undefined; From 1824bee579fffad3f17b639ebb1a94fd890ad68d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 Jul 2024 17:58:41 -0400 Subject: [PATCH 082/152] Progress: suppress tsan races This removes the undefined behavior, but not the actual races. 
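For illustration, the pattern applied here — a minimal sketch, not the actual std.Progress code — replaces plain writes to shared storage with per-element monotonic atomic stores, so that a concurrent reader constitutes a benign (if still logically racy) access rather than undefined behavior:

    /// Shared storage that one thread writes while another reads.
    var shared_name: [8]u8 = undefined;

    /// Writer side: store each byte atomically instead of using @memcpy,
    /// so concurrent readers observe a torn value at worst, not UB.
    fn setName(name: []const u8) void {
        const len = @min(shared_name.len, name.len);
        for (shared_name[0..len], name[0..len]) |*dest, src|
            @atomicStore(u8, dest, src, .monotonic);
        for (shared_name[len..]) |*dest|
            @atomicStore(u8, dest, 0, .monotonic);
    }

    /// Reader side: load each byte atomically; the result may mix old
    /// and new bytes, but each load itself is well-defined.
    fn getName(out: *[8]u8) void {
        for (out, &shared_name) |*dest, *src|
            dest.* = @atomicLoad(u8, src, .monotonic);
    }

This is why tsan stops reporting the accesses even though the logical race is unchanged: the sanitizer only flags non-atomic conflicting accesses.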
Closes #20477 --- lib/std/Progress.zig | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 2028e95dd550..dcbb12e69bc2 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -285,13 +285,11 @@ pub const Node = struct { assert(parent == .none or @intFromEnum(parent) < node_storage_buffer_len); const storage = storageByIndex(free_index); - storage.* = .{ - .completed_count = 0, - .estimated_total_count = std.math.lossyCast(u32, estimated_total_items), - .name = [1]u8{0} ** max_name_len, - }; + @atomicStore(u32, &storage.completed_count, 0, .monotonic); + @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, estimated_total_items), .monotonic); const name_len = @min(max_name_len, name.len); - @memcpy(storage.name[0..name_len], name[0..name_len]); + for (storage.name[0..name_len], name[0..name_len]) |*dest, src| @atomicStore(u8, dest, src, .monotonic); + for (storage.name[name_len..]) |*dest| @atomicStore(u8, dest, 0, .monotonic); const parent_ptr = parentByIndex(free_index); assert(parent_ptr.* == .unused); @@ -765,7 +763,7 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); while (begin_parent != .unused) { const dest_storage = &serialized_buffer.storage[serialized_len]; - @memcpy(&dest_storage.name, &storage_ptr.name); + for (&dest_storage.name, &storage_ptr.name) |*dest, *src| dest.* = @atomicLoad(u8, src, .monotonic); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); From 49f2cca872c24d617082e9b21cbd8ac50e96bc50 Mon Sep 17 00:00:00 2001 From: Tw Date: Mon, 8 Jul 2024 15:19:32 +0800 Subject: [PATCH 083/152] bpf: sync map/prog/attach type with latest linux kernel Note that the original `cgroup_storage` MapType has been deprecated, so renamed to `cgroup_storage_deprecated`. 
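For downstream code this is a breaking rename. A hypothetical before/after sketch (the `std.os.linux.BPF` import path is an assumption, and the enum values follow this patch rather than any released std API):

    const std = @import("std");
    const BPF = std.os.linux.BPF; // import path is an assumption

    // Before this patch, the legacy per-cgroup storage map was:
    //     const ty: BPF.MapType = .cgroup_storage;
    // After it, the old variant must be named explicitly, because
    // `.cgroup_storage` now selects the newer kernel map type.
    const legacy: BPF.MapType = .cgroup_storage_deprecated;
    const current: BPF.MapType = .cgroup_storage;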
Signed-off-by: Tw --- lib/std/os/linux/bpf.zig | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 61c704fce947..d20ebdd6c8b7 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -1001,7 +1001,7 @@ pub const MapType = enum(u32) { cpumap, xskmap, sockhash, - cgroup_storage, + cgroup_storage_deprecated, reuseport_sockarray, percpu_cgroup_storage, queue, @@ -1044,6 +1044,12 @@ pub const MapType = enum(u32) { /// value size: 0 /// max entries: size of ringbuf, must be power of 2 ringbuf, + inode_storage, + task_storage, + bloom_filter, + user_ringbuf, + cgroup_storage, + arena, _, }; @@ -1144,6 +1150,9 @@ pub const ProgType = enum(u32) { /// context type: void * syscall, + /// context type: bpf_nf_ctx + netfilter, + _, }; @@ -1186,6 +1195,25 @@ pub const AttachType = enum(u32) { xdp_cpumap, sk_lookup, xdp, + sk_skb_verdict, + sk_reuseport_select, + sk_reuseport_select_or_migrate, + perf_event, + trace_kprobe_multi, + lsm_cgroup, + struct_ops, + netfilter, + tcx_ingress, + tcx_egress, + trace_uprobe_multi, + cgroup_unix_connect, + cgroup_unix_sendmsg, + cgroup_unix_recvmsg, + cgroup_unix_getpeername, + cgroup_unix_getsockname, + netkit_primary, + netkit_peer, + trace_kprobe_session, _, }; From 13070448f5f1dba172946e6a1e1a5c885093cad8 Mon Sep 17 00:00:00 2001 From: Jora Troosh Date: Wed, 10 Jul 2024 00:25:42 +0300 Subject: [PATCH 084/152] std: fix typos (#20560) --- doc/langref.html.in | 4 ++-- doc/langref/test_coerce_tuples_arrays.zig | 2 +- lib/std/BitStack.zig | 4 ++-- lib/std/Build.zig | 2 +- lib/std/Target/aarch64.zig | 2 +- lib/std/Target/arm.zig | 2 +- lib/std/Target/csky.zig | 14 +++++++------- lib/std/Target/riscv.zig | 4 ++-- lib/std/array_list.zig | 8 ++++---- lib/std/c/darwin.zig | 2 +- lib/std/coff.zig | 2 +- lib/std/compress/flate.zig | 4 ++-- lib/std/compress/flate/CircularBuffer.zig | 2 +- lib/std/compress/flate/Lookup.zig | 2 +- lib/std/compress/flate/SlidingWindow.zig | 2 +- lib/std/compress/flate/Token.zig | 2 +- lib/std/compress/flate/bit_reader.zig | 8 ++++---- lib/std/compress/flate/block_writer.zig | 2 +- lib/std/compress/flate/consts.zig | 2 +- lib/std/compress/flate/deflate.zig | 10 +++++----- lib/std/compress/flate/huffman_decoder.zig | 6 +++--- lib/std/compress/flate/huffman_encoder.zig | 2 +- lib/std/compress/flate/inflate.zig | 6 +++--- lib/std/crypto/Certificate.zig | 2 +- lib/std/crypto/ff.zig | 2 +- lib/std/crypto/ml_kem.zig | 4 ++-- lib/std/crypto/salsa20.zig | 2 +- lib/std/crypto/sha2.zig | 2 +- lib/std/crypto/tls.zig | 4 ++-- lib/std/crypto/tls/Client.zig | 2 +- lib/std/dwarf/abi.zig | 2 +- lib/std/dwarf/expressions.zig | 6 +++--- lib/std/hash/crc/impl.zig | 2 +- lib/std/hash/wyhash.zig | 2 +- lib/std/meta.zig | 2 +- lib/std/os/linux.zig | 8 ++++---- lib/std/os/linux/IoUring.zig | 4 ++-- lib/std/os/linux/bpf.zig | 2 +- lib/std/os/linux/mips.zig | 2 +- lib/std/os/linux/mips64.zig | 2 +- lib/std/os/linux/sparc64.zig | 2 +- lib/std/os/plan9.zig | 2 +- lib/std/posix.zig | 2 +- lib/std/priority_queue.zig | 2 +- lib/std/process.zig | 4 ++-- lib/std/sort/pdq.zig | 2 +- lib/std/tar.zig | 4 ++-- lib/std/tar/test.zig | 2 +- lib/std/valgrind/callgrind.zig | 2 +- lib/std/zig/AstGen.zig | 10 +++++----- lib/std/zig/AstRlAnnotate.zig | 4 ++-- lib/std/zig/WindowsSdk.zig | 6 +++--- lib/std/zig/parser_test.zig | 2 +- lib/std/zig/system.zig | 2 +- 54 files changed, 95 insertions(+), 95 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 
d20c9028687d..aa606abd69fa 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -3498,7 +3498,7 @@ void do_a_thing(struct Foo *foo) {

As a motivating example, consider the statement {#syntax#}const x: u32 = 42;{#endsyntax#}. The type annotation here provides a result type of {#syntax#}u32{#endsyntax#} to the initialization expression - {#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initally of type + {#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initially of type {#syntax#}comptime_int{#endsyntax#}) to this type. We will see more examples shortly.

@@ -6833,7 +6833,7 @@ coding style.

  • utils, misc, or somebody's initials
  • Everything is a value, all types are data, everything is context, all logic manages state. - Nothing is communicated by using a word that applies to all types.

    + Nothing is communicated by using a word that applies to all types.

    Temptation to use "utilities", "miscellaneous", or somebody's initials is a failure to categorize, or more commonly, overcategorization. Such declarations can live at the root of a module that needs them with no diff --git a/doc/langref/test_coerce_tuples_arrays.zig b/doc/langref/test_coerce_tuples_arrays.zig index 42ae40a866a9..6d0e63e15c1c 100644 --- a/doc/langref/test_coerce_tuples_arrays.zig +++ b/doc/langref/test_coerce_tuples_arrays.zig @@ -2,7 +2,7 @@ const std = @import("std"); const expect = std.testing.expect; const Tuple = struct { u8, u8 }; -test "coercion from homogenous tuple to array" { +test "coercion from homogeneous tuple to array" { const tuple: Tuple = .{ 5, 6 }; const array: [2]u8 = tuple; _ = array; diff --git a/lib/std/BitStack.zig b/lib/std/BitStack.zig index 592b541d49bb..685c39fea8f9 100644 --- a/lib/std/BitStack.zig +++ b/lib/std/BitStack.zig @@ -20,8 +20,8 @@ pub fn deinit(self: *@This()) void { self.* = undefined; } -pub fn ensureTotalCapacity(self: *@This(), bit_capcity: usize) Allocator.Error!void { - const byte_capacity = (bit_capcity + 7) >> 3; +pub fn ensureTotalCapacity(self: *@This(), bit_capacity: usize) Allocator.Error!void { + const byte_capacity = (bit_capacity + 7) >> 3; try self.bytes.ensureTotalCapacity(byte_capacity); } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 942a8af406f2..0255245a3c63 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -2055,7 +2055,7 @@ pub fn dependencyFromBuildZig( } const full_path = b.pathFromRoot("build.zig.zon"); - debug.panic("'{}' is not a build.zig struct of a dependecy in '{s}'", .{ build_zig, full_path }); + debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path }); } fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool { diff --git a/lib/std/Target/aarch64.zig b/lib/std/Target/aarch64.zig index ff402d50b314..a1f977d7747b 100644 --- a/lib/std/Target/aarch64.zig +++ b/lib/std/Target/aarch64.zig @@ -887,7 +887,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.nv)] = .{ .llvm_name = "nv", - .description = "Enable v8.4-A Nested Virtualization Enchancement (FEAT_NV, FEAT_NV2)", + .description = "Enable v8.4-A Nested Virtualization Enhancement (FEAT_NV, FEAT_NV2)", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.outline_atomics)] = .{ diff --git a/lib/std/Target/arm.zig b/lib/std/Target/arm.zig index e1758b37f795..686d55a64261 100644 --- a/lib/std/Target/arm.zig +++ b/lib/std/Target/arm.zig @@ -446,7 +446,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.fix_cmse_cve_2021_35465)] = .{ .llvm_name = "fix-cmse-cve-2021-35465", - .description = "Mitigate against the cve-2021-35465 security vulnurability", + .description = "Mitigate against the cve-2021-35465 security vulnerability", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.fix_cortex_a57_aes_1742098)] = .{ diff --git a/lib/std/Target/csky.zig b/lib/std/Target/csky.zig index 331e8057bfbd..66cd0fd6f032 100644 --- a/lib/std/Target/csky.zig +++ b/lib/std/Target/csky.zig @@ -214,7 +214,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.dsp_silan)] = .{ .llvm_name = "dsp_silan", - .description = "Enable DSP Silan instrutions", + .description = "Enable DSP Silan instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.dspe60)] = .{ @@ -224,7 +224,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.dspv2)] = .{ .llvm_name = "dspv2", - .description = "Enable 
DSP V2.0 instrutions", + .description = "Enable DSP V2.0 instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.e1)] = .{ @@ -243,7 +243,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.edsp)] = .{ .llvm_name = "edsp", - .description = "Enable DSP instrutions", + .description = "Enable DSP instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.elrw)] = .{ @@ -333,12 +333,12 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.hwdiv)] = .{ .llvm_name = "hwdiv", - .description = "Enable divide instrutions", + .description = "Enable divide instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.istack)] = .{ .llvm_name = "istack", - .description = "Enable interrput attribute", + .description = "Enable interrupt attribute", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.java)] = .{ @@ -362,7 +362,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.multiple_stld)] = .{ .llvm_name = "multiple_stld", - .description = "Enable multiple load/store instrutions", + .description = "Enable multiple load/store instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.nvic)] = .{ @@ -372,7 +372,7 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.pushpop)] = .{ .llvm_name = "pushpop", - .description = "Enable push/pop instrutions", + .description = "Enable push/pop instructions", .dependencies = featureSet(&[_]Feature{}), }; result[@intFromEnum(Feature.smart)] = .{ diff --git a/lib/std/Target/riscv.zig b/lib/std/Target/riscv.zig index 158468b7fb05..3e378fdd11fa 100644 --- a/lib/std/Target/riscv.zig +++ b/lib/std/Target/riscv.zig @@ -823,14 +823,14 @@ pub const all_features = blk: { }; result[@intFromEnum(Feature.zcmp)] = .{ .llvm_name = "zcmp", - .description = "'Zcmp' (sequenced instuctions for code-size reduction)", + .description = "'Zcmp' (sequenced instructions for code-size reduction)", .dependencies = featureSet(&[_]Feature{ .zca, }), }; result[@intFromEnum(Feature.zcmt)] = .{ .llvm_name = "zcmt", - .description = "'Zcmt' (table jump instuctions for code-size reduction)", + .description = "'Zcmt' (table jump instructions for code-size reduction)", .dependencies = featureSet(&[_]Feature{ .zca, .zicsr, diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 020c5391954b..29902db34fcb 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -170,7 +170,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// operations. /// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates all pre-existing element pointers if capacity must be - /// increased to accomodate the new elements. + /// increased to accommodate the new elements. /// Asserts that the index is in bounds or equal to the length. pub fn addManyAt(self: *Self, index: usize, count: usize) Allocator.Error![]T { const new_len = try addOrOom(self.items.len, count); @@ -227,7 +227,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// This operation is O(N). /// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates all pre-existing element pointers if capacity must be - /// increased to accomodate the new elements. + /// increased to accommodate the new elements. /// Asserts that the index is in bounds or equal to the length. 
pub fn insertSlice( self: *Self, @@ -740,7 +740,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// operations. /// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates all pre-existing element pointers if capacity must be - /// increased to accomodate the new elements. + /// increased to accommodate the new elements. /// Asserts that the index is in bounds or equal to the length. pub fn addManyAt( self: *Self, @@ -776,7 +776,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// This operation is O(N). /// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates all pre-existing element pointers if capacity must be - /// increased to accomodate the new elements. + /// increased to accommodate the new elements. /// Asserts that the index is in bounds or equal to the length. pub fn insertSlice( self: *Self, diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index ac2af38c1c02..bb641f5e97f7 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -2523,7 +2523,7 @@ pub const F = struct { /// add signature from same file (used by dyld for shared libs) pub const ADDFILESIGS = 61; /// used in conjunction with F.NOCACHE to indicate that DIRECT, synchronous writes - /// should not be used (i.e. its ok to temporaily create cached pages) + /// should not be used (i.e. its ok to temporarily create cached pages) pub const NODIRECT = 62; ///Get the protection class of a file from the EA, returns int pub const GETPROTECTIONCLASS = 63; diff --git a/lib/std/coff.zig b/lib/std/coff.zig index 211b1a181938..3f15352f436c 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -581,7 +581,7 @@ pub const SectionHeaderFlags = packed struct { /// This is valid for object files only. LNK_INFO: u1 = 0, - _reserverd_2: u1 = 0, + _reserved_2: u1 = 0, /// The section will not become part of the image. /// This is valid only for object files. diff --git a/lib/std/compress/flate.zig b/lib/std/compress/flate.zig index 65af44b7b4e9..6a111ac0fcfb 100644 --- a/lib/std/compress/flate.zig +++ b/lib/std/compress/flate.zig @@ -70,8 +70,8 @@ pub const store = struct { } }; -/// Container defines header/footer arround deflate bit stream. Gzip and zlib -/// compression algorithms are containers arround deflate bit stream body. +/// Container defines header/footer around deflate bit stream. Gzip and zlib +/// compression algorithms are containers around deflate bit stream body. const Container = @import("flate/container.zig").Container; const std = @import("std"); const testing = std.testing; diff --git a/lib/std/compress/flate/CircularBuffer.zig b/lib/std/compress/flate/CircularBuffer.zig index f92ca2e4c185..552d364894c7 100644 --- a/lib/std/compress/flate/CircularBuffer.zig +++ b/lib/std/compress/flate/CircularBuffer.zig @@ -109,7 +109,7 @@ const ReadBlock = struct { len: usize, }; -/// Returns position of continous read block data. +/// Returns position of continuous read block data. 
fn readBlock(self: *Self, max: usize) ReadBlock { const r = self.rp & mask; const w = self.wp & mask; diff --git a/lib/std/compress/flate/Lookup.zig b/lib/std/compress/flate/Lookup.zig index 31f59550b068..c646b2dd2896 100644 --- a/lib/std/compress/flate/Lookup.zig +++ b/lib/std/compress/flate/Lookup.zig @@ -26,7 +26,7 @@ pub fn add(self: *Self, data: []const u8, pos: u16) u16 { return self.set(h, pos); } -// Retruns previous location with the same hash value given the current +// Returns previous location with the same hash value given the current // position. pub fn prev(self: *Self, pos: u16) u16 { return self.chain[pos]; diff --git a/lib/std/compress/flate/SlidingWindow.zig b/lib/std/compress/flate/SlidingWindow.zig index 4a10383820b5..ece907c32fab 100644 --- a/lib/std/compress/flate/SlidingWindow.zig +++ b/lib/std/compress/flate/SlidingWindow.zig @@ -84,7 +84,7 @@ pub fn match(self: *Self, prev_pos: u16, curr_pos: u16, min_len: u16) u16 { const prev_lh = self.buffer[prev_pos..][0..max_len]; const curr_lh = self.buffer[curr_pos..][0..max_len]; - // If we alread have match (min_len > 0), + // If we already have match (min_len > 0), // test the first byte above previous len a[min_len] != b[min_len] // and then all the bytes from that position to zero. // That is likely positions to find difference than looping from first bytes. diff --git a/lib/std/compress/flate/Token.zig b/lib/std/compress/flate/Token.zig index cec23a139c36..a9641f6adc88 100644 --- a/lib/std/compress/flate/Token.zig +++ b/lib/std/compress/flate/Token.zig @@ -110,7 +110,7 @@ pub fn show(t: Token) void { } } -// Retruns index in match_lengths table for each length in range 0-255. +// Returns index in match_lengths table for each length in range 0-255. const match_lengths_index = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, diff --git a/lib/std/compress/flate/bit_reader.zig b/lib/std/compress/flate/bit_reader.zig index 8fc94cd4b408..1e41f081c1c3 100644 --- a/lib/std/compress/flate/bit_reader.zig +++ b/lib/std/compress/flate/bit_reader.zig @@ -57,7 +57,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { /// it may be some extra zero bits in buffer. pub inline fn fill(self: *Self, nice: u6) !void { if (self.nbits >= nice and nice != 0) { - return; // We have enought bits + return; // We have enough bits } // Read more bits from forward reader @@ -96,7 +96,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { pub const flag = struct { pub const peek: u3 = 0b001; // dont advance internal buffer, just get bits, leave them in buffer pub const buffered: u3 = 0b010; // assume that there is no need to fill, fill should be called before - pub const reverse: u3 = 0b100; // bit reverse readed bits + pub const reverse: u3 = 0b100; // bit reverse read bits }; /// Alias for readF(U, 0). @@ -133,7 +133,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { try self.fill(n); return @truncate(self.bits); }, - flag.buffered => { // no fill, assume that buffer has enought bits + flag.buffered => { // no fill, assume that buffer has enough bits const u: U = @truncate(self.bits); try self.shift(n); return u; @@ -212,7 +212,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { } /// Read deflate fixed fixed code. - /// Reads first 7 bits, and then mybe 1 or 2 more to get full 7,8 or 9 bit code. + /// Reads first 7 bits, and then maybe 1 or 2 more to get full 7,8 or 9 bit code. 
/// ref: https://datatracker.ietf.org/doc/html/rfc1951#page-12 /// Lit Value Bits Codes /// --------- ---- ----- diff --git a/lib/std/compress/flate/block_writer.zig b/lib/std/compress/flate/block_writer.zig index d2a8cc0dcd9f..fa0d299e8432 100644 --- a/lib/std/compress/flate/block_writer.zig +++ b/lib/std/compress/flate/block_writer.zig @@ -48,7 +48,7 @@ pub fn BlockWriter(comptime WriterType: type) type { /// Should be called only when bit stream is at byte boundary. /// /// That is after final block; when last byte could be incomplete or - /// after stored block; which is aligned to the byte bounday (it has x + /// after stored block; which is aligned to the byte boundary (it has x /// padding bits after first 3 bits). pub fn flush(self: *Self) Error!void { try self.bit_writer.flush(); diff --git a/lib/std/compress/flate/consts.zig b/lib/std/compress/flate/consts.zig index c28b40f68ef3..b17083461bce 100644 --- a/lib/std/compress/flate/consts.zig +++ b/lib/std/compress/flate/consts.zig @@ -1,5 +1,5 @@ pub const deflate = struct { - // Number of tokens to accumlate in deflate before starting block encoding. + // Number of tokens to accumulate in deflate before starting block encoding. // // In zlib this depends on memlevel: 6 + memlevel, where default memlevel is // 8 and max 9 that gives 14 or 15 bits. diff --git a/lib/std/compress/flate/deflate.zig b/lib/std/compress/flate/deflate.zig index 794ab02247c3..e953ecb354c4 100644 --- a/lib/std/compress/flate/deflate.zig +++ b/lib/std/compress/flate/deflate.zig @@ -183,7 +183,7 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl // Write match from previous position. step = try self.addMatch(m) - 1; // we already advanced 1 from previous position } else { - // No match at previous postition. + // No match at previous position. // Write previous literal if any, and remember this literal. try self.addPrevLiteral(); self.prev_literal = literal; @@ -268,9 +268,9 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl fn flushTokens(self: *Self, flush_opt: FlushOption) !void { // Pass tokens to the token writer try self.block_writer.write(self.tokens.tokens(), flush_opt == .final, self.win.tokensBuffer()); - // Stored block ensures byte aligment. + // Stored block ensures byte alignment. // It has 3 bits (final, block_type) and then padding until byte boundary. - // After that everyting is aligned to the boundary in the stored block. + // After that everything is aligned to the boundary in the stored block. // Empty stored block is Ob000 + (0-7) bits of padding + 0x00 0x00 0xFF 0xFF. // Last 4 bytes are byte aligned. if (flush_opt == .flush) { @@ -572,7 +572,7 @@ test "tokenization" { } } -// Tests that tokens writen are equal to expected token list. +// Tests that tokens written are equal to expected token list. const TestTokenWriter = struct { const Self = @This(); @@ -655,7 +655,7 @@ test "file tokenization" { const TokenWriter = TokenDecoder(@TypeOf(writer)); var cmp = try Deflate(.raw, WriterType, TokenWriter).init(writer, .{ .level = level }); - // Stream uncompressed `orignal` data to the compressor. It will + // Stream uncompressed `original` data to the compressor. It will // produce tokens list and pass that list to the TokenDecoder. This // TokenDecoder uses CircularBuffer from inflate to convert list of // tokens back to the uncompressed stream. 
diff --git a/lib/std/compress/flate/huffman_decoder.zig b/lib/std/compress/flate/huffman_decoder.zig index bc8b59f492bb..abff915f761a 100644 --- a/lib/std/compress/flate/huffman_decoder.zig +++ b/lib/std/compress/flate/huffman_decoder.zig @@ -132,7 +132,7 @@ fn HuffmanDecoder( if (n > max) max = n; count[n] += 1; } - if (max == 0) // emtpy tree + if (max == 0) // empty tree return; // check for an over-subscribed or incomplete set of lengths @@ -255,7 +255,7 @@ test "encode/decode literals" { for (1..286) |j| { // for all different number of codes var enc: LiteralEncoder = .{}; - // create freqencies + // create frequencies var freq = [_]u16{0} ** 286; freq[256] = 1; // ensure we have end of block code for (&freq, 1..) |*f, i| { @@ -263,7 +263,7 @@ test "encode/decode literals" { f.* = @intCast(i); } - // encoder from freqencies + // encoder from frequencies enc.generate(&freq, 15); // get code_lens from encoder diff --git a/lib/std/compress/flate/huffman_encoder.zig b/lib/std/compress/flate/huffman_encoder.zig index c1c5148c97d5..42cf9a20c238 100644 --- a/lib/std/compress/flate/huffman_encoder.zig +++ b/lib/std/compress/flate/huffman_encoder.zig @@ -168,7 +168,7 @@ pub fn HuffmanEncoder(comptime size: usize) type { while (true) { var l = &levels[level]; if (l.next_pair_freq == math.maxInt(i32) and l.next_char_freq == math.maxInt(i32)) { - // We've run out of both leafs and pairs. + // We've run out of both leaves and pairs. // End all calculations for this level. // To make sure we never come back to this level or any lower level, // set next_pair_freq impossibly large. diff --git a/lib/std/compress/flate/inflate.zig b/lib/std/compress/flate/inflate.zig index 69ee41e096f6..bb4d158acadb 100644 --- a/lib/std/compress/flate/inflate.zig +++ b/lib/std/compress/flate/inflate.zig @@ -99,7 +99,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp fn storedBlock(self: *Self) !bool { self.bits.alignToByte(); // skip padding until byte boundary - // everyting after this is byte aligned in stored block + // everything after this is byte aligned in stored block var len = try self.bits.read(u16); const nlen = try self.bits.read(u16); if (len != ~nlen) return error.WrongStoredBlockNlen; @@ -155,7 +155,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp fn dynamicBlockHeader(self: *Self) !void { const hlit: u16 = @as(u16, try self.bits.read(u5)) + 257; // number of ll code entries present - 257 const hdist: u16 = @as(u16, try self.bits.read(u5)) + 1; // number of distance code entries - 1 - const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lenths are encoded + const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lengths are encoded if (hlit > 286 or hdist > 30) return error.InvalidDynamicBlockHeader; @@ -180,7 +180,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp return error.InvalidDynamicBlockHeader; } - // literal code lengts to literal decoder + // literal code lengths to literal decoder try self.lit_dec.generate(dec_lens[0..hlit]); // distance code lengths to distance decoder diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig index 9ddd587a190a..3580d11fcdc1 100644 --- a/lib/std/crypto/Certificate.zig +++ b/lib/std/crypto/Certificate.zig @@ -977,7 +977,7 @@ pub const rsa = struct { // the hash function (2^61 - 1 octets for SHA-1), output // "inconsistent" and stop. 
// All the cryptographic hash functions in the standard library have a limit of >= 2^61 - 1. - // Even then, this check is only there for paranoia. In the context of TLS certifcates, emBit cannot exceed 4096. + // Even then, this check is only there for paranoia. In the context of TLS certificates, emBit cannot exceed 4096. if (emBit >= 1 << 61) return error.InvalidSignature; // emLen = \ceil(emBits/8) diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 31792318d05e..10c4402a4fb6 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -41,7 +41,7 @@ pub const OverflowError = error{Overflow}; /// Invalid modulus. Modulus must be odd. pub const InvalidModulusError = error{ EvenModulus, ModulusTooSmall }; -/// Exponentation with a null exponent. +/// Exponentiation with a null exponent. /// Exponentiation in cryptographic protocols is almost always a sign of a bug which can lead to trivial attacks. /// Therefore, this module returns an error when a null exponent is encountered, encouraging applications to handle this case explicitly. pub const NullExponentError = error{NullExponent}; diff --git a/lib/std/crypto/ml_kem.zig b/lib/std/crypto/ml_kem.zig index b2f38b82fe9f..c3cb5805eef5 100644 --- a/lib/std/crypto/ml_kem.zig +++ b/lib/std/crypto/ml_kem.zig @@ -379,7 +379,7 @@ fn Kyber(comptime p: Params) type { /// Create a new key pair. /// If seed is null, a random seed will be generated. - /// If a seed is provided, the key pair will be determinsitic. + /// If a seed is provided, the key pair will be deterministic. pub fn create(seed_: ?[seed_length]u8) !KeyPair { const seed = seed_ orelse sk: { var random_seed: [seed_length]u8 = undefined; @@ -1253,7 +1253,7 @@ const Poly = struct { t |= @as(T, buf[batch_bytes * i + j]) << (8 * j); } - // Accumelate `a's and `b's together by masking them out, shifting + // Accumulate `a's and `b's together by masking them out, shifting // and adding. For η=3, we have d = a₁ + a₂ + a₃ + 8(b₁ + b₂ + b₃) + … var d: T = 0; inline for (0..eta) |j| { diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index c791c6b7737f..40b190f41a8e 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -487,7 +487,7 @@ pub const Box = struct { /// A key pair. pub const KeyPair = X25519.KeyPair; - /// Compute a secret suitable for `secretbox` given a recipent's public key and a sender's secret key. + /// Compute a secret suitable for `secretbox` given a recipient's public key and a sender's secret key. pub fn createSharedSecret(public_key: [public_length]u8, secret_key: [secret_length]u8) (IdentityElementError || WeakPublicKeyError)![shared_length]u8 { const p = try X25519.scalarmult(secret_key, public_key); const zero = [_]u8{0} ** 16; diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig index c4556b668b92..1ff43e46a611 100644 --- a/lib/std/crypto/sha2.zig +++ b/lib/std/crypto/sha2.zig @@ -1,6 +1,6 @@ //! Secure Hashing Algorithm 2 (SHA2) //! -//! Published by the National Institue of Standards and Technology (NIST) [1] [2]. +//! Published by the National Institute of Standards and Technology (NIST) [1] [2]. //! //! Truncation mitigates length-extension attacks but increases vulnerability to collision //! attacks. Collision attacks remain impractical for all types defined here. 
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig index 7fff68471caa..fbb41a3fd7ab 100644 --- a/lib/std/crypto/tls.zig +++ b/lib/std/crypto/tls.zig @@ -40,8 +40,8 @@ const assert = std.debug.assert; pub const Client = @import("tls/Client.zig"); pub const record_header_len = 5; -pub const max_cipertext_inner_record_len = 1 << 14; -pub const max_ciphertext_len = max_cipertext_inner_record_len + 256; +pub const max_ciphertext_inner_record_len = 1 << 14; +pub const max_ciphertext_len = max_ciphertext_inner_record_len + 256; pub const max_ciphertext_record_len = max_ciphertext_len + record_header_len; pub const hello_retry_request_sequence = [32]u8{ 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91, diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 9a09b2135306..9474953748d0 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -819,7 +819,7 @@ fn prepareCiphertextRecord( const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len; while (true) { const encrypted_content_len: u16 = @intCast(@min( - @min(bytes.len - bytes_i, tls.max_cipertext_inner_record_len), + @min(bytes.len - bytes_i, tls.max_ciphertext_inner_record_len), ciphertext_buf.len -| (close_notify_alert_reserved + overhead_len + ciphertext_end), )); diff --git a/lib/std/dwarf/abi.zig b/lib/std/dwarf/abi.zig index 75ff3d1e9e0e..543a4b9ac1d5 100644 --- a/lib/std/dwarf/abi.zig +++ b/lib/std/dwarf/abi.zig @@ -38,7 +38,7 @@ pub fn ipRegNum() u8 { pub fn fpRegNum(reg_context: RegisterContext) u8 { return switch (builtin.cpu.arch) { - // GCC on OS X historicaly did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO + // GCC on OS X historically did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO .x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5, .x86_64 => 6, .arm => 11, diff --git a/lib/std/dwarf/expressions.zig b/lib/std/dwarf/expressions.zig index d24d559a822f..ab446d052713 100644 --- a/lib/std/dwarf/expressions.zig +++ b/lib/std/dwarf/expressions.zig @@ -15,7 +15,7 @@ pub const ExpressionContext = struct { /// The dwarf format of the section this expression is in format: dwarf.Format = .@"32", - /// If specified, any addresses will pass through this function before being acccessed + /// If specified, any addresses will pass through this function before being accessed isValidMemory: ?*const fn (address: usize) bool = null, /// The compilation unit this expression relates to, if any @@ -42,14 +42,14 @@ pub const ExpressionOptions = struct { /// The address size of the target architecture addr_size: u8 = @sizeOf(usize), - /// Endianess of the target architecture + /// Endianness of the target architecture endian: std.builtin.Endian = builtin.target.cpu.arch.endian(), /// Restrict the stack machine to a subset of opcodes used in call frame instructions call_frame_context: bool = false, }; -// Explcitly defined to support executing sub-expressions +// Explicitly defined to support executing sub-expressions pub const ExpressionError = error{ UnimplementedExpressionCall, UnimplementedOpcode, diff --git a/lib/std/hash/crc/impl.zig b/lib/std/hash/crc/impl.zig index 00a7a1956b4a..253a7b0a6287 100644 --- a/lib/std/hash/crc/impl.zig +++ b/lib/std/hash/crc/impl.zig @@ -1,4 +1,4 @@ -// There is a generic CRC implementation "Crc()" which can be paramterized via +// There is a generic CRC 
implementation "Crc()" which can be parameterized via // the Algorithm struct for a plethora of uses. // // The primary interface for all of the standard CRC algorithms is the diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index dc1167d79aba..d658ed4118c5 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -79,7 +79,7 @@ pub const Wyhash = struct { @memcpy(scratch[0..rem], self.buf[self.buf.len - rem ..][0..rem]); @memcpy(scratch[rem..][0..self.buf_len], self.buf[0..self.buf_len]); - // Same as input but with additional bytes preceeding start in case of a short buffer + // Same as input but with additional bytes preceding start in case of a short buffer input = &scratch; offset = rem; } diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 676d7423c6ed..81e60a083b87 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -902,7 +902,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa return error.InvalidEnumTag; } - // We don't direcly iterate over the fields of EnumTag, as that + // We don't directly iterate over the fields of EnumTag, as that // would require an inline loop. Instead, we create an array of // values that is comptime-know, but can be iterated at runtime // without requiring an inline loop. This generates better diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 3deac7a2f6ef..a69c419453c8 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -570,7 +570,7 @@ pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize { /// Returns the array index of one of the woken futexes. /// No further information is provided: any number of other futexes may also /// have been woken by the same event, and if more than one futex was woken, -/// the retrned index may refer to any one of them. +/// the returned index may refer to any one of them. /// (It is not necessaryily the futex with the smallest index, nor the one /// most recently woken, nor...) pub fn futex2_waitv( @@ -648,7 +648,7 @@ pub fn futex2_wake( pub fn futex2_requeue( /// Array describing the source and destination futex. waiters: [*]futex_waitv, - /// Unsed. + /// Unused. flags: u32, /// Number of futexes to wake. nr_wake: i32, @@ -6009,7 +6009,7 @@ else /// values of this resource limit. NICE, - /// Maximum realtime priority allowed for non-priviledged + /// Maximum realtime priority allowed for non-privileged /// processes. RTPRIO, @@ -7228,7 +7228,7 @@ pub const futex_waitv = extern struct { uaddr: u64, /// Flags for this waiter. flags: u32, - /// Reserved memeber to preserve alignment. + /// Reserved member to preserve alignment. /// Should be 0. 
__reserved: u32, }; diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index 281f4e4bf12e..e547478c7879 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -3938,7 +3938,7 @@ test BufferGroup { // Server uses buffer group receive { - // Submit recv operation, buffer will be choosen from buffer group + // Submit recv operation, buffer will be chosen from buffer group _ = try buf_grp.recv(2, fds.server, 0); const submitted = try ring.submit(); try testing.expectEqual(1, submitted); @@ -3956,7 +3956,7 @@ test BufferGroup { // Get buffer from pool const buf = buf_grp.get(buffer_id)[0..len]; try testing.expectEqualSlices(u8, &data, buf); - // Releaase buffer to the kernel when application is done with it + // Release buffer to the kernel when application is done with it buf_grp.put(buffer_id); } } diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index d20ebdd6c8b7..7661ba9d7caa 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -140,7 +140,7 @@ pub const F_STRICT_ALIGNMENT = 0x1; /// If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the verifier will /// allow any alignment whatsoever. On platforms with strict alignment -/// requirements for loads ands stores (such as sparc and mips) the verifier +/// requirements for loads and stores (such as sparc and mips) the verifier /// validates that all loads and stores provably follow this requirement. This /// flag turns that checking and enforcement off. /// diff --git a/lib/std/os/linux/mips.zig b/lib/std/os/linux/mips.zig index b0f33894c896..03098a7d1c91 100644 --- a/lib/std/os/linux/mips.zig +++ b/lib/std/os/linux/mips.zig @@ -385,7 +385,7 @@ pub const rlimit_resource = enum(c_int) { /// values of this resource limit. NICE, - /// Maximum realtime priority allowed for non-priviledged + /// Maximum realtime priority allowed for non-privileged /// processes. RTPRIO, diff --git a/lib/std/os/linux/mips64.zig b/lib/std/os/linux/mips64.zig index c1b352328bc9..159a20a032c5 100644 --- a/lib/std/os/linux/mips64.zig +++ b/lib/std/os/linux/mips64.zig @@ -370,7 +370,7 @@ pub const rlimit_resource = enum(c_int) { /// values of this resource limit. NICE, - /// Maximum realtime priority allowed for non-priviledged + /// Maximum realtime priority allowed for non-privileged /// processes. RTPRIO, diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig index 796c6e090558..c7de55771fde 100644 --- a/lib/std/os/linux/sparc64.zig +++ b/lib/std/os/linux/sparc64.zig @@ -460,7 +460,7 @@ pub const rlimit_resource = enum(c_int) { /// values of this resource limit. NICE, - /// Maximum realtime priority allowed for non-priviledged + /// Maximum realtime priority allowed for non-privileged /// processes. RTPRIO, diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig index 3e487ef7fb3b..0c48493b4cd6 100644 --- a/lib/std/os/plan9.zig +++ b/lib/std/os/plan9.zig @@ -184,7 +184,7 @@ pub const SIG = struct { pub const sigset_t = c_long; pub const empty_sigset = 0; pub const siginfo_t = c_long; -// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible. +// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we include it here to be compatible. 
pub const Sigaction = extern struct { pub const handler_fn = *const fn (i32) callconv(.C) void; pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void; diff --git a/lib/std/posix.zig b/lib/std/posix.zig index e2af96c48749..35052ab5791a 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -3474,7 +3474,7 @@ pub const SocketError = error{ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t { if (native_os == .windows) { // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into - // windows-analagous operations + // windows-analogous operations const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC); const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0) windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig index 69dccbdd5a20..a5ea649c467a 100644 --- a/lib/std/priority_queue.zig +++ b/lib/std/priority_queue.zig @@ -617,7 +617,7 @@ fn contextLessThan(context: []const u32, a: usize, b: usize) Order { const CPQlt = PriorityQueue(usize, []const u32, contextLessThan); -test "add and remove min heap with contextful comparator" { +test "add and remove min heap with context comparator" { const context = [_]u32{ 5, 3, 4, 2, 2, 8, 0 }; var queue = CPQlt.init(testing.allocator, context[0..]); diff --git a/lib/std/process.zig b/lib/std/process.zig index 787831e61cd4..fce77175abac 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1818,7 +1818,7 @@ pub const CreateEnvironOptions = struct { zig_progress_fd: ?i32 = null, }; -/// Creates a null-deliminated environment variable block in the format +/// Creates a null-delimited environment variable block in the format /// expected by POSIX, from a hash map plus options. pub fn createEnvironFromMap( arena: Allocator, @@ -1880,7 +1880,7 @@ pub fn createEnvironFromMap( return envp_buf; } -/// Creates a null-deliminated environment variable block in the format +/// Creates a null-delimited environment variable block in the format /// expected by POSIX, from a hash map plus options. pub fn createEnvironFromExisting( arena: Allocator, diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig index d74c7788a4c1..61d78797c569 100644 --- a/lib/std/sort/pdq.zig +++ b/lib/std/sort/pdq.zig @@ -268,7 +268,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void { } } -/// choses a pivot in `items[a..b]`. +/// chooses a pivot in `items[a..b]`. /// swaps likely_sorted when `items[a..b]` seems to be already sorted. fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint { // minimum length for using the Tukey's ninther method diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 04571fb3c88f..e81aac0eb55c 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -315,7 +315,7 @@ pub const FileKind = enum { file, }; -/// Iteartor over entries in the tar file represented by reader. +/// Iterator over entries in the tar file represented by reader. 
pub fn Iterator(comptime ReaderType: type) type { return struct { reader: ReaderType, @@ -423,7 +423,7 @@ pub fn Iterator(comptime ReaderType: type) type { self.padding = blockPadding(size); switch (kind) { - // File types to retrun upstream + // File types to return upstream .directory, .normal, .symbolic_link => { file.kind = switch (kind) { .directory => .directory, diff --git a/lib/std/tar/test.zig b/lib/std/tar/test.zig index 62a910f43d19..3bcb5af90c95 100644 --- a/lib/std/tar/test.zig +++ b/lib/std/tar/test.zig @@ -9,7 +9,7 @@ const Case = struct { mode: u32 = 0, link_name: []const u8 = &[0]u8{}, kind: tar.FileKind = .file, - truncated: bool = false, // when there is no file body, just header, usefull for huge files + truncated: bool = false, // when there is no file body, just header, useful for huge files }; data: []const u8, // testdata file content diff --git a/lib/std/valgrind/callgrind.zig b/lib/std/valgrind/callgrind.zig index ded2d3661a36..716573e7b0e4 100644 --- a/lib/std/valgrind/callgrind.zig +++ b/lib/std/valgrind/callgrind.zig @@ -54,7 +54,7 @@ pub fn startInstrumentation() void { /// Stop full callgrind instrumentation if not already switched off. /// This flushes Valgrinds translation cache, and does no additional -/// instrumentation afterwards, which effectivly will run at the same +/// instrumentation afterwards, which effectively will run at the same /// speed as the "none" tool (ie. at minimal slowdown). /// Use this to bypass Callgrind aggregation for uninteresting code parts. /// To start Callgrind in this mode to ignore the setup phase, use diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 18012b802c77..92be592a439d 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -5085,7 +5085,7 @@ fn structDeclInner( any_default_inits = true; // The decl_inst is used as here so that we can easily reconstruct a mapping - // between it and the field type when the fields inits are analzyed. + // between it and the field type when the fields inits are analyzed. const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } }; const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); @@ -11559,7 +11559,7 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !Zir.NullTerminat } /// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`. -/// `end_token` must point at the first token after the last doc coment line. +/// `end_token` must point at the first token after the last doc comment line. /// Returns 0 if no doc comment is present. fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString { if (end_token == 0) return .empty; @@ -11780,7 +11780,7 @@ const Scope = struct { inst: Zir.Inst.Ref, /// Source location of the corresponding variable declaration. token_src: Ast.TokenIndex, - /// Track the first identifer where it is referenced. + /// Track the first identifier where it is referenced. /// 0 means never referenced. used: Ast.TokenIndex = 0, /// Track the identifier where it is discarded, like this `_ = foo;`. @@ -11803,13 +11803,13 @@ const Scope = struct { ptr: Zir.Inst.Ref, /// Source location of the corresponding variable declaration. token_src: Ast.TokenIndex, - /// Track the first identifer where it is referenced. + /// Track the first identifier where it is referenced. /// 0 means never referenced. 
used: Ast.TokenIndex = 0, /// Track the identifier where it is discarded, like this `_ = foo;`. /// 0 means never discarded. discarded: Ast.TokenIndex = 0, - /// Whether this value is used as an lvalue after inititialization. + /// Whether this value is used as an lvalue after initialization. /// If not, we know it can be `const`, so will emit a compile error if it is `var`. used_as_lvalue: bool = false, /// String table index. diff --git a/lib/std/zig/AstRlAnnotate.zig b/lib/std/zig/AstRlAnnotate.zig index 4a1203ca09fc..543c2799812a 100644 --- a/lib/std/zig/AstRlAnnotate.zig +++ b/lib/std/zig/AstRlAnnotate.zig @@ -7,8 +7,8 @@ //! occur. Thus, we want to provide a real result pointer (from an alloc) only //! when necessary. //! -//! To achive this, we need to determine which expressions require a result -//! pointer. This pass is reponsible for analyzing all syntax forms which may +//! To achieve this, we need to determine which expressions require a result +//! pointer. This pass is responsible for analyzing all syntax forms which may //! provide a result location and, if sub-expressions consume this result //! pointer non-trivially (e.g. writing through field pointers), marking the //! node as requiring a result location. diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 99ad52bf1dc4..caa389b0102e 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -878,7 +878,7 @@ const MsvcLibDir = struct { error.OutOfMemory => return error.OutOfMemory, else => continue, }; - if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 pathes and at least some of them are not of max length + if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 paths and at least some of them are not of max length allocator.free(source_directories_value); continue; } @@ -887,10 +887,10 @@ const MsvcLibDir = struct { } else return error.PathNotFound; defer allocator.free(source_directories); - var source_directories_splitted = std.mem.splitScalar(u8, source_directories, ';'); + var source_directories_split = std.mem.splitScalar(u8, source_directories, ';'); const msvc_dir: []const u8 = msvc_dir: { - const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_splitted.first()); + const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_split.first()); if (msvc_include_dir_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) { allocator.free(msvc_include_dir_maybe_with_trailing_slash); diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 38d279461ec5..ec53ab50464f 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -4540,7 +4540,7 @@ test "zig fmt: decimal float literals with underscore separators" { ); } -test "zig fmt: hexadeciaml float literals with underscore separators" { +test "zig fmt: hexadecimal float literals with underscore separators" { try testTransform( \\pub fn main() void { \\ const a: f64 = (0x10.0p-0+(0x10.0p+0))+0x10_00.00_00p-8+0x00_00.00_10p+16; diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 83c798c34204..2c2f81825aea 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -1044,7 +1044,7 @@ fn detectAbiAndDynamicLinker( defer if (is_elf_file == false) 
file.close(); // Shortest working interpreter path is "#!/i" (4) - // (interpreter is "/i", assuming all pathes are absolute, like in above comment). + // (interpreter is "/i", assuming all paths are absolute, like in above comment). // ELF magic number length is also 4. // // If file is shorter than that, it is definitely not ELF file From c5283eb49b50c0d8b0d590b90f43523bed96e80a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 Jul 2024 21:48:57 -0400 Subject: [PATCH 085/152] InternPool: implement thread-safe allocated lists --- src/InternPool.zig | 308 ++++++++++++++++++++++++++++++++---------- src/Sema.zig | 52 +++---- src/Zcu.zig | 105 +------------- src/Zcu/PerThread.zig | 104 +++++++++++++- 4 files changed, 363 insertions(+), 206 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 1c501cb28ee2..6f7bb17b141e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -14,25 +14,6 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// Cached shift amount to put a `tid` in the top bits of a 32-bit value. tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, -/// Rather than allocating Decl objects with an Allocator, we instead allocate -/// them with this SegmentedList. This provides four advantages: -/// * Stable memory so that one thread can access a Decl object while another -/// thread allocates additional Decl objects from this list. -/// * It allows us to use u32 indexes to reference Decl objects rather than -/// pointers, saving memory in Type, Value, and dependency sets. -/// * Using integers to reference Decl objects rather than pointers makes -/// serialization trivial. -/// * It provides a unique integer to be used for anonymous symbol names, avoiding -/// multi-threaded contention on an atomic counter. -allocated_decls: std.SegmentedList(Module.Decl, 0) = .{}, -/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. -decls_free_list: std.ArrayListUnmanaged(DeclIndex) = .{}, - -/// Same pattern as with `allocated_decls`. -allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{}, -/// Same pattern as with `decls_free_list`. -namespaces_free_list: std.ArrayListUnmanaged(NamespaceIndex) = .{}, - /// Some types such as enums, structs, and unions need to store mappings from field names /// to field index, or value to field index. In such cases, they will store the underlying /// field names and values directly, relying on one of these maps, stored separately, @@ -354,10 +335,14 @@ const Local = struct { /// atomic access. 
mutate: struct { arena: std.heap.ArenaAllocator.State, - items: Mutate, - extra: Mutate, - limbs: Mutate, - strings: Mutate, + + items: ListMutate, + extra: ListMutate, + limbs: ListMutate, + strings: ListMutate, + + decls: BucketListMutate, + namespaces: BucketListMutate, } align(std.atomic.cache_line), const Shared = struct { @@ -366,6 +351,9 @@ const Local = struct { limbs: Limbs, strings: Strings, + decls: Decls, + namespaces: Namespaces, + pub fn getLimbs(shared: *const Local.Shared) Limbs { return switch (@sizeOf(Limb)) { @sizeOf(u32) => shared.extra, @@ -383,14 +371,38 @@ const Local = struct { }; const Strings = List(struct { u8 }); - const Mutate = struct { + const decls_bucket_width = 8; + const decls_bucket_mask = (1 << decls_bucket_width) - 1; + const decl_next_free_field = "src_namespace"; + const Decls = List(struct { *[1 << decls_bucket_width]Module.Decl }); + + const namespaces_bucket_width = 8; + const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1; + const namespace_next_free_field = "decl_index"; + const Namespaces = List(struct { *[1 << namespaces_bucket_width]Module.Namespace }); + + const ListMutate = struct { len: u32, - const empty: Mutate = .{ + const empty: ListMutate = .{ .len = 0, }; }; + const BucketListMutate = struct { + last_bucket_len: u32, + buckets_list: ListMutate, + free_list: u32, + + const free_list_sentinel = std.math.maxInt(u32); + + const empty: BucketListMutate = .{ + .last_bucket_len = 0, + .buckets_list = ListMutate.empty, + .free_list = free_list_sentinel, + }; + }; + fn List(comptime Elem: type) type { assert(@typeInfo(Elem) == .Struct); return struct { @@ -400,7 +412,7 @@ const Local = struct { const Mutable = struct { gpa: std.mem.Allocator, arena: *std.heap.ArenaAllocator.State, - mutate: *Mutate, + mutate: *ListMutate, list: *ListSelf, const fields = std.enums.values(std.meta.FieldEnum(Elem)); @@ -664,6 +676,35 @@ const Local = struct { .list = &local.shared.strings, }; } + + /// Rather than allocating Decl objects with an Allocator, we instead allocate + /// them with this BucketList. This provides four advantages: + /// * Stable memory so that one thread can access a Decl object while another + /// thread allocates additional Decl objects from this list. + /// * It allows us to use u32 indexes to reference Decl objects rather than + /// pointers, saving memory in Type, Value, and dependency sets. + /// * Using integers to reference Decl objects rather than pointers makes + /// serialization trivial. + /// * It provides a unique integer to be used for anonymous symbol names, avoiding + /// multi-threaded contention on an atomic counter. + pub fn getMutableDecls(local: *Local, gpa: std.mem.Allocator) Decls.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.decls.buckets_list, + .list = &local.shared.decls, + }; + } + + /// Same pattern as with `getMutableDecls`. 
+ pub fn getMutableNamespaces(local: *Local, gpa: std.mem.Allocator) Namespaces.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.namespaces.buckets_list, + .list = &local.shared.namespaces, + }; + } }; pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { @@ -810,6 +851,29 @@ pub const ComptimeAllocIndex = enum(u32) { _ }; pub const DeclIndex = enum(u32) { _, + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + bucket_index: u32, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) DeclIndex { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.decls_bucket_width); + assert(unwrapped.index <= Local.decls_bucket_mask); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.bucket_index << Local.decls_bucket_width | + unwrapped.index); + } + }; + fn unwrap(decl_index: DeclIndex, ip: *const InternPool) Unwrapped { + const index = @intFromEnum(decl_index) & ip.getIndexMask(u32); + return .{ + .tid = @enumFromInt(@intFromEnum(decl_index) >> ip.tid_shift_32 & ip.getTidMask()), + .bucket_index = index >> Local.decls_bucket_width, + .index = index & Local.decls_bucket_mask, + }; + } + pub fn toOptional(i: DeclIndex) OptionalDeclIndex { return @enumFromInt(@intFromEnum(i)); } @@ -832,6 +896,29 @@ pub const OptionalDeclIndex = enum(u32) { pub const NamespaceIndex = enum(u32) { _, + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + bucket_index: u32, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) NamespaceIndex { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.namespaces_bucket_width); + assert(unwrapped.index <= Local.namespaces_bucket_mask); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.bucket_index << Local.namespaces_bucket_width | + unwrapped.index); + } + }; + fn unwrap(namespace_index: NamespaceIndex, ip: *const InternPool) Unwrapped { + const index = @intFromEnum(namespace_index) & ip.getIndexMask(u32); + return .{ + .tid = @enumFromInt(@intFromEnum(namespace_index) >> ip.tid_shift_32 & ip.getTidMask()), + .bucket_index = index >> Local.namespaces_bucket_width, + .index = index & Local.namespaces_bucket_mask, + }; + } + pub fn toOptional(i: NamespaceIndex) OptionalNamespaceIndex { return @enumFromInt(@intFromEnum(i)); } @@ -5114,13 +5201,20 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.Extra.empty, .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, + + .decls = Local.Decls.empty, + .namespaces = Local.Namespaces.empty, }, .mutate = .{ .arena = .{}, - .items = Local.Mutate.empty, - .extra = Local.Mutate.empty, - .limbs = Local.Mutate.empty, - .strings = Local.Mutate.empty, + + .items = Local.ListMutate.empty, + .extra = Local.ListMutate.empty, + .limbs = Local.ListMutate.empty, + .strings = Local.ListMutate.empty, + + .decls = Local.BucketListMutate.empty, + .namespaces = Local.BucketListMutate.empty, }, }); @@ -5173,12 +5267,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.decls_free_list.deinit(gpa); - ip.allocated_decls.deinit(gpa); - - ip.namespaces_free_list.deinit(gpa); - ip.allocated_namespaces.deinit(gpa); - for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); @@ -5198,7 +5286,23 @@ pub fn 
deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); gpa.free(ip.shards); - for (ip.locals) |*local| local.mutate.arena.promote(gpa).deinit(); + for (ip.locals) |*local| { + const buckets_len = local.mutate.namespaces.buckets_list.len; + if (buckets_len > 0) for ( + local.shared.namespaces.view().items(.@"0")[0..buckets_len], + 0.., + ) |namespace_bucket, buckets_index| { + for (namespace_bucket[0..if (buckets_index < buckets_len - 1) + namespace_bucket.len + else + local.mutate.namespaces.last_bucket_len]) |*namespace| + { + namespace.decls.deinit(gpa); + namespace.usingnamespace_set.deinit(gpa); + } + }; + local.mutate.arena.promote(gpa).deinit(); + } gpa.free(ip.locals); ip.* = undefined; @@ -7849,7 +7953,7 @@ fn finishFuncInstance( section: OptionalNullTerminatedString, ) Allocator.Error!void { const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); - const decl_index = try ip.createDecl(gpa, .{ + const decl_index = try ip.createDecl(gpa, tid, .{ .name = undefined, .src_namespace = fn_owner_decl.src_namespace, .has_tv = true, @@ -7864,7 +7968,7 @@ fn finishFuncInstance( .is_exported = fn_owner_decl.is_exported, .kind = .anon, }); - errdefer ip.destroyDecl(gpa, decl_index); + errdefer ip.destroyDecl(tid, decl_index); // Populate the owner_decl field which was left undefined until now. extra.view().items(.@"0")[ @@ -9078,15 +9182,17 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var items_len: usize = 0; var extra_len: usize = 0; var limbs_len: usize = 0; + var decls_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; extra_len += local.mutate.extra.len; limbs_len += local.mutate.limbs.len; + decls_len += local.mutate.decls.buckets_list.len; } const items_size = (1 + 4) * items_len; const extra_size = 4 * extra_len; const limbs_size = 8 * limbs_len; - const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); + const decls_size = @sizeOf(Module.Decl) * decls_len; // TODO: map overhead size is not taken into account const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size; @@ -9106,7 +9212,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra_size, limbs_len, limbs_size, - ip.allocated_decls.len, + decls_len, decls_size, }); @@ -9513,64 +9619,120 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) try bw.flush(); } -pub fn declPtr(ip: *InternPool, index: DeclIndex) *Module.Decl { - return ip.allocated_decls.at(@intFromEnum(index)); +pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Module.Decl { + return @constCast(ip.declPtrConst(decl_index)); } -pub fn declPtrConst(ip: *const InternPool, index: DeclIndex) *const Module.Decl { - return ip.allocated_decls.at(@intFromEnum(index)); +pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Module.Decl { + const unwrapped_decl_index = decl_index.unwrap(ip); + const decls = ip.getLocalShared(unwrapped_decl_index.tid).decls.acquire(); + const decls_bucket = decls.view().items(.@"0")[unwrapped_decl_index.bucket_index]; + return &decls_bucket[unwrapped_decl_index.index]; } -pub fn namespacePtr(ip: *InternPool, index: NamespaceIndex) *Module.Namespace { - return ip.allocated_namespaces.at(@intFromEnum(index)); +pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Module.Namespace { + const unwrapped_namespace_index = namespace_index.unwrap(ip); + const namespaces = 
ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire(); + const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index]; + return &namespaces_bucket[unwrapped_namespace_index.index]; } pub fn createDecl( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, initialization: Module.Decl, ) Allocator.Error!DeclIndex { - if (ip.decls_free_list.popOrNull()) |index| { - ip.allocated_decls.at(@intFromEnum(index)).* = initialization; - return index; - } - const ptr = try ip.allocated_decls.addOne(gpa); - ptr.* = initialization; - return @enumFromInt(ip.allocated_decls.len - 1); + const local = ip.getLocal(tid); + const free_list_next = local.mutate.decls.free_list; + if (free_list_next != Local.BucketListMutate.free_list_sentinel) { + const reused_decl_index: DeclIndex = @enumFromInt(free_list_next); + const reused_decl = ip.declPtr(reused_decl_index); + local.mutate.decls.free_list = @intFromEnum(@field(reused_decl, Local.decl_next_free_field)); + reused_decl.* = initialization; + return reused_decl_index; + } + const decls = local.getMutableDecls(gpa); + if (local.mutate.decls.last_bucket_len == 0) { + try decls.ensureUnusedCapacity(1); + var arena = decls.arena.promote(decls.gpa); + defer decls.arena.* = arena.state; + decls.appendAssumeCapacity(.{try arena.allocator().create( + [1 << Local.decls_bucket_width]Module.Decl, + )}); + } + const unwrapped_decl_index: DeclIndex.Unwrapped = .{ + .tid = tid, + .bucket_index = decls.mutate.len - 1, + .index = local.mutate.decls.last_bucket_len, + }; + local.mutate.decls.last_bucket_len = + (unwrapped_decl_index.index + 1) & Local.namespaces_bucket_mask; + const decl_index = unwrapped_decl_index.wrap(ip); + ip.declPtr(decl_index).* = initialization; + return decl_index; } -pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: DeclIndex) void { - ip.declPtr(index).* = undefined; - ip.decls_free_list.append(gpa, index) catch { - // In order to keep `destroyDecl` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Decl until garbage collection. 
- }; +pub fn destroyDecl(ip: *InternPool, tid: Zcu.PerThread.Id, decl_index: DeclIndex) void { + const local = ip.getLocal(tid); + const decl = ip.declPtr(decl_index); + decl.* = undefined; + @field(decl, Local.decl_next_free_field) = @enumFromInt(local.mutate.decls.free_list); + local.mutate.decls.free_list = @intFromEnum(decl_index); } pub fn createNamespace( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, initialization: Module.Namespace, ) Allocator.Error!NamespaceIndex { - if (ip.namespaces_free_list.popOrNull()) |index| { - ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization; - return index; - } - const ptr = try ip.allocated_namespaces.addOne(gpa); - ptr.* = initialization; - return @enumFromInt(ip.allocated_namespaces.len - 1); + const local = ip.getLocal(tid); + const free_list_next = local.mutate.namespaces.free_list; + if (free_list_next != Local.BucketListMutate.free_list_sentinel) { + const reused_namespace_index: NamespaceIndex = @enumFromInt(free_list_next); + const reused_namespace = ip.namespacePtr(reused_namespace_index); + local.mutate.namespaces.free_list = + @intFromEnum(@field(reused_namespace, Local.namespace_next_free_field)); + reused_namespace.* = initialization; + return reused_namespace_index; + } + const namespaces = local.getMutableNamespaces(gpa); + if (local.mutate.namespaces.last_bucket_len == 0) { + try namespaces.ensureUnusedCapacity(1); + var arena = namespaces.arena.promote(namespaces.gpa); + defer namespaces.arena.* = arena.state; + namespaces.appendAssumeCapacity(.{try arena.allocator().create( + [1 << Local.namespaces_bucket_width]Module.Namespace, + )}); + } + const unwrapped_namespace_index: NamespaceIndex.Unwrapped = .{ + .tid = tid, + .bucket_index = namespaces.mutate.len - 1, + .index = local.mutate.namespaces.last_bucket_len, + }; + local.mutate.namespaces.last_bucket_len = + (unwrapped_namespace_index.index + 1) & Local.namespaces_bucket_mask; + const namespace_index = unwrapped_namespace_index.wrap(ip); + ip.namespacePtr(namespace_index).* = initialization; + return namespace_index; } -pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: NamespaceIndex) void { - ip.namespacePtr(index).* = .{ +pub fn destroyNamespace( + ip: *InternPool, + tid: Zcu.PerThread.Id, + namespace_index: NamespaceIndex, +) void { + const local = ip.getLocal(tid); + const namespace = ip.namespacePtr(namespace_index); + namespace.* = .{ .parent = undefined, .file_scope = undefined, .decl_index = undefined, }; - ip.namespaces_free_list.append(gpa, index) catch { - // In order to keep `destroyNamespace` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Namespace until garbage collection. - }; + @field(namespace, Local.namespace_next_free_field) = + @enumFromInt(local.mutate.namespaces.free_list); + local.mutate.namespaces.free_list = @intFromEnum(namespace_index); } const EmbeddedNulls = enum { diff --git a/src/Sema.zig b/src/Sema.zig index f1c61fdd2ace..b897228f58ce 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2830,7 +2830,7 @@ fn zirStructDecl( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); if (pt.zcu.comp.debug_incremental) { try ip.addDependency( @@ -2841,12 +2841,12 @@ fn zirStructDecl( } // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. 
- const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ + const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; - errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); + errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); @@ -2872,8 +2872,8 @@ fn createAnonymousDeclTypeNamed( const ip = &zcu.intern_pool; const gpa = sema.gpa; const namespace = block.namespace; - const new_decl_index = try zcu.allocateNewDecl(namespace); - errdefer zcu.destroyDecl(new_decl_index); + const new_decl_index = try pt.allocateNewDecl(namespace); + errdefer pt.destroyDecl(new_decl_index); switch (name_strategy) { .anon => {}, // handled after switch @@ -3068,7 +3068,7 @@ fn zirEnumDecl( ); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer if (!done) mod.abortAnonDecl(new_decl_index); + errdefer if (!done) pt.abortAnonDecl(new_decl_index); if (pt.zcu.comp.debug_incremental) { try mod.intern_pool.addDependency( @@ -3079,12 +3079,12 @@ fn zirEnumDecl( } // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. - const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ + const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; - errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); + errdefer if (!done) if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { try pt.scanNamespace(ns, decls, new_decl); @@ -3335,7 +3335,7 @@ fn zirUnionDecl( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); if (pt.zcu.comp.debug_incremental) { try mod.intern_pool.addDependency( @@ -3346,12 +3346,12 @@ fn zirUnionDecl( } // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. 
- const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{ + const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; - errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); + errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); @@ -3425,7 +3425,7 @@ fn zirOpaqueDecl( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); if (pt.zcu.comp.debug_incremental) { try ip.addDependency( @@ -3435,12 +3435,12 @@ fn zirOpaqueDecl( ); } - const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try mod.createNamespace(.{ + const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .decl_index = new_decl_index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; - errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); + errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); @@ -21716,7 +21716,7 @@ fn zirReify( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); try pt.finalizeAnonDecl(new_decl_index); @@ -21916,7 +21916,7 @@ fn reifyEnum( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); wip_ty.prepare(ip, new_decl_index, .none); wip_ty.setTagTy(ip, tag_ty.toIntern()); @@ -22063,7 +22063,7 @@ fn reifyUnion( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); const field_types = try sema.arena.alloc(InternPool.Index, fields_len); const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined; @@ -22322,7 +22322,7 @@ fn reifyStruct( inst, ); mod.declPtr(new_decl_index).owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); const struct_type = ip.loadStructType(wip_ty.index); @@ -26497,8 +26497,8 @@ fn zirBuiltinExtern( } const ptr_info = ty.ptrInfo(mod); - const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace); - errdefer mod.destroyDecl(new_decl_index); + const new_decl_index = try pt.allocateNewDecl(sema.owner_decl.src_namespace); + errdefer pt.destroyDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); try mod.initNewAnonDecl( new_decl_index, @@ -36733,8 +36733,8 @@ fn generateUnionTagTypeNumbered( const gpa = sema.gpa; const ip = &mod.intern_pool; - const new_decl_index = try mod.allocateNewDecl(block.namespace); - errdefer mod.destroyDecl(new_decl_index); + const new_decl_index = try pt.allocateNewDecl(block.namespace); + errdefer pt.destroyDecl(new_decl_index); const fqn = try union_owner_decl.fullyQualifiedName(pt); const name = try ip.getOrPutStringFmt( gpa, @@ -36748,7 +36748,7 @@ fn generateUnionTagTypeNumbered( Value.@"unreachable", 
name, ); - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -36785,8 +36785,8 @@ fn generateUnionTagTypeSimple( const new_decl_index = new_decl_index: { const fqn = try union_owner_decl.fullyQualifiedName(pt); - const new_decl_index = try mod.allocateNewDecl(block.namespace); - errdefer mod.destroyDecl(new_decl_index); + const new_decl_index = try pt.allocateNewDecl(block.namespace); + errdefer pt.destroyDecl(new_decl_index); const name = try ip.getOrPutStringFmt( gpa, pt.tid, @@ -36802,7 +36802,7 @@ fn generateUnionTagTypeSimple( mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer pt.abortAnonDecl(new_decl_index); const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .decl = new_decl_index, diff --git a/src/Zcu.zig b/src/Zcu.zig index 2f87bcca0f0e..92a0765ebb4a 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2410,6 +2410,7 @@ pub fn init(mod: *Module, thread_count: usize) !void { } pub fn deinit(zcu: *Zcu) void { + const pt: Zcu.PerThread = .{ .tid = .main, .zcu = zcu }; const gpa = zcu.gpa; if (zcu.llvm_object) |llvm_object| { @@ -2422,7 +2423,7 @@ pub fn deinit(zcu: *Zcu) void { } for (0..zcu.import_table.entries.len) |file_index_usize| { const file_index: File.Index = @enumFromInt(file_index_usize); - zcu.destroyFile(file_index); + pt.destroyFile(file_index); } zcu.import_table.deinit(gpa); @@ -2497,68 +2498,9 @@ pub fn deinit(zcu: *Zcu) void { zcu.all_references.deinit(gpa); zcu.free_references.deinit(gpa); - { - var it = zcu.intern_pool.allocated_namespaces.iterator(0); - while (it.next()) |namespace| { - namespace.decls.deinit(gpa); - namespace.usingnamespace_set.deinit(gpa); - } - } - zcu.intern_pool.deinit(gpa); } -pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - - { - _ = mod.test_functions.swapRemove(decl_index); - if (mod.global_assembly.fetchSwapRemove(decl_index)) |kv| { - gpa.free(kv.value); - } - } - - ip.destroyDecl(gpa, decl_index); - - if (mod.emit_h) |mod_emit_h| { - const decl_emit_h = mod_emit_h.declPtr(decl_index); - decl_emit_h.fwd_decl.deinit(gpa); - decl_emit_h.* = undefined; - } -} - -fn deinitFile(zcu: *Zcu, file_index: File.Index) void { - const gpa = zcu.gpa; - const file = zcu.fileByIndex(file_index); - const is_builtin = file.mod.isBuiltin(); - log.debug("deinit File {s}", .{file.sub_file_path}); - if (is_builtin) { - file.unloadTree(gpa); - file.unloadZir(gpa); - } else { - gpa.free(file.sub_file_path); - file.unload(gpa); - } - file.references.deinit(gpa); - if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| { - zcu.destroyDecl(root_decl); - } - if (file.prev_zir) |prev_zir| { - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - } - file.* = undefined; -} - -pub fn destroyFile(zcu: *Zcu, file_index: File.Index) void { - const gpa = zcu.gpa; - const file = zcu.fileByIndex(file_index); - const is_builtin = file.mod.isBuiltin(); - zcu.deinitFile(file_index); - if (!is_builtin) gpa.destroy(file); -} - pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { return mod.intern_pool.declPtr(index); } @@ -3269,13 +3211,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) return bin; } -/// Cancel the creation of an anon decl and delete any references to it. -/// If other decls depend on this decl, they must be aborted first. 
-pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { - assert(!mod.declIsRoot(decl_index)); - mod.destroyDecl(decl_index); -} - /// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of /// this `AnalUnit` will cause them to be re-created (or not). pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { @@ -3357,42 +3292,6 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit gop.value_ptr.* = @intCast(ref_idx); } -pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { - return mod.intern_pool.createNamespace(mod.gpa, initialization); -} - -pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { - return mod.intern_pool.destroyNamespace(mod.gpa, index); -} - -pub fn allocateNewDecl(zcu: *Zcu, namespace: Namespace.Index) !Decl.Index { - const gpa = zcu.gpa; - const decl_index = try zcu.intern_pool.createDecl(gpa, .{ - .name = undefined, - .src_namespace = namespace, - .has_tv = false, - .owns_tv = false, - .val = undefined, - .alignment = undefined, - .@"linksection" = .none, - .@"addrspace" = .generic, - .analysis = .unreferenced, - .zir_decl_index = .none, - .is_pub = false, - .is_exported = false, - .kind = .anon, - }); - - if (zcu.emit_h) |zcu_emit_h| { - if (@intFromEnum(decl_index) >= zcu_emit_h.allocated_emit_h.len) { - try zcu_emit_h.allocated_emit_h.append(gpa, .{}); - assert(@intFromEnum(decl_index) == zcu_emit_h.allocated_emit_h.len); - } - } - - return decl_index; -} - pub fn getErrorValue( mod: *Module, name: InternPool.NullTerminatedString, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index f8a3104dc0be..a46f136a44fe 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -5,6 +5,58 @@ tid: Id, pub const Id = if (InternPool.single_threaded) enum { main } else enum(u8) { main, _ }; +pub fn destroyDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + + { + _ = zcu.test_functions.swapRemove(decl_index); + if (zcu.global_assembly.fetchSwapRemove(decl_index)) |kv| { + gpa.free(kv.value); + } + } + + pt.zcu.intern_pool.destroyDecl(pt.tid, decl_index); + + if (zcu.emit_h) |zcu_emit_h| { + const decl_emit_h = zcu_emit_h.declPtr(decl_index); + decl_emit_h.fwd_decl.deinit(gpa); + decl_emit_h.* = undefined; + } +} + +fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const file = zcu.fileByIndex(file_index); + const is_builtin = file.mod.isBuiltin(); + log.debug("deinit File {s}", .{file.sub_file_path}); + if (is_builtin) { + file.unloadTree(gpa); + file.unloadZir(gpa); + } else { + gpa.free(file.sub_file_path); + file.unload(gpa); + } + file.references.deinit(gpa); + if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| { + pt.zcu.intern_pool.destroyDecl(pt.tid, root_decl); + } + if (file.prev_zir) |prev_zir| { + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + } + file.* = undefined; +} + +pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { + const gpa = pt.zcu.gpa; + const file = pt.zcu.fileByIndex(file_index); + const is_builtin = file.mod.isBuiltin(); + pt.deinitFile(file_index); + if (!is_builtin) gpa.destroy(file); +} + pub fn astGenFile( pt: Zcu.PerThread, file: *Zcu.File, @@ -930,14 +982,14 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type 
gains an // InternPool index. - const new_namespace_index = try zcu.createNamespace(.{ + const new_namespace_index = try pt.createNamespace(.{ .parent = .none, .decl_index = undefined, .file_scope = file_index, }); - errdefer zcu.destroyNamespace(new_namespace_index); + errdefer pt.destroyNamespace(new_namespace_index); - const new_decl_index = try zcu.allocateNewDecl(new_namespace_index); + const new_decl_index = try pt.allocateNewDecl(new_namespace_index); const new_decl = zcu.declPtr(new_decl_index); errdefer @panic("TODO error handling"); @@ -1380,6 +1432,13 @@ pub fn embedFile( return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); } +/// Cancel the creation of an anon decl and delete any references to it. +/// If other decls depend on this decl, they must be aborted first. +pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { + assert(!pt.zcu.declIsRoot(decl_index)); + pt.destroyDecl(decl_index); +} + /// Finalize the creation of an anon decl. pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { @@ -1674,7 +1733,7 @@ const ScanDeclIter = struct { break :decl_index .{ was_exported, decl_index }; } else decl_index: { // Create and set up a new Decl. - const new_decl_index = try zcu.allocateNewDecl(namespace_index); + const new_decl_index = try pt.allocateNewDecl(namespace_index); const new_decl = zcu.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; @@ -1981,6 +2040,43 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All }; } +pub fn createNamespace(pt: Zcu.PerThread, initialization: Zcu.Namespace) !Zcu.Namespace.Index { + return pt.zcu.intern_pool.createNamespace(pt.zcu.gpa, pt.tid, initialization); +} + +pub fn destroyNamespace(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) void { + return pt.zcu.intern_pool.destroyNamespace(pt.tid, namespace_index); +} + +pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.Decl.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const decl_index = try zcu.intern_pool.createDecl(gpa, pt.tid, .{ + .name = undefined, + .src_namespace = namespace, + .has_tv = false, + .owns_tv = false, + .val = undefined, + .alignment = undefined, + .@"linksection" = .none, + .@"addrspace" = .generic, + .analysis = .unreferenced, + .zir_decl_index = .none, + .is_pub = false, + .is_exported = false, + .kind = .anon, + }); + + if (zcu.emit_h) |zcu_emit_h| { + if (@intFromEnum(decl_index) >= zcu_emit_h.allocated_emit_h.len) { + try zcu_emit_h.allocated_emit_h.append(gpa, .{}); + assert(@intFromEnum(decl_index) == zcu_emit_h.allocated_emit_h.len); + } + } + + return decl_index; +} + fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { switch (file.status) { .success_zir, .retryable_failure => {}, From 6446596ba106f7ec528eab3ac64e8f3dba5dfd4f Mon Sep 17 00:00:00 2001 From: mochalins <117967760+mochalins@users.noreply.github.com> Date: Tue, 9 Jul 2024 10:48:16 +0900 Subject: [PATCH 086/152] fix: Update `spawn`'s' `runFn` signature --- lib/std/Thread/Pool.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 179f2f8521c4..1a67b9735f47 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -223,7 +223,7 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void { pool: *Pool, run_node: 
RunQueue.Node = .{ .data = .{ .runFn = runFn } }, - fn runFn(runnable: *Runnable) void { + fn runFn(runnable: *Runnable, _: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); From c8e00953623bd3f4a12c8654a83a1b6cac2b2b2f Mon Sep 17 00:00:00 2001 From: mochalins <117967760+mochalins@users.noreply.github.com> Date: Tue, 9 Jul 2024 11:25:19 +0900 Subject: [PATCH 087/152] test: Add `spawn` behavior test --- lib/std/Thread.zig | 1 + lib/std/Thread/Pool.zig | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 25261053ee6f..3c2c9251d04b 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -1465,6 +1465,7 @@ test { _ = Semaphore; _ = Condition; _ = RwLock; + _ = Pool; } fn testIncrementNotify(value: *usize, event: *ResetEvent) void { diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 1a67b9735f47..86bac7ce46b3 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -254,6 +254,27 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void { pool.cond.signal(); } +test spawn { + const TestFn = struct { + fn checkRun(completed: *bool) void { + completed.* = true; + } + }; + + var completed: bool = false; + + { + var pool: Pool = undefined; + try pool.init(.{ + .allocator = std.testing.allocator, + }); + defer pool.deinit(); + try pool.spawn(TestFn.checkRun, .{&completed}); + } + + try std.testing.expectEqual(true, completed); +} + fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); From 9363e995fcf49a9496523c857fa925539fc7c2d6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 14:18:23 -0700 Subject: [PATCH 088/152] std.Progress: slightly better atomic memcpy Let's at least do aligned usize loads/stores where possible. --- lib/std/Progress.zig | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index dcbb12e69bc2..bb5adcefd1e0 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -90,7 +90,7 @@ pub const Node = struct { /// 0 means unknown. /// Little endian. estimated_total_count: u32, - name: [max_name_len]u8, + name: [max_name_len]u8 align(@alignOf(usize)), /// Not thread-safe. 
fn getIpcFd(s: Storage) ?posix.fd_t { @@ -288,8 +288,9 @@ pub const Node = struct { @atomicStore(u32, &storage.completed_count, 0, .monotonic); @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, estimated_total_items), .monotonic); const name_len = @min(max_name_len, name.len); - for (storage.name[0..name_len], name[0..name_len]) |*dest, src| @atomicStore(u8, dest, src, .monotonic); - for (storage.name[name_len..]) |*dest| @atomicStore(u8, dest, 0, .monotonic); + copyAtomicStore(storage.name[0..name_len], name[0..name_len]); + if (name_len < storage.name.len) + @atomicStore(u8, &storage.name[name_len], 0, .monotonic); const parent_ptr = parentByIndex(free_index); assert(parent_ptr.* == .unused); @@ -763,7 +764,7 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); while (begin_parent != .unused) { const dest_storage = &serialized_buffer.storage[serialized_len]; - for (&dest_storage.name, &storage_ptr.name) |*dest, *src| dest.* = @atomicLoad(u8, src, .monotonic); + copyAtomicLoad(&dest_storage.name, &storage_ptr.name); dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); @@ -1384,3 +1385,29 @@ const have_sigwinch = switch (builtin.os.tag) { /// stderr mutex is held still dumps the stack trace and other debug /// information. var stderr_mutex = std.Thread.Mutex.Recursive.init; + +fn copyAtomicStore(dest: []align(@alignOf(usize)) u8, src: []const u8) void { + assert(dest.len == src.len); + const chunked_len = dest.len / @sizeOf(usize); + const dest_chunked: []usize = @as([*]usize, @ptrCast(dest))[0..chunked_len]; + const src_chunked: []align(1) const usize = @as([*]align(1) const usize, @ptrCast(src))[0..chunked_len]; + for (dest_chunked, src_chunked) |*d, s| { + @atomicStore(usize, d, s, .monotonic); + } + const remainder_start = chunked_len * @sizeOf(usize); + for (dest[remainder_start..], src[remainder_start..]) |*d, s| { + @atomicStore(u8, d, s, .monotonic); + } +} + +fn copyAtomicLoad( + dest: *align(@alignOf(usize)) [Node.max_name_len]u8, + src: *align(@alignOf(usize)) const [Node.max_name_len]u8, +) void { + const chunked_len = @divExact(dest.len, @sizeOf(usize)); + const dest_chunked: *[chunked_len]usize = @ptrCast(dest); + const src_chunked: *const [chunked_len]usize = @ptrCast(src); + for (dest_chunked, src_chunked) |*d, *s| { + d.* = @atomicLoad(usize, s, .monotonic); + } +} From f58ee387c7c9a512d802f819f870190c900cb6e2 Mon Sep 17 00:00:00 2001 From: mochalins <117967760+mochalins@users.noreply.github.com> Date: Sun, 23 Jun 2024 14:23:53 +0900 Subject: [PATCH 089/152] fix: Use `std.os.windows.poll` rather than `libc` --- lib/std/posix.zig | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 35052ab5791a..d244dad51d10 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -6479,33 +6479,31 @@ pub const PollError = error{ } || UnexpectedError; pub fn poll(fds: []pollfd, timeout: i32) PollError!usize { + if (native_os == .windows) { + switch (windows.poll(fds.ptr, @intCast(fds.len), timeout)) { + windows.ws2_32.SOCKET_ERROR => switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAENOBUFS => 
return error.SystemResources, + // TODO: handle more errors + else => |err| return windows.unexpectedWSAError(err), + }, + else => |rc| return @intCast(rc), + } + } while (true) { const fds_count = cast(nfds_t, fds.len) orelse return error.SystemResources; const rc = system.poll(fds.ptr, fds_count, timeout); - if (native_os == .windows) { - if (rc == windows.ws2_32.SOCKET_ERROR) { - switch (windows.ws2_32.WSAGetLastError()) { - .WSANOTINITIALISED => unreachable, - .WSAENETDOWN => return error.NetworkSubsystemFailed, - .WSAENOBUFS => return error.SystemResources, - // TODO: handle more errors - else => |err| return windows.unexpectedWSAError(err), - } - } else { - return @intCast(rc); - } - } else { - switch (errno(rc)) { - .SUCCESS => return @intCast(rc), - .FAULT => unreachable, - .INTR => continue, - .INVAL => unreachable, - .NOMEM => return error.SystemResources, - else => |err| return unexpectedErrno(err), - } + switch (errno(rc)) { + .SUCCESS => return @intCast(rc), + .FAULT => unreachable, + .INTR => continue, + .INVAL => unreachable, + .NOMEM => return error.SystemResources, + else => |err| return unexpectedErrno(err), } - unreachable; } + unreachable; } pub const PPollError = error{ From 95d9292a7a09ed883e65510ec054619747315c48 Mon Sep 17 00:00:00 2001 From: kcbanner Date: Wed, 10 Jul 2024 02:25:19 -0400 Subject: [PATCH 090/152] dwarf: use StackIterator.MemoryAccessor to check memory accesses instead of isValidMemory --- lib/std/debug.zig | 6 +- lib/std/dwarf.zig | 138 ++++++++++++++++++++++------------ lib/std/dwarf/call_frame.zig | 5 +- lib/std/dwarf/expressions.zig | 15 +++- 4 files changed, 107 insertions(+), 57 deletions(-) diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 5fc27a02f403..29294be0d9a5 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -610,7 +610,7 @@ pub const StackIterator = struct { var iterator = init(first_address, null); iterator.unwind_state = .{ .debug_info = debug_info, - .dwarf_context = try DW.UnwindContext.init(debug_info.allocator, context, &isValidMemory), + .dwarf_context = try DW.UnwindContext.init(debug_info.allocator, context), }; return iterator; @@ -793,7 +793,7 @@ pub const StackIterator = struct { // __unwind_info is a requirement for unwinding on Darwin. It may fall back to DWARF, but unwinding // via DWARF before attempting to use the compact unwind info will produce incorrect results. 
if (module.unwind_info) |unwind_info| { - if (DW.unwindFrameMachO(&unwind_state.dwarf_context, unwind_info, module.eh_frame, module.base_address)) |return_address| { + if (DW.unwindFrameMachO(&unwind_state.dwarf_context, &it.ma, unwind_info, module.eh_frame, module.base_address)) |return_address| { return return_address; } else |err| { if (err != error.RequiresDWARFUnwind) return err; @@ -804,7 +804,7 @@ pub const StackIterator = struct { } if (try module.getDwarfInfoForAddress(unwind_state.debug_info.allocator, unwind_state.dwarf_context.pc)) |di| { - return di.unwindFrame(&unwind_state.dwarf_context, null); + return di.unwindFrame(&unwind_state.dwarf_context, &it.ma, null); } else return error.MissingDebugInfo; } diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 9bd15b05b4dc..25171f51b9d5 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -475,8 +475,8 @@ const UnitHeader = struct { header_length: u4, unit_length: u64, }; -fn readUnitHeader(fbr: *FixedBufferReader) !UnitHeader { - return switch (try fbr.readInt(u32)) { +fn readUnitHeader(fbr: *FixedBufferReader, opt_ma: ?*debug.StackIterator.MemoryAccessor) !UnitHeader { + return switch (try if (opt_ma) |ma| fbr.readIntChecked(u32, ma) else fbr.readInt(u32)) { 0...0xfffffff0 - 1 => |unit_length| .{ .format = .@"32", .header_length = 4, @@ -486,7 +486,7 @@ fn readUnitHeader(fbr: *FixedBufferReader) !UnitHeader { 0xffffffff => .{ .format = .@"64", .header_length = 12, - .unit_length = try fbr.readInt(u64), + .unit_length = try if (opt_ma) |ma| fbr.readIntChecked(u64, ma) else fbr.readInt(u64), }, }; } @@ -663,7 +663,7 @@ pub const DwarfInfo = struct { while (this_unit_offset < fbr.buf.len) { try fbr.seekTo(this_unit_offset); - const unit_header = try readUnitHeader(&fbr); + const unit_header = try readUnitHeader(&fbr, null); if (unit_header.unit_length == 0) return; const next_offset = unit_header.header_length + unit_header.unit_length; @@ -853,7 +853,7 @@ pub const DwarfInfo = struct { while (this_unit_offset < fbr.buf.len) { try fbr.seekTo(this_unit_offset); - const unit_header = try readUnitHeader(&fbr); + const unit_header = try readUnitHeader(&fbr, null); if (unit_header.unit_length == 0) return; const next_offset = unit_header.header_length + unit_header.unit_length; @@ -1200,7 +1200,7 @@ pub const DwarfInfo = struct { var fbr: FixedBufferReader = .{ .buf = di.section(.debug_line).?, .endian = di.endian }; try fbr.seekTo(line_info_offset); - const unit_header = try readUnitHeader(&fbr); + const unit_header = try readUnitHeader(&fbr, null); if (unit_header.unit_length == 0) return missingDwarf(); const next_offset = unit_header.header_length + unit_header.unit_length; @@ -1532,7 +1532,7 @@ pub const DwarfInfo = struct { if (di.section(frame_section)) |section_data| { var fbr: FixedBufferReader = .{ .buf = section_data, .endian = di.endian }; while (fbr.pos < fbr.buf.len) { - const entry_header = try EntryHeader.read(&fbr, frame_section); + const entry_header = try EntryHeader.read(&fbr, null, frame_section); switch (entry_header.type) { .cie => { const cie = try CommonInformationEntry.parse( @@ -1580,7 +1580,7 @@ pub const DwarfInfo = struct { /// /// `explicit_fde_offset` is for cases where the FDE offset is known, such as when __unwind_info /// defers unwinding to DWARF. This is an offset into the `.eh_frame` section. 
- pub fn unwindFrame(di: *const DwarfInfo, context: *UnwindContext, explicit_fde_offset: ?usize) !usize { + pub fn unwindFrame(di: *const DwarfInfo, context: *UnwindContext, ma: *debug.StackIterator.MemoryAccessor, explicit_fde_offset: ?usize) !usize { if (!comptime abi.supportsUnwinding(builtin.target)) return error.UnsupportedCpuArchitecture; if (context.pc == 0) return 0; @@ -1599,14 +1599,14 @@ pub const DwarfInfo = struct { .endian = di.endian, }; - const fde_entry_header = try EntryHeader.read(&fbr, dwarf_section); + const fde_entry_header = try EntryHeader.read(&fbr, null, dwarf_section); if (fde_entry_header.type != .fde) return error.MissingFDE; const cie_offset = fde_entry_header.type.fde; try fbr.seekTo(cie_offset); fbr.endian = native_endian; - const cie_entry_header = try EntryHeader.read(&fbr, dwarf_section); + const cie_entry_header = try EntryHeader.read(&fbr, null, dwarf_section); if (cie_entry_header.type != .cie) return badDwarf(); cie = try CommonInformationEntry.parse( @@ -1631,7 +1631,7 @@ pub const DwarfInfo = struct { } else if (di.eh_frame_hdr) |header| { const eh_frame_len = if (di.section(.eh_frame)) |eh_frame| eh_frame.len else null; try header.findEntry( - context.isValidMemory, + ma, eh_frame_len, @intFromPtr(di.section(.eh_frame_hdr).?.ptr), context.pc, @@ -1656,7 +1656,7 @@ pub const DwarfInfo = struct { var expression_context: expressions.ExpressionContext = .{ .format = cie.format, - .isValidMemory = context.isValidMemory, + .memory_accessor = ma, .compile_unit = di.findCompileUnit(fde.pc_begin) catch null, .thread_context = context.thread_context, .reg_context = context.reg_context, @@ -1691,7 +1691,7 @@ pub const DwarfInfo = struct { else => return error.InvalidCFARule, }; - if (!context.isValidMemory(context.cfa.?)) return error.InvalidCFA; + if (ma.load(usize, context.cfa.?) == null) return error.InvalidCFA; expression_context.cfa = context.cfa; // Buffering the modifications is done because copying the thread context is not portable, @@ -1730,6 +1730,7 @@ pub const DwarfInfo = struct { try column.resolveValue( context, expression_context, + ma, src, ); } @@ -1788,7 +1789,13 @@ const macho = std.macho; /// Unwind a frame using MachO compact unwind info (from __unwind_info). /// If the compact encoding can't encode a way to unwind a frame, it will /// defer unwinding to DWARF, in which case `.eh_frame` will be used if available. 
-pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_frame: ?[]const u8, module_base_address: usize) !usize { +pub fn unwindFrameMachO( + context: *UnwindContext, + ma: *debug.StackIterator.MemoryAccessor, + unwind_info: []const u8, + eh_frame: ?[]const u8, + module_base_address: usize, +) !usize { const header = mem.bytesAsValue( macho.unwind_info_section_header, unwind_info[0..@sizeOf(macho.unwind_info_section_header)], @@ -1950,7 +1957,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra const new_sp = fp + 2 * @sizeOf(usize); // Verify the stack range we're about to read register values from - if (!context.isValidMemory(new_sp) or !context.isValidMemory(fp - frame_offset + max_reg * @sizeOf(usize))) return error.InvalidUnwindInfo; + if (ma.load(usize, new_sp) == null or ma.load(usize, fp - frame_offset + max_reg * @sizeOf(usize)) == null) return error.InvalidUnwindInfo; const ip_ptr = fp + @sizeOf(usize); const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*; @@ -1981,7 +1988,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra module_base_address + entry.function_offset + encoding.value.x86_64.frameless.stack.indirect.sub_offset; - if (!context.isValidMemory(sub_offset_addr)) return error.InvalidUnwindInfo; + if (ma.load(usize, sub_offset_addr) == null) return error.InvalidUnwindInfo; // `sub_offset_addr` points to the offset of the literal within the instruction const sub_operand = @as(*align(1) const u32, @ptrFromInt(sub_offset_addr)).*; @@ -2023,7 +2030,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra } var reg_addr = sp + stack_size - @sizeOf(usize) * @as(usize, reg_count + 1); - if (!context.isValidMemory(reg_addr)) return error.InvalidUnwindInfo; + if (ma.load(usize, reg_addr) == null) return error.InvalidUnwindInfo; for (0..reg_count) |i| { const reg_number = try compactUnwindToDwarfRegNumber(registers[i]); (try abi.regValueNative(usize, context.thread_context, reg_number, reg_context)).* = @as(*const usize, @ptrFromInt(reg_addr)).*; @@ -2035,7 +2042,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*; const new_sp = ip_ptr + @sizeOf(usize); - if (!context.isValidMemory(new_sp)) return error.InvalidUnwindInfo; + if (ma.load(usize, new_sp) == null) return error.InvalidUnwindInfo; (try abi.regValueNative(usize, context.thread_context, abi.spRegNum(reg_context), reg_context)).* = new_sp; (try abi.regValueNative(usize, context.thread_context, abi.ipRegNum(), reg_context)).* = new_ip; @@ -2043,7 +2050,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra break :blk new_ip; }, .DWARF => { - return unwindFrameMachODwarf(context, eh_frame orelse return error.MissingEhFrame, @intCast(encoding.value.x86_64.dwarf)); + return unwindFrameMachODwarf(context, ma, eh_frame orelse return error.MissingEhFrame, @intCast(encoding.value.x86_64.dwarf)); }, }, .aarch64 => switch (encoding.mode.arm64) { @@ -2052,12 +2059,12 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra const sp = (try abi.regValueNative(usize, context.thread_context, abi.spRegNum(reg_context), reg_context)).*; const new_sp = sp + encoding.value.arm64.frameless.stack_size * 16; const new_ip = (try abi.regValueNative(usize, context.thread_context, 30, reg_context)).*; - if (!context.isValidMemory(new_sp)) return error.InvalidUnwindInfo; + if 
(ma.load(usize, new_sp) == null) return error.InvalidUnwindInfo; (try abi.regValueNative(usize, context.thread_context, abi.spRegNum(reg_context), reg_context)).* = new_sp; break :blk new_ip; }, .DWARF => { - return unwindFrameMachODwarf(context, eh_frame orelse return error.MissingEhFrame, @intCast(encoding.value.arm64.dwarf)); + return unwindFrameMachODwarf(context, ma, eh_frame orelse return error.MissingEhFrame, @intCast(encoding.value.arm64.dwarf)); }, .FRAME => blk: { const fp = (try abi.regValueNative(usize, context.thread_context, abi.fpRegNum(reg_context), reg_context)).*; @@ -2069,7 +2076,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra @popCount(@as(u4, @bitCast(encoding.value.arm64.frame.d_reg_pairs))); const min_reg_addr = fp - num_restored_pairs * 2 * @sizeOf(usize); - if (!context.isValidMemory(new_sp) or !context.isValidMemory(min_reg_addr)) return error.InvalidUnwindInfo; + if (ma.load(usize, new_sp) == null or ma.load(usize, min_reg_addr) == null) return error.InvalidUnwindInfo; var reg_addr = fp - @sizeOf(usize); inline for (@typeInfo(@TypeOf(encoding.value.arm64.frame.x_reg_pairs)).Struct.fields, 0..) |field, i| { @@ -2114,7 +2121,7 @@ pub fn unwindFrameMachO(context: *UnwindContext, unwind_info: []const u8, eh_fra return new_ip; } -fn unwindFrameMachODwarf(context: *UnwindContext, eh_frame: []const u8, fde_offset: usize) !usize { +fn unwindFrameMachODwarf(context: *UnwindContext, ma: *debug.StackIterator.MemoryAccessor, eh_frame: []const u8, fde_offset: usize) !usize { var di = DwarfInfo{ .endian = native_endian, .is_macho = true, @@ -2126,7 +2133,7 @@ fn unwindFrameMachODwarf(context: *UnwindContext, eh_frame: []const u8, fde_offs .owned = false, }; - return di.unwindFrame(context, fde_offset); + return di.unwindFrame(context, ma, fde_offset); } pub const UnwindContext = struct { @@ -2135,12 +2142,21 @@ pub const UnwindContext = struct { pc: usize, thread_context: *debug.ThreadContext, reg_context: abi.RegisterContext, - isValidMemory: *const fn (address: usize) bool, vm: call_frame.VirtualMachine, stack_machine: expressions.StackMachine(.{ .call_frame_context = true }), - pub fn init(allocator: mem.Allocator, thread_context: *const debug.ThreadContext, isValidMemory: *const fn (address: usize) bool) !UnwindContext { - const pc = abi.stripInstructionPtrAuthCode((try abi.regValueNative(usize, thread_context, abi.ipRegNum(), null)).*); + pub fn init( + allocator: mem.Allocator, + thread_context: *const debug.ThreadContext, + ) !UnwindContext { + const pc = abi.stripInstructionPtrAuthCode( + (try abi.regValueNative( + usize, + thread_context, + abi.ipRegNum(), + null, + )).*, + ); const context_copy = try allocator.create(debug.ThreadContext); debug.copyContext(thread_context, context_copy); @@ -2151,7 +2167,6 @@ pub const UnwindContext = struct { .pc = pc, .thread_context = context_copy, .reg_context = undefined, - .isValidMemory = isValidMemory, .vm = .{}, .stack_machine = .{}, }; @@ -2297,25 +2312,26 @@ pub const ExceptionFrameHeader = struct { fn isValidPtr( self: ExceptionFrameHeader, + comptime T: type, ptr: usize, - isValidMemory: *const fn (address: usize) bool, + ma: *debug.StackIterator.MemoryAccessor, eh_frame_len: ?usize, ) bool { if (eh_frame_len) |len| { - return ptr >= self.eh_frame_ptr and ptr < self.eh_frame_ptr + len; + return ptr >= self.eh_frame_ptr and ptr <= self.eh_frame_ptr + len - @sizeOf(T); } else { - return isValidMemory(ptr); + return ma.load(T, ptr) != null; } } /// Find an entry by binary searching the 
eh_frame_hdr section. /// /// Since the length of the eh_frame section (`eh_frame_len`) may not be known by the caller, - /// `isValidMemory` will be called before accessing any memory referenced by - /// the header entries. If `eh_frame_len` is provided, then these checks can be skipped. + /// MemoryAccessor will be used to verify readability of the header entries. + /// If `eh_frame_len` is provided, then these checks can be skipped. pub fn findEntry( self: ExceptionFrameHeader, - isValidMemory: *const fn (address: usize) bool, + ma: *debug.StackIterator.MemoryAccessor, eh_frame_len: ?usize, eh_frame_hdr_ptr: usize, pc: usize, @@ -2364,14 +2380,9 @@ pub const ExceptionFrameHeader = struct { .data_rel_base = eh_frame_hdr_ptr, }) orelse return badDwarf()) orelse return badDwarf(); - // Verify the length fields of the FDE header are readable - if (!self.isValidPtr(fde_ptr, isValidMemory, eh_frame_len) or fde_ptr < self.eh_frame_ptr) return badDwarf(); - - var fde_entry_header_len: usize = 4; - if (!self.isValidPtr(fde_ptr + 3, isValidMemory, eh_frame_len)) return badDwarf(); - if (self.isValidPtr(fde_ptr + 11, isValidMemory, eh_frame_len)) fde_entry_header_len = 12; + if (fde_ptr < self.eh_frame_ptr) return badDwarf(); - // Even if eh_frame_len is not specified, all ranges accssed are checked by isValidPtr + // Even if eh_frame_len is not specified, all ranges accssed are checked via MemoryAccessor const eh_frame = @as([*]const u8, @ptrFromInt(self.eh_frame_ptr))[0 .. eh_frame_len orelse math.maxInt(u32)]; const fde_offset = fde_ptr - self.eh_frame_ptr; @@ -2381,15 +2392,15 @@ pub const ExceptionFrameHeader = struct { .endian = native_endian, }; - const fde_entry_header = try EntryHeader.read(&eh_frame_fbr, .eh_frame); - if (!self.isValidPtr(@intFromPtr(&fde_entry_header.entry_bytes[fde_entry_header.entry_bytes.len - 1]), isValidMemory, eh_frame_len)) return badDwarf(); + const fde_entry_header = try EntryHeader.read(&eh_frame_fbr, if (eh_frame_len == null) ma else null, .eh_frame); + if (!self.isValidPtr(u8, @intFromPtr(&fde_entry_header.entry_bytes[fde_entry_header.entry_bytes.len - 1]), ma, eh_frame_len)) return badDwarf(); if (fde_entry_header.type != .fde) return badDwarf(); // CIEs always come before FDEs (the offset is a subtraction), so we can assume this memory is readable const cie_offset = fde_entry_header.type.fde; try eh_frame_fbr.seekTo(cie_offset); - const cie_entry_header = try EntryHeader.read(&eh_frame_fbr, .eh_frame); - if (!self.isValidPtr(@intFromPtr(&cie_entry_header.entry_bytes[cie_entry_header.entry_bytes.len - 1]), isValidMemory, eh_frame_len)) return badDwarf(); + const cie_entry_header = try EntryHeader.read(&eh_frame_fbr, if (eh_frame_len == null) ma else null, .eh_frame); + if (!self.isValidPtr(u8, @intFromPtr(&cie_entry_header.entry_bytes[cie_entry_header.entry_bytes.len - 1]), ma, eh_frame_len)) return badDwarf(); if (cie_entry_header.type != .cie) return badDwarf(); cie.* = try CommonInformationEntry.parse( @@ -2434,11 +2445,15 @@ pub const EntryHeader = struct { /// Reads a header for either an FDE or a CIE, then advances the fbr to the position after the trailing structure. /// `fbr` must be a FixedBufferReader backed by either the .eh_frame or .debug_frame sections. 
- pub fn read(fbr: *FixedBufferReader, dwarf_section: DwarfSection) !EntryHeader { + pub fn read( + fbr: *FixedBufferReader, + opt_ma: ?*debug.StackIterator.MemoryAccessor, + dwarf_section: DwarfSection, + ) !EntryHeader { assert(dwarf_section == .eh_frame or dwarf_section == .debug_frame); const length_offset = fbr.pos; - const unit_header = try readUnitHeader(fbr); + const unit_header = try readUnitHeader(fbr, opt_ma); const unit_length = math.cast(usize, unit_header.unit_length) orelse return badDwarf(); if (unit_length == 0) return .{ .length_offset = length_offset, @@ -2450,7 +2465,10 @@ pub const EntryHeader = struct { const end_offset = start_offset + unit_length; defer fbr.pos = end_offset; - const id = try fbr.readAddress(unit_header.format); + const id = try if (opt_ma) |ma| + fbr.readAddressChecked(unit_header.format, ma) + else + fbr.readAddress(unit_header.format); const entry_bytes = fbr.buf[fbr.pos..end_offset]; const cie_id: u64 = switch (dwarf_section) { .eh_frame => CommonInformationEntry.eh_id, @@ -2732,7 +2750,7 @@ pub const FixedBufferReader = struct { pos: usize = 0, endian: std.builtin.Endian, - pub const Error = error{ EndOfBuffer, Overflow }; + pub const Error = error{ EndOfBuffer, Overflow, InvalidBuffer }; fn seekTo(fbr: *FixedBufferReader, pos: u64) Error!void { if (pos > fbr.buf.len) return error.EndOfBuffer; @@ -2761,6 +2779,17 @@ pub const FixedBufferReader = struct { return mem.readInt(T, fbr.buf[fbr.pos..][0..size], fbr.endian); } + fn readIntChecked( + fbr: *FixedBufferReader, + comptime T: type, + ma: *debug.StackIterator.MemoryAccessor, + ) Error!T { + if (ma.load(T, @intFromPtr(fbr.buf[fbr.pos..].ptr)) == null) + return error.InvalidBuffer; + + return readInt(fbr, T); + } + fn readUleb128(fbr: *FixedBufferReader, comptime T: type) Error!T { return std.leb.readUleb128(T, fbr); } @@ -2776,6 +2805,17 @@ pub const FixedBufferReader = struct { }; } + fn readAddressChecked( + fbr: *FixedBufferReader, + format: Format, + ma: *debug.StackIterator.MemoryAccessor, + ) Error!u64 { + return switch (format) { + .@"32" => try fbr.readIntChecked(u32, ma), + .@"64" => try fbr.readIntChecked(u64, ma), + }; + } + fn readBytes(fbr: *FixedBufferReader, len: usize) Error![]const u8 { if (fbr.buf.len - fbr.pos < len) return error.EndOfBuffer; defer fbr.pos += len; diff --git a/lib/std/dwarf/call_frame.zig b/lib/std/dwarf/call_frame.zig index 9a9b2e6d5de6..7aff897cea39 100644 --- a/lib/std/dwarf/call_frame.zig +++ b/lib/std/dwarf/call_frame.zig @@ -365,6 +365,7 @@ pub const VirtualMachine = struct { self: Column, context: *dwarf.UnwindContext, expression_context: dwarf.expressions.ExpressionContext, + ma: *debug.StackIterator.MemoryAccessor, out: []u8, ) !void { switch (self.rule) { @@ -385,7 +386,7 @@ pub const VirtualMachine = struct { .offset => |offset| { if (context.cfa) |cfa| { const addr = try applyOffset(cfa, offset); - if (expression_context.isValidMemory) |isValidMemory| if (!isValidMemory(addr)) return error.InvalidAddress; + if (ma.load(usize, addr) == null) return error.InvalidAddress; const ptr: *const usize = @ptrFromInt(addr); mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian); } else return error.InvalidCFA; @@ -408,7 +409,7 @@ pub const VirtualMachine = struct { break :blk v.generic; } else return error.NoExpressionValue; - if (!context.isValidMemory(addr)) return error.InvalidExpressionAddress; + if (ma.load(usize, addr) == null) return error.InvalidExpressionAddress; const ptr: *usize = @ptrFromInt(addr); mem.writeInt(usize, 
out[0..@sizeOf(usize)], ptr.*, native_endian); }, diff --git a/lib/std/dwarf/expressions.zig b/lib/std/dwarf/expressions.zig index ab446d052713..f853c5fe5a58 100644 --- a/lib/std/dwarf/expressions.zig +++ b/lib/std/dwarf/expressions.zig @@ -15,8 +15,8 @@ pub const ExpressionContext = struct { /// The dwarf format of the section this expression is in format: dwarf.Format = .@"32", - /// If specified, any addresses will pass through this function before being accessed - isValidMemory: ?*const fn (address: usize) bool = null, + /// If specified, any addresses will pass through this before being accessed + memory_accessor: ?*std.debug.StackIterator.MemoryAccessor = null, /// The compilation unit this expression relates to, if any compile_unit: ?*const dwarf.CompileUnit = null, @@ -460,7 +460,6 @@ pub fn StackMachine(comptime options: ExpressionOptions) type { // This code will need to be updated to handle any architectures that utilize this. _ = addr_space_identifier; - if (context.isValidMemory) |isValidMemory| if (!isValidMemory(addr)) return error.InvalidExpression; const size = switch (opcode) { OP.deref, OP.xderef, @@ -474,6 +473,16 @@ pub fn StackMachine(comptime options: ExpressionOptions) type { else => unreachable, }; + if (context.memory_accessor) |memory_accessor| { + if (!switch (size) { + 1 => memory_accessor.load(u8, addr) != null, + 2 => memory_accessor.load(u16, addr) != null, + 4 => memory_accessor.load(u32, addr) != null, + 8 => memory_accessor.load(u64, addr) != null, + else => return error.InvalidExpression, + }) return error.InvalidExpression; + } + const value: addr_type = std.math.cast(addr_type, @as(u64, switch (size) { 1 => @as(*const u8, @ptrFromInt(addr)).*, 2 => @as(*const u16, @ptrFromInt(addr)).*, From 667b4f9054cd0d4c8e9912bddc18049d09107678 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 9 Jul 2024 18:48:37 -0400 Subject: [PATCH 091/152] Zcu: cache fully qualified name on Decl This avoids needing to mutate the intern pool from backends.
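For illustration, the access pattern this enables looks roughly as follows. This is a simplified sketch stitched together from call sites touched by this commit, not a verbatim excerpt:

    // Before: each consumer interned the fully qualified name on demand,
    // which required mutating the intern pool from backend code:
    const fqn = try decl.fullyQualifiedName(pt);
    try self.spv.debugName(result_id, fqn.toSlice(ip));

    // After: the name is interned once, when the Decl is created or scanned:
    decl.fqn = try namespace.internFullyQualifiedName(pt, decl_name);

    // ...and backends read the cached string without writing to the pool:
    try self.spv.debugName(result_id, decl.fqn.toSlice(ip));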
--- src/InternPool.zig | 1 + src/Sema.zig | 35 +++++++++++----------- src/Type.zig | 14 ++++----- src/Zcu.zig | 44 ++++----------------------- src/Zcu/PerThread.zig | 58 +++++++++++++++++++++++------------- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 4 +-- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 20 ++++--------- src/codegen/llvm.zig | 41 +++++++++++-------------- src/codegen/spirv.zig | 11 +++---- src/link/Coff.zig | 17 +++++------ src/link/Dwarf.zig | 6 ++-- src/link/Elf/ZigObject.zig | 20 ++++++------- src/link/MachO/ZigObject.zig | 24 +++++++-------- src/link/Plan9.zig | 4 +-- src/link/Wasm/ZigObject.zig | 15 ++++------ src/print_value.zig | 4 +-- 18 files changed, 135 insertions(+), 187 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 6f7bb17b141e..f1b7cbd7a559 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -7955,6 +7955,7 @@ fn finishFuncInstance( const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); const decl_index = try ip.createDecl(gpa, tid, .{ .name = undefined, + .fqn = undefined, .src_namespace = fn_owner_decl.src_namespace, .has_tv = true, .owns_tv = true, diff --git a/src/Sema.zig b/src/Sema.zig index b897228f58ce..41087a6360ef 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2878,7 +2878,7 @@ fn createAnonymousDeclTypeNamed( switch (name_strategy) { .anon => {}, // handled after switch .parent => { - try zcu.initNewAnonDecl(new_decl_index, val, block.type_name_ctx); + try pt.initNewAnonDecl(new_decl_index, val, block.type_name_ctx, .none); return new_decl_index; }, .func => func_strat: { @@ -2923,7 +2923,7 @@ fn createAnonymousDeclTypeNamed( try writer.writeByte(')'); const name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); - try zcu.initNewAnonDecl(new_decl_index, val, name); + try pt.initNewAnonDecl(new_decl_index, val, name, .none); return new_decl_index; }, .dbg_var => { @@ -2937,7 +2937,7 @@ fn createAnonymousDeclTypeNamed( const name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); - try zcu.initNewAnonDecl(new_decl_index, val, name); + try pt.initNewAnonDecl(new_decl_index, val, name, .none); return new_decl_index; }, else => {}, @@ -2958,7 +2958,7 @@ fn createAnonymousDeclTypeNamed( const name = ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), }, .no_embedded_nulls) catch unreachable; - try zcu.initNewAnonDecl(new_decl_index, val, name); + try pt.initNewAnonDecl(new_decl_index, val, name, .none); return new_decl_index; } @@ -5527,13 +5527,12 @@ fn failWithBadStructFieldAccess( const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(struct_type.decl.unwrap().?); - const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( field_src, "no field named '{}' in struct '{}'", - .{ field_name.fmt(ip), fqn.fmt(ip) }, + .{ field_name.fmt(ip), decl.fqn.fmt(ip) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(struct_ty.srcLoc(zcu), msg, "struct declared here", .{}); @@ -5554,15 +5553,13 @@ fn failWithBadUnionFieldAccess( const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; - const decl = zcu.declPtr(union_obj.decl); - const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( field_src, "no field named '{}' in union '{}'", - .{ field_name.fmt(ip), fqn.fmt(ip) }, + .{ field_name.fmt(ip), 
decl.fqn.fmt(ip) }, ); errdefer msg.destroy(gpa); try sema.errNote(union_ty.srcLoc(zcu), msg, "union declared here", .{}); @@ -9733,6 +9730,9 @@ fn funcCommon( .generic_owner = sema.generic_owner, .comptime_args = sema.comptime_args, }); + const func_decl = mod.declPtr(ip.indexToKey(func_index).func.owner_decl); + func_decl.fqn = + try ip.namespacePtr(func_decl.src_namespace).internFullyQualifiedName(pt, func_decl.name); return finishFunc( sema, block, @@ -26500,7 +26500,7 @@ fn zirBuiltinExtern( const new_decl_index = try pt.allocateNewDecl(sema.owner_decl.src_namespace); errdefer pt.destroyDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); - try mod.initNewAnonDecl( + try pt.initNewAnonDecl( new_decl_index, Value.fromInterned( if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) @@ -26522,6 +26522,7 @@ fn zirBuiltinExtern( } }), ), options.name, + .none, ); new_decl.owns_tv = true; // Note that this will queue the anon decl for codegen, so that the backend can @@ -36735,24 +36736,23 @@ fn generateUnionTagTypeNumbered( const new_decl_index = try pt.allocateNewDecl(block.namespace); errdefer pt.destroyDecl(new_decl_index); - const fqn = try union_owner_decl.fullyQualifiedName(pt); const name = try ip.getOrPutStringFmt( gpa, pt.tid, "@typeInfo({}).Union.tag_type.?", - .{fqn.fmt(ip)}, + .{union_owner_decl.fqn.fmt(ip)}, .no_embedded_nulls, ); - try mod.initNewAnonDecl( + try pt.initNewAnonDecl( new_decl_index, Value.@"unreachable", name, + name.toOptional(), ); errdefer pt.abortAnonDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - new_decl.name_fully_qualified = true; const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .decl = new_decl_index, @@ -36784,22 +36784,21 @@ fn generateUnionTagTypeSimple( const gpa = sema.gpa; const new_decl_index = new_decl_index: { - const fqn = try union_owner_decl.fullyQualifiedName(pt); const new_decl_index = try pt.allocateNewDecl(block.namespace); errdefer pt.destroyDecl(new_decl_index); const name = try ip.getOrPutStringFmt( gpa, pt.tid, "@typeInfo({}).Union.tag_type.?", - .{fqn.fmt(ip)}, + .{union_owner_decl.fqn.fmt(ip)}, .no_embedded_nulls, ); - try mod.initNewAnonDecl( + try pt.initNewAnonDecl( new_decl_index, Value.@"unreachable", name, + name.toOptional(), ); - mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; errdefer pt.abortAnonDecl(new_decl_index); diff --git a/src/Type.zig b/src/Type.zig index 57ac2310d5c0..b22f8650ab2f 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -268,10 +268,10 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error return; }, .inferred_error_set_type => |func_index| { - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); const owner_decl = mod.funcOwnerDeclPtr(func_index); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + try writer.print("@typeInfo(@typeInfo(@TypeOf({})).Fn.return_type.?).ErrorUnion.error_set", .{ + owner_decl.fqn.fmt(ip), + }); }, .error_set_type => |error_set_type| { const names = error_set_type.names; @@ -334,7 +334,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.decl.unwrap()) |decl_index| { const decl = mod.declPtr(decl_index); - try decl.renderFullyQualifiedName(mod, writer); + try writer.print("{}", .{decl.fqn.fmt(ip)}); } else if 
(ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); try namespace.renderFullyQualifiedName(mod, .empty, writer); @@ -367,15 +367,15 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error .union_type => { const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); + try writer.print("{}", .{decl.fqn.fmt(ip)}); }, .opaque_type => { const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); + try writer.print("{}", .{decl.fqn.fmt(ip)}); }, .enum_type => { const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); + try writer.print("{}", .{decl.fqn.fmt(ip)}); }, .func_type => |fn_info| { if (fn_info.is_noinline) { diff --git a/src/Zcu.zig b/src/Zcu.zig index 92a0765ebb4a..492858134956 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -326,7 +326,10 @@ pub const Reference = struct { }; pub const Decl = struct { + /// Equal to `fqn` if already fully qualified. name: InternPool.NullTerminatedString, + /// Fully qualified name. + fqn: InternPool.NullTerminatedString, /// The most recent Value of the Decl after a successful semantic analysis. /// Populated when `has_tv`. val: Value, @@ -384,8 +387,6 @@ pub const Decl = struct { is_pub: bool, /// Whether the corresponding AST decl has a `export` keyword. is_exported: bool, - /// If true `name` is already fully qualified. - name_fully_qualified: bool = false, /// What kind of a declaration is this. kind: Kind, @@ -408,25 +409,6 @@ pub const Decl = struct { return extra.data.getBodies(@intCast(extra.end), zir); } - pub fn renderFullyQualifiedName(decl: Decl, zcu: *Zcu, writer: anytype) !void { - if (decl.name_fully_qualified) { - try writer.print("{}", .{decl.name.fmt(&zcu.intern_pool)}); - } else { - try zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedName(zcu, decl.name, writer); - } - } - - pub fn renderFullyQualifiedDebugName(decl: Decl, zcu: *Zcu, writer: anytype) !void { - return zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(zcu, decl.name, writer); - } - - pub fn fullyQualifiedName(decl: Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString { - return if (decl.name_fully_qualified) - decl.name - else - pt.zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(pt, decl.name); - } - pub fn typeOf(decl: Decl, zcu: *const Zcu) Type { assert(decl.has_tv); return decl.val.typeOf(zcu); @@ -686,7 +668,7 @@ pub const Namespace = struct { if (name != .empty) try writer.print("{c}{}", .{ sep, name.fmt(&zcu.intern_pool) }); } - pub fn fullyQualifiedName( + pub fn internFullyQualifiedName( ns: Namespace, pt: Zcu.PerThread, name: InternPool.NullTerminatedString, @@ -882,7 +864,7 @@ pub const File = struct { }; } - pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { const gpa = pt.zcu.gpa; const ip = &pt.zcu.intern_pool; const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); @@ -3313,22 +3295,6 @@ pub fn errorSetBits(mod: *Module) u16 { return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error } -pub fn initNewAnonDecl( - mod: *Module, - new_decl_index: Decl.Index, - val: Value, - name: InternPool.NullTerminatedString, -) Allocator.Error!void { - const new_decl = mod.declPtr(new_decl_index); - - 
new_decl.name = name; - new_decl.val = val; - new_decl.alignment = .none; - new_decl.@"linksection" = .none; - new_decl.has_tv = true; - new_decl.analysis = .complete; -} - pub fn errNote( mod: *Module, src_loc: LazySrcLoc, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index f638ffc53822..eaff3231958a 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -548,7 +548,7 @@ pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.Sem }; } - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start(decl.fqn.toSlice(ip), 0); defer decl_prog_node.end(); break :blk pt.semaDecl(decl_index) catch |err| switch (err) { @@ -747,10 +747,9 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - const fqn = try decl.fullyQualifiedName(pt); - std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); + std.debug.print("# Begin Function AIR: {}:\n", .{decl.fqn.fmt(ip)}); @import("../print_air.zig").dump(pt, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); + std.debug.print("# End Function AIR: {}\n\n", .{decl.fqn.fmt(ip)}); } if (std.debug.runtime_safety) { @@ -781,7 +780,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai }; } - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); + const codegen_prog_node = zcu.codegen_prog_node.start(decl.fqn.toSlice(ip), 0); defer codegen_prog_node.end(); if (!air.typesFullyResolved(zcu)) { @@ -996,8 +995,8 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - new_decl.name = try file.fullyQualifiedName(pt); - new_decl.name_fully_qualified = true; + new_decl.fqn = try file.internFullyQualifiedName(pt); + new_decl.name = new_decl.fqn; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.alignment = .none; @@ -1058,10 +1057,8 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); - defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); - } + log.debug("decl name '{}'", .{decl.fqn.fmt(ip)}); + defer log.debug("finish decl name '{}'", .{decl.fqn.fmt(ip)}); const old_has_tv = decl.has_tv; // The following values are ignored if `!old_has_tv` @@ -1728,6 +1725,7 @@ const ScanDeclIter = struct { const was_exported = decl.is_exported; assert(decl.kind == kind); // ZIR tracking should preserve this decl.name = decl_name; + decl.fqn = try namespace.internFullyQualifiedName(pt, decl_name); decl.is_pub = declaration.flags.is_pub; decl.is_exported = declaration.flags.is_export; break :decl_index .{ was_exported, decl_index }; @@ -1737,6 +1735,7 @@ const ScanDeclIter = struct { const new_decl = zcu.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; + new_decl.fqn = try namespace.internFullyQualifiedName(pt, decl_name); new_decl.is_pub = declaration.flags.is_pub; new_decl.is_exported = declaration.flags.is_export; new_decl.zir_decl_index = tracked_inst.toOptional(); @@ -1761,10 +1760,9 @@ const 
ScanDeclIter = struct { if (!comp.config.is_test) break :a false; if (decl_mod != zcu.main_mod) break :a false; if (is_named_test and comp.test_filters.len > 0) { - const decl_fqn = try namespace.fullyQualifiedName(pt, decl_name); - const decl_fqn_slice = decl_fqn.toSlice(ip); + const decl_fqn = decl.fqn.toSlice(ip); for (comp.test_filters) |test_filter| { - if (std.mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; + if (std.mem.indexOf(u8, decl_fqn, test_filter)) |_| break; } else break :a false; } zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update @@ -1805,12 +1803,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - log.debug("func name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); - defer blk: { - log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); - } + log.debug("func name '{}'", .{decl.fqn.fmt(ip)}); + defer log.debug("finish func name '{}'", .{decl.fqn.fmt(ip)}); - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start(decl.fqn.toSlice(ip), 0); defer decl_prog_node.end(); mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); @@ -2053,6 +2049,7 @@ pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.D const gpa = zcu.gpa; const decl_index = try zcu.intern_pool.createDecl(gpa, pt.tid, .{ .name = undefined, + .fqn = undefined, .src_namespace = namespace, .has_tv = false, .owns_tv = false, @@ -2077,6 +2074,25 @@ pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.D return decl_index; } +pub fn initNewAnonDecl( + pt: Zcu.PerThread, + new_decl_index: Zcu.Decl.Index, + val: Value, + name: InternPool.NullTerminatedString, + fqn: InternPool.OptionalNullTerminatedString, +) Allocator.Error!void { + const new_decl = pt.zcu.declPtr(new_decl_index); + + new_decl.name = name; + new_decl.fqn = fqn.unwrap() orelse + try pt.zcu.namespacePtr(new_decl.src_namespace).internFullyQualifiedName(pt, name); + new_decl.val = val; + new_decl.alignment = .none; + new_decl.@"linksection" = .none; + new_decl.has_tv = true; + new_decl.analysis = .complete; +} + fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void { switch (file.status) { .success_zir, .retryable_failure => {}, @@ -2260,7 +2276,7 @@ pub fn populateTestFunctions( for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(pt); + const test_decl_name = test_decl.fqn; const test_decl_name_len = test_decl_name.length(ip); const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { const test_name_ty = try pt.arrayType(.{ @@ -2366,7 +2382,7 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { const decl = zcu.declPtr(decl_index); - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool), 0); + const codegen_prog_node = zcu.codegen_prog_node.start(decl.fqn.toSlice(&zcu.intern_pool), 0); defer codegen_prog_node.end(); if (comp.bin_file) |lf| { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index f4f0ff09724b..6bea66024377 100644 --- a/src/arch/riscv64/CodeGen.zig +++ 
b/src/arch/riscv64/CodeGen.zig @@ -933,7 +933,7 @@ fn formatDecl( _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer); + try writer.print("{}", .{data.mod.declPtr(data.decl_index).fqn.fmt(&data.mod.intern_pool)}); } fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { return .{ .data = .{ diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 8873c5cb1bbb..3b81ee9c190e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -7284,8 +7284,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(pt); - const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(ip)}); + const decl = mod.declPtr(enum_decl_index); + const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{decl.fqn.fmt(ip)}); // check if we already generated code for this. if (func.bin_file.findGlobalSymbol(func_name)) |loc| { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dafeed00b80e..870716966fb1 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1077,7 +1077,7 @@ fn formatDecl( _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try data.zcu.declPtr(data.decl_index).renderFullyQualifiedName(data.zcu, writer); + try writer.print("{}", .{data.zcu.declPtr(data.decl_index).fqn.fmt(&data.zcu.intern_pool)}); } fn fmtDecl(self: *Self, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { return .{ .data = .{ diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2fa8a98cbb64..6b723967a56c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2194,13 +2194,9 @@ pub const DeclGen = struct { }) else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. 
- var name: [100]u8 = undefined; - var name_stream = std.io.fixedBufferStream(&name); - decl.renderFullyQualifiedName(zcu, name_stream.writer()) catch |err| switch (err) { - error.NoSpaceLeft => {}, - }; + const fqn_slice = decl.fqn.toSlice(ip); try writer.print("{}__{d}", .{ - fmtIdent(name_stream.getWritten()), + fmtIdent(fqn_slice[0..@min(fqn_slice.len, 100)]), @intFromEnum(decl_index), }); } @@ -2587,11 +2583,9 @@ pub fn genTypeDecl( try writer.writeByte(';'); const owner_decl = zcu.declPtr(owner_decl_index); const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu).mod; - if (!owner_mod.strip) { - try writer.writeAll(" /* "); - try owner_decl.renderFullyQualifiedName(zcu, writer); - try writer.writeAll(" */"); - } + if (!owner_mod.strip) try writer.print(" /* {} */", .{ + owner_decl.fqn.fmt(&zcu.intern_pool), + }); try writer.writeByte('\n'); }, }, @@ -4563,9 +4557,7 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func); const writer = f.object.writer(); - try writer.writeAll("/* inline:"); - try owner_decl.renderFullyQualifiedName(zcu, writer); - try writer.writeAll(" */\n"); + try writer.print("/* inline:{} */\n", .{owner_decl.fqn.fmt(&zcu.intern_pool)}); return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len])); } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0f13c9fd9b05..0a23d687d24d 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1744,7 +1744,7 @@ pub const Object = struct { if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(pt)).toSlice(ip)); + const fqn = try self.builder.strtabString(decl.fqn.toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -2863,10 +2863,7 @@ pub const Object = struct { const is_extern = decl.isExtern(zcu); const function_index = try o.builder.addFunction( try o.lowerType(zig_fn_type), - try o.builder.strtabString((if (is_extern) - decl.name - else - try decl.fullyQualifiedName(pt)).toSlice(ip)), + try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), toLlvmAddressSpace(decl.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -3077,14 +3074,12 @@ pub const Object = struct { const pt = o.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); const variable_index = try o.builder.addVariable( - try o.builder.strtabString((if (is_extern) - decl.name - else - try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool)), + try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), try o.lowerType(decl.typeOf(zcu)), toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), ); @@ -3312,7 +3307,7 @@ pub const Object = struct { return int_ty; } - const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(pt); + const decl = mod.declPtr(struct_type.decl.unwrap().?); var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); @@ -3377,7 +3372,7 @@ pub const Object = struct { ); } - const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try 
o.builder.string(decl.fqn.toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3466,7 +3461,7 @@ pub const Object = struct { return enum_tag_ty; } - const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(pt); + const decl = mod.declPtr(union_obj.decl); const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3486,7 +3481,7 @@ pub const Object = struct { }; if (layout.tag_size == 0) { - const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3514,7 +3509,7 @@ pub const Object = struct { llvm_fields_len += 1; } - const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3527,8 +3522,7 @@ pub const Object = struct { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - const fqn = try decl.fullyQualifiedName(pt); - gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); + gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); } return gop.value_ptr.*; }, @@ -4587,11 +4581,11 @@ pub const Object = struct { const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); + const decl = zcu.declPtr(enum_type.decl); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), - try o.builder.strtabStringFmt("__zig_tag_name_{}", .{fqn.fmt(ip)}), + try o.builder.strtabStringFmt("__zig_tag_name_{}", .{decl.fqn.fmt(ip)}), toLlvmAddressSpace(.generic, target), ); @@ -5175,8 +5169,6 @@ pub const FuncGen = struct { const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; - const fqn = try decl.fullyQualifiedName(pt); - const fn_ty = try pt.funcType(.{ .param_types = &.{}, .return_type = .void_type, @@ -5185,7 +5177,7 @@ pub const FuncGen = struct { self.scope = try o.builder.debugSubprogram( self.file, try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), - try o.builder.metadataString(fqn.toSlice(&zcu.intern_pool)), + try o.builder.metadataString(decl.fqn.toSlice(&zcu.intern_pool)), line_number, line_number + func.lbrace_line, try o.lowerDebugType(fn_ty), @@ -9702,18 +9694,19 @@ pub const FuncGen = struct { const o = self.dg.object; const pt = o.pt; const zcu = pt.zcu; - const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern()); + const ip = &zcu.intern_pool; + const enum_type = ip.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. 
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl); if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); + const decl = zcu.declPtr(enum_type.decl); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), - try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{fqn.fmt(&zcu.intern_pool)}), + try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{decl.fqn.fmt(ip)}), toLlvmAddressSpace(.generic, target), ); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 92cff8b2d004..9346a60a1fff 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -3012,12 +3012,11 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt); - try self.spv.debugName(result_id, fqn.toSlice(ip)); + try self.spv.debugName(result_id, decl.fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. if (self.pt.zcu.test_functions.contains(self.decl_index)) { - try self.generateTestEntryPoint(fqn.toSlice(ip), spv_decl_index); + try self.generateTestEntryPoint(decl.fqn.toSlice(ip), spv_decl_index); } }, .global => { @@ -3041,8 +3040,7 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - const fqn = try decl.fullyQualifiedName(self.pt); - try self.spv.debugName(result_id, fqn.toSlice(ip)); + try self.spv.debugName(result_id, decl.fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, .invocation_global => { @@ -3086,8 +3084,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt); - try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)}); + try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{decl.fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ .id_result_type = ptr_ty_id, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index bd1c96bf8b4e..84d26b76104a 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1176,9 +1176,10 @@ pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; - const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); + const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ + decl.fqn.fmt(&mod.intern_pool), index, + }); defer gpa.free(sym_name); const ty = val.typeOf(mod); const atom_index = switch (try self.lowerConst(pt, sym_name, val, ty.abiAlignment(pt), self.rdata_section_index.?, decl.navSrcLoc(mod))) { @@ -1427,9 +1428,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - - log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateDeclCode {}{*}", .{ decl.fqn.fmt(&mod.intern_pool), decl }); const required_alignment: u32 = 
@intCast(decl.getAlignment(pt).toByteUnits() orelse 0); const decl_metadata = self.decls.get(decl_index).?; @@ -1441,7 +1440,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd if (atom.size != 0) { const sym = atom.getSymbolPtr(self); - try self.setSymbolName(sym, decl_name.toSlice(&mod.intern_pool)); + try self.setSymbolName(sym, decl.fqn.toSlice(&mod.intern_pool)); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; @@ -1449,7 +1448,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment); if (need_realloc) { const vaddr = try self.growAtom(atom_index, code_len, required_alignment); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), sym.value, vaddr }); + log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(&mod.intern_pool), sym.value, vaddr }); log.debug(" (required alignment 0x{x}", .{required_alignment}); if (vaddr != sym.value) { @@ -1465,13 +1464,13 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd self.getAtomPtr(atom_index).size = code_len; } else { const sym = atom.getSymbolPtr(self); - try self.setSymbolName(sym, decl_name.toSlice(&mod.intern_pool)); + try self.setSymbolName(sym, decl.fqn.toSlice(&mod.intern_pool)); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); errdefer self.freeAtom(atom_index); - log.debug("allocated atom for {} at 0x{x}", .{ decl_name.fmt(&mod.intern_pool), vaddr }); + log.debug("allocated atom for {} at 0x{x}", .{ decl.fqn.fmt(&mod.intern_pool), vaddr }); self.getAtomPtr(atom_index).size = code_len; sym.value = vaddr; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9ae4ee3be66c..caa11e4cc4e5 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1082,9 +1082,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec defer tracy.end(); const decl = pt.zcu.declPtr(decl_index); - const decl_linkage_name = try decl.fullyQualifiedName(pt); - - log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl }); + log.debug("initDeclState {}{*}", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); const gpa = self.allocator; var decl_state: DeclState = .{ @@ -1157,7 +1155,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec // .debug_info subprogram const decl_name_slice = decl.name.toSlice(&pt.zcu.intern_pool); - const decl_linkage_name_slice = decl_linkage_name.toSlice(&pt.zcu.intern_pool); + const decl_linkage_name_slice = decl.fqn.toSlice(&pt.zcu.intern_pool); try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 + (decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1)); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 7a419750d4af..46c0fd23a33b 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -907,10 +907,10 @@ fn updateDeclCode( ) !void { const gpa = elf_file.base.comp.gpa; const mod = pt.zcu; + const ip = &mod.intern_pool; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + 
log.debug("updateDeclCode {}{*}", .{ decl.fqn.fmt(ip), decl }); const required_alignment = decl.getAlignment(pt).max( target_util.minFunctionAlignment(mod.getTarget()), @@ -923,7 +923,7 @@ fn updateDeclCode( sym.output_section_index = shdr_index; atom_ptr.output_section_index = shdr_index; - sym.name_offset = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool)); + sym.name_offset = try self.strtab.insert(gpa, decl.fqn.toSlice(ip)); atom_ptr.flags.alive = true; atom_ptr.name_offset = sym.name_offset; esym.st_name = sym.name_offset; @@ -940,7 +940,7 @@ fn updateDeclCode( const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value)); if (need_realloc) { try atom_ptr.grow(elf_file); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom_ptr.value }); + log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(ip), old_vaddr, atom_ptr.value }); if (old_vaddr != atom_ptr.value) { sym.value = 0; esym.st_value = 0; @@ -1007,11 +1007,11 @@ fn updateTlv( code: []const u8, ) !void { const mod = pt.zcu; + const ip = &mod.intern_pool; const gpa = mod.gpa; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateTlv {} ({*})", .{ decl.fqn.fmt(ip), decl }); const required_alignment = decl.getAlignment(pt); @@ -1023,7 +1023,7 @@ fn updateTlv( sym.output_section_index = shndx; atom_ptr.output_section_index = shndx; - sym.name_offset = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool)); + sym.name_offset = try self.strtab.insert(gpa, decl.fqn.toSlice(ip)); atom_ptr.flags.alive = true; atom_ptr.name_offset = sym.name_offset; esym.st_value = 0; @@ -1286,9 +1286,8 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); + const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); defer gpa.free(name); const ty = val.typeOf(mod); const sym_index = switch (try self.lowerConst( @@ -1473,9 +1472,8 @@ pub fn updateDeclLineNumber( defer tracy.end(); const decl = pt.zcu.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); + log.debug("updateDeclLineNumber {}{*}", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); if (self.dwarf) |*dw| { try dw.updateDeclLineNumber(pt.zcu, decl_index); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 03e659c497c5..79e1ae4e02a9 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -809,10 +809,10 @@ fn updateDeclCode( ) !void { const gpa = macho_file.base.comp.gpa; const mod = pt.zcu; + const ip = &mod.intern_pool; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateDeclCode {}{*}", .{ decl.fqn.fmt(ip), decl }); const required_alignment = decl.getAlignment(pt); @@ -824,7 +824,7 @@ fn updateDeclCode( sym.out_n_sect = sect_index; atom.out_n_sect = sect_index; - sym.name = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool)); + sym.name = try 
self.strtab.insert(gpa, decl.fqn.toSlice(ip)); atom.flags.alive = true; atom.name = sym.name; nlist.n_strx = sym.name; @@ -843,7 +843,7 @@ fn updateDeclCode( if (need_realloc) { try atom.grow(macho_file); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom.value }); + log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(ip), old_vaddr, atom.value }); if (old_vaddr != atom.value) { sym.value = 0; nlist.n_value = 0; @@ -893,25 +893,22 @@ fn updateTlv( sect_index: u8, code: []const u8, ) !void { + const ip = &pt.zcu.intern_pool; const decl = pt.zcu.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); - - const decl_name_slice = decl_name.toSlice(&pt.zcu.intern_pool); - const required_alignment = decl.getAlignment(pt); + log.debug("updateTlv {} ({*})", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); // 1. Lower TLV initializer const init_sym_index = try self.createTlvInitializer( macho_file, - decl_name_slice, - required_alignment, + decl.fqn.toSlice(ip), + decl.getAlignment(pt), sect_index, code, ); // 2. Create TLV descriptor - try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, decl_name_slice); + try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, decl.fqn.toSlice(ip)); } fn createTlvInitializer( @@ -1099,9 +1096,8 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); + const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); defer gpa.free(name); const sym_index = switch (try self.lowerConst( macho_file, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index cfc8435906fb..091aee54c4a3 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -483,11 +483,9 @@ pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(pt); - const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); + const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); const sym_index = try self.allocateSymbolIndex(); const new_atom_idx = try self.createAtom(); diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index f95c8fc7945c..180e45d91d07 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -346,8 +346,7 @@ fn finishUpdateDecl( const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - const full_name = try decl.fullyQualifiedName(pt); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip)); + sym.name = try zig_object.string_table.insert(gpa, decl.fqn.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); @@ -387,7 +386,7 @@ fn finishUpdateDecl( // Will be freed upon freeing of decl or after cleanup of Wasm binary. 
const full_segment_name = try std.mem.concat(gpa, u8, &.{ segment_name, - full_name.toSlice(ip), + decl.fqn.toSlice(ip), }); errdefer gpa.free(full_segment_name); sym.tag = .data; @@ -436,9 +435,8 @@ pub fn getOrCreateAtomForDecl( const sym_index = try zig_object.allocateSymbol(gpa); gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) }; const decl = pt.zcu.declPtr(decl_index); - const full_name = try decl.fullyQualifiedName(pt); const sym = zig_object.symbol(sym_index); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&pt.zcu.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, decl.fqn.toSlice(&pt.zcu.intern_pool)); } return gop.value_ptr.atom; } @@ -494,9 +492,8 @@ pub fn lowerUnnamedConst( const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const parent_atom = wasm_file.getAtom(parent_atom_index); const local_index = parent_atom.locals.items.len; - const fqn = try decl.fullyQualifiedName(pt); const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{ - fqn.fmt(&mod.intern_pool), local_index, + decl.fqn.fmt(&mod.intern_pool), local_index, }); defer gpa.free(name); @@ -1127,9 +1124,7 @@ pub fn updateDeclLineNumber( ) !void { if (zig_object.dwarf) |*dw| { const decl = pt.zcu.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(pt); - - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); + log.debug("updateDeclLineNumber {}{*}", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/print_value.zig b/src/print_value.zig index 19e70d0564b0..c19ba43af32c 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -299,8 +299,8 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve int.ptr_ty.fmt(pt), int.addr, }), - .decl_ptr => |decl| { - try zcu.declPtr(decl).renderFullyQualifiedName(zcu, writer); + .decl_ptr => |decl_index| { + try writer.print("{}", .{zcu.declPtr(decl_index).fqn.fmt(ip)}); }, .anon_decl_ptr => |anon| { const ty = Value.fromInterned(anon.val).typeOf(zcu); From 9cf42b103601c26294739143f6aeb4b93e1858d8 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 9 Jul 2024 19:50:45 -0400 Subject: [PATCH 092/152] InternPool: fix race on `FuncInstance.branch_quota` --- src/InternPool.zig | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f1b7cbd7a559..f862fedef9cd 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -5914,27 +5914,32 @@ fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke } fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { - const P = Tag.FuncInstance; - const fi = extraDataTrail(extra, P, extra_index); - const func_decl = ip.funcDeclInfo(fi.data.generic_owner); + const extra_items = extra.view().items(.@"0"); + const analysis_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?; + const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .monotonic)); + const owner_decl: DeclIndex = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").?]); + const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?]); + const generic_owner: Index = @enumFromInt(extra_items[extra_index + 
std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]); + const func_decl = ip.funcDeclInfo(generic_owner); + const end_extra_index = extra_index + @as(u32, @typeInfo(Tag.FuncInstance).Struct.fields.len); return .{ .tid = tid, - .ty = fi.data.ty, - .uncoerced_ty = fi.data.ty, - .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, + .ty = ty, + .uncoerced_ty = ty, + .analysis_extra_index = analysis_extra_index, .zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index, - .resolved_error_set_extra_index = if (fi.data.analysis.inferred_error_set) fi.end else 0, - .branch_quota_extra_index = extra_index + std.meta.fieldIndex(P, "branch_quota").?, - .owner_decl = fi.data.owner_decl, + .resolved_error_set_extra_index = if (analysis.inferred_error_set) end_extra_index else 0, + .branch_quota_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "branch_quota").?, + .owner_decl = owner_decl, .zir_body_inst = func_decl.zir_body_inst, .lbrace_line = func_decl.lbrace_line, .rbrace_line = func_decl.rbrace_line, .lbrace_column = func_decl.lbrace_column, .rbrace_column = func_decl.rbrace_column, - .generic_owner = fi.data.generic_owner, + .generic_owner = generic_owner, .comptime_args = .{ .tid = tid, - .start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), + .start = end_extra_index + @intFromBool(analysis.inferred_error_set), .len = ip.funcTypeParamsLen(func_decl.ty), }, }; From 3aa48bf859b3eba17c0431c15ae79ab303219eff Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 9 Jul 2024 19:50:52 -0400 Subject: [PATCH 093/152] InternPool: fix race on struct flags --- src/InternPool.zig | 175 ++++++++++++++++++++++++--------------------- 1 file changed, 95 insertions(+), 80 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f862fedef9cd..1d5e327efd74 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2783,20 +2783,22 @@ pub const LoadedStructType = struct { } pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool { - switch (s.layout) { - .@"packed" => { - const flag = &s.packedFlagsPtr(ip).field_inits_wip; - if (flag.*) return true; - flag.* = true; - return false; - }, - .auto, .@"extern" => { - const flag = &s.flagsPtr(ip).field_inits_wip; - if (flag.*) return true; - flag.* = true; - return false; - }, - } + return switch (s.layout) { + .@"packed" => @as(Tag.TypeStructPacked.Flags, @bitCast(@atomicRmw( + u32, + @as(*u32, @ptrCast(s.packedFlagsPtr(ip))), + .Or, + @bitCast(Tag.TypeStructPacked.Flags{ .field_inits_wip = true }), + .acq_rel, + ))).field_inits_wip, + .auto, .@"extern" => @as(Tag.TypeStruct.Flags, @bitCast(@atomicRmw( + u32, + @as(*u32, @ptrCast(s.flagsPtr(ip))), + .Or, + @bitCast(Tag.TypeStruct.Flags{ .field_inits_wip = true }), + .acq_rel, + ))).field_inits_wip, + }; } pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void { @@ -2962,6 +2964,7 @@ pub const LoadedStructType = struct { pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { const unwrapped_index = index.unwrap(ip); const extra_list = unwrapped_index.getExtra(ip); + const extra_items = extra_list.view().items(.@"0"); const item = unwrapped_index.getItem(ip); switch (item.tag) { .type_struct => { @@ -2982,10 +2985,12 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .names_map = .none, .captures = CaptureValue.Slice.empty, }; - const extra = extraDataTrail(extra_list, Tag.TypeStruct, item.data); - const fields_len = extra.data.fields_len; - var extra_index = extra.end; - 
const captures_len = if (extra.data.flags.any_captures) c: { + const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "decl").?]); + const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); + const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?]; + const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic)); + var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len); + const captures_len = if (flags.any_captures) c: { const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; @@ -2996,7 +3001,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .len = captures_len, }; extra_index += captures_len; - if (extra.data.flags.is_reified) { + if (flags.is_reified) { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ @@ -3005,7 +3010,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .len = fields_len, }; extra_index += fields_len; - const names_map: OptionalMapIndex, const names = if (!extra.data.flags.is_tuple) n: { + const names_map: OptionalMapIndex, const names = if (!flags.is_tuple) n: { const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; const names: NullTerminatedString.Slice = .{ @@ -3016,7 +3021,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += fields_len; break :n .{ names_map, names }; } else .{ .none, NullTerminatedString.Slice.empty }; - const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: { + const inits: Index.Slice = if (flags.any_default_inits) i: { const inits: Index.Slice = .{ .tid = unwrapped_index.tid, .start = extra_index, @@ -3025,12 +3030,12 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += fields_len; break :i inits; } else Index.Slice.empty; - const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: { + const namespace: OptionalNamespaceIndex = if (flags.has_namespace) n: { const n: NamespaceIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; break :n n.toOptional(); } else .none; - const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: { + const aligns: Alignment.Slice = if (flags.any_aligned_fields) a: { const a: Alignment.Slice = .{ .tid = unwrapped_index.tid, .start = extra_index, @@ -3039,7 +3044,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; break :a a; } else Alignment.Slice.empty; - const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: { + const comptime_bits: LoadedStructType.ComptimeBits = if (flags.any_comptime_fields) c: { const len = std.math.divCeil(u32, fields_len, 32) catch unreachable; const c: LoadedStructType.ComptimeBits = .{ .tid = unwrapped_index.tid, @@ -3049,7 +3054,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += len; break :c c; } else LoadedStructType.ComptimeBits.empty; - const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: { + const runtime_order: LoadedStructType.RuntimeOrder.Slice = 
if (!flags.is_extern) ro: { const ro: LoadedStructType.RuntimeOrder.Slice = .{ .tid = unwrapped_index.tid, .start = extra_index, @@ -3070,10 +3075,10 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .tid = unwrapped_index.tid, .extra_index = item.data, - .decl = extra.data.decl.toOptional(), + .decl = decl.toOptional(), .namespace = namespace, - .zir_index = extra.data.zir_index.toOptional(), - .layout = if (extra.data.flags.is_extern) .@"extern" else .auto, + .zir_index = zir_index.toOptional(), + .layout = if (flags.is_extern) .@"extern" else .auto, .field_names = names, .field_types = field_types, .field_inits = inits, @@ -3086,11 +3091,15 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; }, .type_struct_packed, .type_struct_packed_inits => { - const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, item.data); + const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?]); + const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]); + const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?]; + const namespace: OptionalNamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]); + const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]); + const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic)); + var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len); const has_inits = item.tag == .type_struct_packed_inits; - const fields_len = extra.data.fields_len; - var extra_index = extra.end; - const captures_len = if (extra.data.flags.any_captures) c: { + const captures_len = if (flags.any_captures) c: { const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; @@ -3101,7 +3110,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .len = captures_len, }; extra_index += captures_len; - if (extra.data.flags.is_reified) { + if (flags.is_reified) { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ @@ -3128,9 +3137,9 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .tid = unwrapped_index.tid, .extra_index = item.data, - .decl = extra.data.decl.toOptional(), - .namespace = extra.data.namespace, - .zir_index = extra.data.zir_index.toOptional(), + .decl = decl.toOptional(), + .namespace = namespace, + .zir_index = zir_index.toOptional(), .layout = .@"packed", .field_names = field_names, .field_types = field_types, @@ -3139,7 +3148,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, .comptime_bits = LoadedStructType.ComptimeBits.empty, .offsets = LoadedStructType.Offsets.empty, - .names_map = extra.data.names_map.toOptional(), + .names_map = names_map.toOptional(), .captures = captures, }; }, @@ -4476,11 +4485,11 @@ pub const Tag = enum(u8) { flags: Flags, pub const Flags = packed struct(u32) { - any_captures: bool, + any_captures: bool = false, /// Dependency loop detection when resolving field inits. 
- field_inits_wip: bool, - inits_resolved: bool, - is_reified: bool, + field_inits_wip: bool = false, + inits_resolved: bool = false, + is_reified: bool = false, _: u28 = 0, }; }; @@ -4526,36 +4535,36 @@ pub const Tag = enum(u8) { size: u32, pub const Flags = packed struct(u32) { - any_captures: bool, - is_extern: bool, - known_non_opv: bool, - requires_comptime: RequiresComptime, - is_tuple: bool, - assumed_runtime_bits: bool, - assumed_pointer_aligned: bool, - has_namespace: bool, - any_comptime_fields: bool, - any_default_inits: bool, - any_aligned_fields: bool, + any_captures: bool = false, + is_extern: bool = false, + known_non_opv: bool = false, + requires_comptime: RequiresComptime = @enumFromInt(0), + is_tuple: bool = false, + assumed_runtime_bits: bool = false, + assumed_pointer_aligned: bool = false, + has_namespace: bool = false, + any_comptime_fields: bool = false, + any_default_inits: bool = false, + any_aligned_fields: bool = false, /// `.none` until layout_resolved - alignment: Alignment, + alignment: Alignment = @enumFromInt(0), /// Dependency loop detection when resolving struct alignment. - alignment_wip: bool, + alignment_wip: bool = false, /// Dependency loop detection when resolving field types. - field_types_wip: bool, + field_types_wip: bool = false, /// Dependency loop detection when resolving struct layout. - layout_wip: bool, + layout_wip: bool = false, /// Indicates whether `size`, `alignment`, runtime field order, and /// field offets are populated. - layout_resolved: bool, + layout_resolved: bool = false, /// Dependency loop detection when resolving field inits. - field_inits_wip: bool, + field_inits_wip: bool = false, /// Indicates whether `field_inits` has been resolved. - inits_resolved: bool, + inits_resolved: bool = false, // The types and all its fields have had their layout resolved. Even through pointer, // which `layout_resolved` does not ensure.
- fully_resolved: bool, - is_reified: bool, + fully_resolved: bool = false, + is_reified: bool = false, _: u6 = 0, }; }; @@ -5400,40 +5409,46 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_struct => .{ .struct_type = ns: { if (data == 0) break :ns .empty_struct; const extra_list = unwrapped_index.getExtra(ip); - const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); - if (extra.data.flags.is_reified) { - assert(!extra.data.flags.any_captures); + const extra_items = extra_list.view().items(.@"0"); + const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); + const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic)); + const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len); + if (flags.is_reified) { + assert(!flags.any_captures); break :ns .{ .reified = .{ - .zir_index = extra.data.zir_index, - .type_hash = extraData(extra_list, PackedU64, extra.end).get(), + .zir_index = zir_index, + .type_hash = extraData(extra_list, PackedU64, end_extra_index).get(), } }; } break :ns .{ .declared = .{ - .zir_index = extra.data.zir_index, - .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .zir_index = zir_index, + .captures = .{ .owned = if (flags.any_captures) .{ .tid = unwrapped_index.tid, - .start = extra.end + 1, - .len = extra_list.view().items(.@"0")[extra.end], + .start = end_extra_index + 1, + .len = extra_list.view().items(.@"0")[end_extra_index], } else CaptureValue.Slice.empty }, } }; } }, .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: { const extra_list = unwrapped_index.getExtra(ip); - const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); - if (extra.data.flags.is_reified) { - assert(!extra.data.flags.any_captures); + const extra_items = extra_list.view().items(.@"0"); + const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]); + const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic)); + const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len); + if (flags.is_reified) { + assert(!flags.any_captures); break :ns .{ .reified = .{ - .zir_index = extra.data.zir_index, - .type_hash = extraData(extra_list, PackedU64, extra.end).get(), + .zir_index = zir_index, + .type_hash = extraData(extra_list, PackedU64, end_extra_index).get(), } }; } break :ns .{ .declared = .{ - .zir_index = extra.data.zir_index, - .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .zir_index = zir_index, + .captures = .{ .owned = if (flags.any_captures) .{ .tid = unwrapped_index.tid, - .start = extra.end + 1, - .len = extra_list.view().items(.@"0")[extra.end], + .start = end_extra_index + 1, + .len = extra_items[end_extra_index], } else CaptureValue.Slice.empty }, } }; } }, From f93a10f664fbbb67aeda031583a790e2a842fb01 Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 4 Jul 2024 10:31:59 +0100 Subject: [PATCH 094/152] Air: store param names directly instead of referencing Zir --- src/Air.zig | 7 ++++++- src/Sema.zig | 35 +++++++++++++++++++++-------------- src/Zcu/PerThread.zig | 14 ++++++++++++-- src/arch/aarch64/CodeGen.zig | 20 ++++++++++---------- src/arch/arm/CodeGen.zig | 20 ++++++++++---------- src/arch/riscv64/CodeGen.zig | 3 ++- 
 src/arch/sparc64/CodeGen.zig |  3 ++-
 src/arch/wasm/CodeGen.zig    | 12 +++++++-----
 src/arch/x86_64/CodeGen.zig  |  8 +++++---
 src/codegen/llvm.zig         |  8 +++++---
 src/print_air.zig            |  8 +++++++-
 11 files changed, 87 insertions(+), 51 deletions(-)

diff --git a/src/Air.zig b/src/Air.zig
index b291dbee7a9b..d1c6d184e684 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1034,7 +1034,12 @@ pub const Inst = struct {
         ty: Type,
         arg: struct {
             ty: Ref,
-            src_index: u32,
+            /// Index into `extra` of a null-terminated string representing the parameter name.
+            /// This is `.none` if debug info is stripped.
+            name: enum(u32) {
+                none = std.math.maxInt(u32),
+                _,
+            },
         },
         ty_op: struct {
             ty: Ref,
diff --git a/src/Sema.zig b/src/Sema.zig
index 41087a6360ef..170b773d1f27 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -6718,13 +6718,7 @@ fn addDbgVar(
     if (block.need_debug_scope) |ptr| ptr.* = true;
 
     // Add the name to the AIR.
-    const name_extra_index: u32 = @intCast(sema.air_extra.items.len);
-    const elements_used = name.len / 4 + 1;
-    try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
-    const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
-    @memcpy(buffer[0..name.len], name);
-    buffer[name.len] = 0;
-    sema.air_extra.items.len += elements_used;
+    const name_extra_index = try sema.appendAirString(name);
 
     _ = try block.addInst(.{
         .tag = air_tag,
@@ -6735,6 +6729,16 @@
     });
 }
 
+pub fn appendAirString(sema: *Sema, str: []const u8) Allocator.Error!u32 {
+    const str_extra_index: u32 = @intCast(sema.air_extra.items.len);
+    const elements_used = str.len / 4 + 1;
+    const elements = try sema.air_extra.addManyAsSlice(sema.gpa, elements_used);
+    const buffer = mem.sliceAsBytes(elements);
+    @memcpy(buffer[0..str.len], str);
+    buffer[str.len] = 0;
+    return str_extra_index;
+}
+
 fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
     const mod = pt.zcu;
@@ -8354,13 +8358,6 @@ fn instantiateGenericCall(
         }
     } else {
         // The parameter is runtime-known.
-        child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
-            .tag = .arg,
-            .data = .{ .arg = .{
-                .ty = Air.internedToRef(arg_ty.toIntern()),
-                .src_index = @intCast(arg_index),
-            } },
-        }));
         const param_name: Zir.NullTerminatedString = switch (param_tag) {
             .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.start,
             .param => name: {
@@ -8370,6 +8367,16 @@
             },
             else => unreachable,
         };
+        child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
+            .tag = .arg,
+            .data = .{ .arg = .{
+                .ty = Air.internedToRef(arg_ty.toIntern()),
+                .name = if (child_block.ownerModule().strip)
+                    .none
+                else
+                    @enumFromInt(try child_sema.appendAirString(fn_zir.nullTerminatedString(param_name))),
+            } },
+        }));
         try child_block.params.append(sema.arena, .{
             .ty = arg_ty.toIntern(), // This is the type after coercion
             .is_comptime = false, // We're adding only runtime args to the instantiation
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index eaff3231958a..48350033af06 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -1907,10 +1907,17 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
         runtime_params_len;
 
     var runtime_param_index: usize = 0;
-    for (fn_info.param_body[0..src_params_len], 0..) 
|inst, src_param_index| { + for (fn_info.param_body[0..src_params_len]) |inst| { const gop = sema.inst_map.getOrPutAssumeCapacity(inst); if (gop.found_existing) continue; // provided above by comptime arg + const inst_info = sema.code.instructions.get(@intFromEnum(inst)); + const param_name: Zir.NullTerminatedString = switch (inst_info.tag) { + .param_anytype => inst_info.data.str_tok.start, + .param => sema.code.extraData(Zir.Inst.Param, inst_info.data.pl_tok.payload_index).data.name, + else => unreachable, + }; + const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index]; runtime_param_index += 1; @@ -1931,7 +1938,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All .tag = .arg, .data = .{ .arg = .{ .ty = Air.internedToRef(param_ty), - .src_index = @intCast(src_param_index), + .name = if (inner_block.ownerModule().strip) + .none + else + @enumFromInt(try sema.appendAirString(sema.code.nullTerminatedString(param_name))), } }, }); } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 7a2c0178cd07..8a020fc521d6 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4231,19 +4231,19 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const pt = self.pt; - const mod = pt.zcu; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; - const name = mod.getParamName(self.func_index, src_index); - try self.dbg_info_relocs.append(self.gpa, .{ - .tag = tag, - .ty = ty, - .name = name, - .mcv = self.args[arg_index], - }); + const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; + if (name_nts != .none) { + const name = self.air.nullTerminatedString(@intFromEnum(name_nts)); + try self.dbg_info_relocs.append(self.gpa, .{ + .tag = tag, + .ty = ty, + .name = name, + .mcv = self.args[arg_index], + }); + } const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index]; return self.finishAir(inst, result, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 0dd513d4fe44..f923c001e131 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4206,19 +4206,19 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const pt = self.pt; - const mod = pt.zcu; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)]; - const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; - const name = mod.getParamName(self.func_index, src_index); - try self.dbg_info_relocs.append(self.gpa, .{ - .tag = tag, - .ty = ty, - .name = name, - .mcv = self.args[arg_index], - }); + const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; + if (name_nts != .none) { + const name = self.air.nullTerminatedString(@intFromEnum(name_nts)); + try self.dbg_info_relocs.append(self.gpa, .{ + .tag = tag, + .ty = ty, + .name = name, + .mcv = self.args[arg_index], + }); + } const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index]; return self.finishAir(inst, result, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 6bea66024377..70876a298bcd 
100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -4051,7 +4051,8 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); const owner_decl = zcu.funcOwnerDeclIndex(func.func_index); - const name = zcu.getParamName(func.func_index, arg.src_index); + if (arg.name == .none) return; + const name = func.air.nullTerminatedString(@intFromEnum(arg.name)); switch (func.debug_output) { .dwarf => |dw| switch (mcv) { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 6dea4977538a..1c3b2327b63b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3614,7 +3614,8 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); const owner_decl = mod.funcOwnerDeclIndex(self.func_index); - const name = mod.getParamName(self.func_index, arg.src_index); + if (arg.name == .none) return; + const name = self.air.nullTerminatedString(@intFromEnum(arg.name)); switch (self.debug_output) { .dwarf => |dw| switch (mcv) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 3b81ee9c190e..deab261666db 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2585,11 +2585,13 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (func.debug_output) { .dwarf => |dwarf| { - const src_index = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; - const name = mod.getParamName(func.func_index, src_index); - try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{ - .wasm_local = arg.local.value, - }); + const name_nts = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; + if (name_nts != .none) { + const name = func.air.nullTerminatedString(@intFromEnum(name_nts)); + try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{ + .wasm_local = arg.local.value, + }); + } }, else => {}, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 870716966fb1..1b1c1dd7d73b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -11920,9 +11920,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { else => return self.fail("TODO implement arg for {}", .{src_mcv}), }; - const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; - const name = mod.getParamName(self.owner.func_index, src_index); - try self.genArgDbgInfo(arg_ty, name, src_mcv); + const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; + switch (name_nts) { + .none => {}, + _ => try self.genArgDbgInfo(arg_ty, self.air.nullTerminatedString(@intFromEnum(name_nts)), src_mcv), + } break :result dst_mcv; }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0a23d687d24d..40110993b93b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -8859,19 +8859,21 @@ pub const FuncGen = struct { self.arg_index += 1; // llvm does not support debug info for naked function arguments - if (self.wip.strip or self.is_naked) return arg_val; + if (self.is_naked) return arg_val; const inst_ty = self.typeOfIndex(inst); if (needDbgVarWorkaround(o)) return arg_val; - const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index; + const name = 
self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; + if (name == .none) return arg_val; + const func_index = self.dg.decl.getOwnedFunctionIndex(); const func = mod.funcInfo(func_index); const lbrace_line = mod.declPtr(func.owner_decl).navSrcLine(mod) + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const debug_parameter = try o.builder.debugParameter( - try o.builder.metadataString(mod.getParamName(func_index, src_index)), + try o.builder.metadataString(self.air.nullTerminatedString(@intFromEnum(name))), self.file, self.scope, lbrace_line, diff --git a/src/print_air.zig b/src/print_air.zig index d85750bd2795..1872b480f53d 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -356,7 +356,13 @@ const Writer = struct { fn writeArg(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const arg = w.air.instructions.items(.data)[@intFromEnum(inst)].arg; try w.writeType(s, arg.ty.toType()); - try s.print(", {d}", .{arg.src_index}); + switch (arg.name) { + .none => {}, + _ => { + const name = w.air.nullTerminatedString(@intFromEnum(arg.name)); + try s.print(", \"{}\"", .{std.zig.fmtEscapes(name)}); + }, + } } fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { From 3d2dfbe8289c2ecb45e1ba1fe79c4d7e21dd26c3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 10:04:33 -0400 Subject: [PATCH 095/152] InternPool: add `FileIndex` to `*File` mapping --- src/Compilation.zig | 41 ++++---- src/InternPool.zig | 102 ++++++++++++++----- src/Sema.zig | 4 +- src/Type.zig | 2 +- src/Zcu.zig | 193 ++---------------------------------- src/Zcu/PerThread.zig | 221 ++++++++++++++++++++++++++++++++++++++---- src/codegen/llvm.zig | 2 +- 7 files changed, 314 insertions(+), 251 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 118e325ed7a9..a474d1955a1a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2119,12 +2119,14 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } if (comp.module) |zcu| { + const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main }; + zcu.compile_log_text.shrinkAndFree(gpa, 0); // Make sure std.zig is inside the import_table. We unconditionally need // it for start.zig. const std_mod = zcu.std_mod; - _ = try zcu.importPkg(std_mod); + _ = try pt.importPkg(std_mod); // Normally we rely on importing std to in turn import the root source file // in the start code, but when using the stage1 backend that won't happen, @@ -2133,20 +2135,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { // Likewise, in the case of `zig test`, the test runner is the root source file, // and so there is nothing to import the main file. if (comp.config.is_test) { - _ = try zcu.importPkg(zcu.main_mod); + _ = try pt.importPkg(zcu.main_mod); } if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { - _ = try zcu.importPkg(compiler_rt_mod); + _ = try pt.importPkg(compiler_rt_mod); } // Put a work item in for every known source file to detect if // it changed, and, if so, re-compute ZIR and then queue the job // to update it. try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count()); - for (zcu.import_table.values(), 0..) 
|file, file_index_usize| { - const file_index: Zcu.File.Index = @enumFromInt(file_index_usize); - if (file.mod.isBuiltin()) continue; + for (zcu.import_table.values()) |file_index| { + if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue; comp.astgen_work_queue.writeItemAssumeCapacity(file_index); } @@ -2641,7 +2642,8 @@ fn resolveEmitLoc( return slice.ptr; } -fn reportMultiModuleErrors(zcu: *Zcu) !void { +fn reportMultiModuleErrors(pt: Zcu.PerThread) !void { + const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; // Some cases can give you a whole bunch of multi-module errors, which it's not helpful to @@ -2651,14 +2653,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { // Attach the "some omitted" note to the final error message var last_err: ?*Zcu.ErrorMsg = null; - for (zcu.import_table.values(), 0..) |file, file_index_usize| { + for (zcu.import_table.values()) |file_index| { + const file = zcu.fileByIndex(file_index); if (!file.multi_pkg) continue; num_errors += 1; if (num_errors > max_errors) continue; - const file_index: Zcu.File.Index = @enumFromInt(file_index_usize); - const err = err_blk: { // Like with errors, let's cap the number of notes to prevent a huge error spew. const max_notes = 5; @@ -2749,8 +2750,9 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { // to add this flag after reporting the errors however, as otherwise // we'd get an error for every single downstream file, which wouldn't be // very useful. - for (zcu.import_table.values()) |file| { - if (file.multi_pkg) file.recursiveMarkMultiPkg(zcu); + for (zcu.import_table.values()) |file_index| { + const file = zcu.fileByIndex(file_index); + if (file.multi_pkg) file.recursiveMarkMultiPkg(pt); } } @@ -3443,11 +3445,12 @@ fn performAllTheWorkInner( } } - if (comp.module) |mod| { - try reportMultiModuleErrors(mod); - try mod.flushRetryableFailures(); - mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); + if (comp.module) |zcu| { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = .main }; + try reportMultiModuleErrors(pt); + try zcu.flushRetryableFailures(); + zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); + zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); } if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&comp.work_queue_wait_group, codegenThread, .{comp}); @@ -4189,9 +4192,9 @@ fn workerAstGenFile( comp.mutex.lock(); defer comp.mutex.unlock(); - const res = pt.zcu.importFile(file, import_path) catch continue; + const res = pt.importFile(file, import_path) catch continue; if (!res.is_pkg) { - res.file.addReference(pt.zcu.*, .{ .import = .{ + res.file.addReference(pt.zcu, .{ .import = .{ .file = file_index, .token = item.data.token, } }) catch continue; diff --git a/src/InternPool.zig b/src/InternPool.zig index 1d5e327efd74..37ff1352a792 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,6 +1,5 @@ //! All interned objects have both a value and a type. -//! This data structure is self-contained, with the following exceptions: -//! * Module.Namespace has a pointer to Module.File +//! This data structure is self-contained. /// One item per thread, indexed by `tid`, which is dense and unique per thread. locals: []Local = &.{}, @@ -79,10 +78,6 @@ const want_multi_threaded = false; /// Whether a single-threaded intern pool impl is in use. 
pub const single_threaded = builtin.single_threaded or !want_multi_threaded; -pub const FileIndex = enum(u32) { - _, -}; - pub const TrackedInst = extern struct { file: FileIndex, inst: Zir.Inst.Index, @@ -340,6 +335,7 @@ const Local = struct { extra: ListMutate, limbs: ListMutate, strings: ListMutate, + files: ListMutate, decls: BucketListMutate, namespaces: BucketListMutate, @@ -350,6 +346,7 @@ const Local = struct { extra: Extra, limbs: Limbs, strings: Strings, + files: Files, decls: Decls, namespaces: Namespaces, @@ -370,16 +367,17 @@ const Local = struct { else => @compileError("unsupported host"), }; const Strings = List(struct { u8 }); + const Files = List(struct { *Zcu.File }); const decls_bucket_width = 8; const decls_bucket_mask = (1 << decls_bucket_width) - 1; const decl_next_free_field = "src_namespace"; - const Decls = List(struct { *[1 << decls_bucket_width]Module.Decl }); + const Decls = List(struct { *[1 << decls_bucket_width]Zcu.Decl }); const namespaces_bucket_width = 8; const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1; const namespace_next_free_field = "decl_index"; - const Namespaces = List(struct { *[1 << namespaces_bucket_width]Module.Namespace }); + const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace }); const ListMutate = struct { len: u32, @@ -677,6 +675,15 @@ const Local = struct { }; } + pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) Files.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.files, + .list = &local.shared.files, + }; + } + /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this BucketList. This provides four advantages: /// * Stable memory so that one thread can access a Decl object while another @@ -812,8 +819,6 @@ const Hash = std.hash.Wyhash; const InternPool = @This(); const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; const Zir = std.zig.Zir; /// An index into `maps` which might be `none`. @@ -938,6 +943,28 @@ pub const OptionalNamespaceIndex = enum(u32) { } }; +pub const FileIndex = enum(u32) { + _, + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) FileIndex { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.index); + } + }; + fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(file_index) >> ip.tid_shift_32 & ip.getTidMask()), + .index = @intFromEnum(file_index) & ip.getIndexMask(u32), + }; + } +}; + /// An index into `strings`. pub const String = enum(u32) { /// An empty string. @@ -4608,12 +4635,12 @@ pub const FuncAnalysis = packed struct(u32) { /// inline, which means no runtime version of the function will be generated. inline_only, in_progress, - /// There will be a corresponding ErrorMsg in Module.failed_decls + /// There will be a corresponding ErrorMsg in Zcu.failed_decls sema_failure, /// This function might be OK but it depends on another Decl which did not /// successfully complete semantic analysis. dependency_failure, - /// There will be a corresponding ErrorMsg in Module.failed_decls. + /// There will be a corresponding ErrorMsg in Zcu.failed_decls. /// Indicates that semantic analysis succeeded, but code generation for /// this function failed. 
codegen_failure, @@ -5210,6 +5237,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.Extra.empty, .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, + .files = Local.Files.empty, .decls = Local.Decls.empty, .namespaces = Local.Namespaces.empty, @@ -5221,6 +5249,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.ListMutate.empty, .limbs = Local.ListMutate.empty, .strings = Local.ListMutate.empty, + .files = Local.ListMutate.empty, .decls = Local.BucketListMutate.empty, .namespaces = Local.BucketListMutate.empty, @@ -9213,7 +9242,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * items_len; const extra_size = 4 * extra_len; const limbs_size = 8 * limbs_len; - const decls_size = @sizeOf(Module.Decl) * decls_len; + const decls_size = @sizeOf(Zcu.Decl) * decls_len; // TODO: map overhead size is not taken into account const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size; @@ -9640,29 +9669,22 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) try bw.flush(); } -pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Module.Decl { +pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Zcu.Decl { return @constCast(ip.declPtrConst(decl_index)); } -pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Module.Decl { +pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Zcu.Decl { const unwrapped_decl_index = decl_index.unwrap(ip); const decls = ip.getLocalShared(unwrapped_decl_index.tid).decls.acquire(); const decls_bucket = decls.view().items(.@"0")[unwrapped_decl_index.bucket_index]; return &decls_bucket[unwrapped_decl_index.index]; } -pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Module.Namespace { - const unwrapped_namespace_index = namespace_index.unwrap(ip); - const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire(); - const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index]; - return &namespaces_bucket[unwrapped_namespace_index.index]; -} - pub fn createDecl( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - initialization: Module.Decl, + initialization: Zcu.Decl, ) Allocator.Error!DeclIndex { const local = ip.getLocal(tid); const free_list_next = local.mutate.decls.free_list; @@ -9679,7 +9701,7 @@ pub fn createDecl( var arena = decls.arena.promote(decls.gpa); defer decls.arena.* = arena.state; decls.appendAssumeCapacity(.{try arena.allocator().create( - [1 << Local.decls_bucket_width]Module.Decl, + [1 << Local.decls_bucket_width]Zcu.Decl, )}); } const unwrapped_decl_index: DeclIndex.Unwrapped = .{ @@ -9702,11 +9724,18 @@ pub fn destroyDecl(ip: *InternPool, tid: Zcu.PerThread.Id, decl_index: DeclIndex local.mutate.decls.free_list = @intFromEnum(decl_index); } +pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Namespace { + const unwrapped_namespace_index = namespace_index.unwrap(ip); + const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire(); + const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index]; + return &namespaces_bucket[unwrapped_namespace_index.index]; +} + pub fn createNamespace( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - initialization: Module.Namespace, + initialization: Zcu.Namespace, ) 
Allocator.Error!NamespaceIndex { const local = ip.getLocal(tid); const free_list_next = local.mutate.namespaces.free_list; @@ -9724,7 +9753,7 @@ pub fn createNamespace( var arena = namespaces.arena.promote(namespaces.gpa); defer namespaces.arena.* = arena.state; namespaces.appendAssumeCapacity(.{try arena.allocator().create( - [1 << Local.namespaces_bucket_width]Module.Namespace, + [1 << Local.namespaces_bucket_width]Zcu.Namespace, )}); } const unwrapped_namespace_index: NamespaceIndex.Unwrapped = .{ @@ -9756,6 +9785,27 @@ pub fn destroyNamespace( local.mutate.namespaces.free_list = @intFromEnum(namespace_index); } +pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File { + const file_index_unwrapped = file_index.unwrap(ip); + const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); + return files.view().items(.@"0")[file_index_unwrapped.index]; +} + +pub fn createFile( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + file: *Zcu.File, +) Allocator.Error!FileIndex { + const files = ip.getLocal(tid).getMutableFiles(gpa); + const file_index_unwrapped: FileIndex.Unwrapped = .{ + .tid = tid, + .index = files.mutate.len, + }; + try files.append(.{file}); + return file_index_unwrapped.wrap(ip); +} + const EmbeddedNulls = enum { no_embedded_nulls, maybe_embedded_nulls, diff --git a/src/Sema.zig b/src/Sema.zig index 170b773d1f27..bcae8201484f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6056,7 +6056,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr else => |e| return e, }; - const result = zcu.importPkg(c_import_mod) catch |err| + const result = pt.importPkg(c_import_mod) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); @@ -13950,7 +13950,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operand_src = block.tokenOffset(inst_data.src_tok); const operand = inst_data.get(sema.code); - const result = zcu.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) { + const result = pt.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand}); }, diff --git a/src/Type.zig b/src/Type.zig index b22f8650ab2f..bea0474e9af9 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3451,7 +3451,7 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { }; } -pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 { +pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { const ip = &zcu.intern_pool; const tracked = switch (ip.indexToKey(ty.toIntern())) { .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { diff --git a/src/Zcu.zig b/src/Zcu.zig index 492858134956..5dcda2ea86d7 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -102,7 +102,7 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { /// `Compilation.update` of the process for a given `Compilation`. /// /// Indexes correspond 1:1 to `files`. -import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, +import_table: std.StringArrayHashMapUnmanaged(File.Index) = .{}, /// The set of all the files which have been loaded with `@embedFile` in the Module. 
/// We keep track of this in order to iterate over it and check which files have been @@ -892,7 +892,7 @@ pub const File = struct { } /// Add a reference to this file during AstGen. - pub fn addReference(file: *File, zcu: Zcu, ref: File.Reference) !void { + pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the // front of the references array (see below), this loop is actually O(1) on valid code. if (ref == .root) { @@ -924,7 +924,7 @@ pub const File = struct { /// Mark this file and every file referenced by it as multi_pkg and report an /// astgen_failure error for them. AstGen must have completed in its entirety. - pub fn recursiveMarkMultiPkg(file: *File, mod: *Module) void { + pub fn recursiveMarkMultiPkg(file: *File, pt: Zcu.PerThread) void { file.multi_pkg = true; file.status = .astgen_failure; @@ -944,9 +944,9 @@ pub const File = struct { const import_path = file.zir.nullTerminatedString(item.data.name); if (mem.eql(u8, import_path, "builtin")) continue; - const res = mod.importFile(file, import_path) catch continue; + const res = pt.importFile(file, import_path) catch continue; if (!res.is_pkg and !res.file.multi_pkg) { - res.file.recursiveMarkMultiPkg(mod); + res.file.recursiveMarkMultiPkg(pt); } } } @@ -3002,183 +3002,7 @@ pub const ImportFileResult = struct { is_pkg: bool, }; -pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult { - const gpa = zcu.gpa; - - // The resolved path is used as the key in the import table, to detect if - // an import refers to the same as another, despite different relative paths - // or differently mapped package names. - const resolved_path = try std.fs.path.resolve(gpa, &.{ - mod.root.root_dir.path orelse ".", - mod.root.sub_path, - mod.root_src_path, - }); - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); - - const gop = try zcu.import_table.getOrPut(gpa, resolved_path); - errdefer _ = zcu.import_table.pop(); - if (gop.found_existing) { - try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod }); - return .{ - .file = gop.value_ptr.*, - .file_index = @enumFromInt(gop.index), - .is_new = false, - .is_pkg = true, - }; - } - - const ip = &zcu.intern_pool; - - try ip.files.ensureUnusedCapacity(gpa, 1); - - if (mod.builtin_file) |builtin_file| { - keep_resolved_path = true; // It's now owned by import_table. - gop.value_ptr.* = builtin_file; - try builtin_file.addReference(zcu.*, .{ .root = mod }); - const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); - return .{ - .file = builtin_file, - .file_index = @enumFromInt(ip.files.entries.len - 1), - .is_new = false, - .is_pkg = true, - }; - } - - const sub_file_path = try gpa.dupe(u8, mod.root_src_path); - errdefer gpa.free(sub_file_path); - - const new_file = try gpa.create(File); - errdefer gpa.destroy(new_file); - - keep_resolved_path = true; // It's now owned by import_table. 
- gop.value_ptr.* = new_file; - new_file.* = .{ - .sub_file_path = sub_file_path, - .source = undefined, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, - .stat = undefined, - .tree = undefined, - .zir = undefined, - .status = .never_loaded, - .mod = mod, - }; - - const path_digest = computePathDigest(zcu, mod, sub_file_path); - - try new_file.addReference(zcu.*, .{ .root = mod }); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); - return .{ - .file = new_file, - .file_index = @enumFromInt(ip.files.entries.len - 1), - .is_new = true, - .is_pkg = true, - }; -} - -/// Called from a worker thread during AstGen. -/// Also called from Sema during semantic analysis. -pub fn importFile( - zcu: *Zcu, - cur_file: *File, - import_string: []const u8, -) !ImportFileResult { - const mod = cur_file.mod; - - if (std.mem.eql(u8, import_string, "std")) { - return zcu.importPkg(zcu.std_mod); - } - if (std.mem.eql(u8, import_string, "root")) { - return zcu.importPkg(zcu.root_mod); - } - if (mod.deps.get(import_string)) |pkg| { - return zcu.importPkg(pkg); - } - if (!mem.endsWith(u8, import_string, ".zig")) { - return error.ModuleNotFound; - } - const gpa = zcu.gpa; - - // The resolved path is used as the key in the import table, to detect if - // an import refers to the same as another, despite different relative paths - // or differently mapped package names. - const resolved_path = try std.fs.path.resolve(gpa, &.{ - mod.root.root_dir.path orelse ".", - mod.root.sub_path, - cur_file.sub_file_path, - "..", - import_string, - }); - - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); - - const gop = try zcu.import_table.getOrPut(gpa, resolved_path); - errdefer _ = zcu.import_table.pop(); - if (gop.found_existing) return .{ - .file = gop.value_ptr.*, - .file_index = @enumFromInt(gop.index), - .is_new = false, - .is_pkg = false, - }; - - const ip = &zcu.intern_pool; - - try ip.files.ensureUnusedCapacity(gpa, 1); - - const new_file = try gpa.create(File); - errdefer gpa.destroy(new_file); - - const resolved_root_path = try std.fs.path.resolve(gpa, &.{ - mod.root.root_dir.path orelse ".", - mod.root.sub_path, - }); - defer gpa.free(resolved_root_path); - - const sub_file_path = p: { - const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); - errdefer gpa.free(relative); - - if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { - break :p relative; - } - return error.ImportOutsideModulePath; - }; - errdefer gpa.free(sub_file_path); - - log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{ - resolved_root_path, resolved_path, sub_file_path, import_string, - }); - - keep_resolved_path = true; // It's now owned by import_table. 
- gop.value_ptr.* = new_file; - new_file.* = .{ - .sub_file_path = sub_file_path, - .source = undefined, - .source_loaded = false, - .tree_loaded = false, - .zir_loaded = false, - .stat = undefined, - .tree = undefined, - .zir = undefined, - .status = .never_loaded, - .mod = mod, - }; - - const path_digest = computePathDigest(zcu, mod, sub_file_path); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); - return .{ - .file = new_file, - .file_index = @enumFromInt(ip.files.entries.len - 1), - .is_new = true, - .is_pkg = false, - }; -} - -fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest { +pub fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest { const want_local_cache = mod == zcu.main_mod; var path_hash: Cache.HashHelper = .{}; path_hash.addBytes(build_options.version); @@ -3710,8 +3534,9 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved return result; } -pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File { - return zcu.import_table.values()[@intFromEnum(i)]; +pub fn fileByIndex(zcu: *Zcu, i: File.Index) *File { + const ip = &zcu.intern_pool; + return ip.filePtr(i); } /// Returns the `Decl` of the struct that represents this `File`. diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 48350033af06..48559b81f140 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -817,7 +817,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai /// https://github.com/ziglang/zig/issues/14307 pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void { - const import_file_result = try pt.zcu.importPkg(pkg); + const import_file_result = try pt.importPkg(pkg); const root_decl_index = pt.zcu.fileRootDecl(import_file_result.file_index); if (root_decl_index == .none) { return pt.semaFile(import_file_result.file_index); @@ -1081,7 +1081,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { const std_mod = zcu.std_mod; if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none; // We're in the std module. - const std_file_imported = try zcu.importPkg(std_mod); + const std_file_imported = try pt.importPkg(std_mod); const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); const std_namespace = std_decl.getInnerNamespace(zcu).?; @@ -1356,6 +1356,191 @@ pub fn semaAnonOwnerDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.Sem }; } +pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { + const zcu = pt.zcu; + const gpa = zcu.gpa; + + // The resolved path is used as the key in the import table, to detect if + // an import refers to the same as another, despite different relative paths + // or differently mapped package names. 
+ const resolved_path = try std.fs.path.resolve(gpa, &.{ + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + mod.root_src_path, + }); + var keep_resolved_path = false; + defer if (!keep_resolved_path) gpa.free(resolved_path); + + const gop = try zcu.import_table.getOrPut(gpa, resolved_path); + errdefer _ = zcu.import_table.pop(); + if (gop.found_existing) { + const file_index = gop.value_ptr.*; + const file = zcu.fileByIndex(file_index); + try file.addReference(zcu, .{ .root = mod }); + return .{ + .file = file, + .file_index = file_index, + .is_new = false, + .is_pkg = true, + }; + } + + const ip = &zcu.intern_pool; + try ip.files.ensureUnusedCapacity(gpa, 1); + + if (mod.builtin_file) |builtin_file| { + const file_index = try ip.createFile(gpa, pt.tid, builtin_file); + keep_resolved_path = true; // It's now owned by import_table. + gop.value_ptr.* = file_index; + try builtin_file.addReference(zcu, .{ .root = mod }); + const path_digest = Zcu.computePathDigest(zcu, mod, builtin_file.sub_file_path); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ + .file = builtin_file, + .file_index = file_index, + .is_new = false, + .is_pkg = true, + }; + } + + const sub_file_path = try gpa.dupe(u8, mod.root_src_path); + errdefer gpa.free(sub_file_path); + + const new_file = try gpa.create(Zcu.File); + errdefer gpa.destroy(new_file); + + const new_file_index = try ip.createFile(gpa, pt.tid, new_file); + keep_resolved_path = true; // It's now owned by import_table. + gop.value_ptr.* = new_file_index; + new_file.* = .{ + .sub_file_path = sub_file_path, + .source = undefined, + .source_loaded = false, + .tree_loaded = false, + .zir_loaded = false, + .stat = undefined, + .tree = undefined, + .zir = undefined, + .status = .never_loaded, + .mod = mod, + }; + + const path_digest = zcu.computePathDigest(mod, sub_file_path); + + try new_file.addReference(zcu, .{ .root = mod }); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ + .file = new_file, + .file_index = new_file_index, + .is_new = true, + .is_pkg = true, + }; +} + +/// Called from a worker thread during AstGen. +/// Also called from Sema during semantic analysis. +pub fn importFile( + pt: Zcu.PerThread, + cur_file: *Zcu.File, + import_string: []const u8, +) !Zcu.ImportFileResult { + const zcu = pt.zcu; + const mod = cur_file.mod; + + if (std.mem.eql(u8, import_string, "std")) { + return pt.importPkg(zcu.std_mod); + } + if (std.mem.eql(u8, import_string, "root")) { + return pt.importPkg(zcu.root_mod); + } + if (mod.deps.get(import_string)) |pkg| { + return pt.importPkg(pkg); + } + if (!std.mem.endsWith(u8, import_string, ".zig")) { + return error.ModuleNotFound; + } + const gpa = zcu.gpa; + + // The resolved path is used as the key in the import table, to detect if + // an import refers to the same as another, despite different relative paths + // or differently mapped package names. 
+ const resolved_path = try std.fs.path.resolve(gpa, &.{ + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + cur_file.sub_file_path, + "..", + import_string, + }); + + var keep_resolved_path = false; + defer if (!keep_resolved_path) gpa.free(resolved_path); + + const gop = try zcu.import_table.getOrPut(gpa, resolved_path); + errdefer _ = zcu.import_table.pop(); + if (gop.found_existing) { + const file_index = gop.value_ptr.*; + return .{ + .file = zcu.fileByIndex(file_index), + .file_index = file_index, + .is_new = false, + .is_pkg = false, + }; + } + + const ip = &zcu.intern_pool; + + try ip.files.ensureUnusedCapacity(gpa, 1); + + const new_file = try gpa.create(Zcu.File); + errdefer gpa.destroy(new_file); + + const resolved_root_path = try std.fs.path.resolve(gpa, &.{ + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + }); + defer gpa.free(resolved_root_path); + + const sub_file_path = p: { + const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); + errdefer gpa.free(relative); + + if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { + break :p relative; + } + return error.ImportOutsideModulePath; + }; + errdefer gpa.free(sub_file_path); + + log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{ + resolved_root_path, resolved_path, sub_file_path, import_string, + }); + + const new_file_index = try ip.createFile(gpa, pt.tid, new_file); + keep_resolved_path = true; // It's now owned by import_table. + gop.value_ptr.* = new_file_index; + new_file.* = .{ + .sub_file_path = sub_file_path, + .source = undefined, + .source_loaded = false, + .tree_loaded = false, + .zir_loaded = false, + .stat = undefined, + .tree = undefined, + .zir = undefined, + .status = .never_loaded, + .mod = mod, + }; + + const path_digest = zcu.computePathDigest(mod, sub_file_path); + ip.files.putAssumeCapacityNoClobber(path_digest, .none); + return .{ + .file = new_file, + .file_index = new_file_index, + .is_new = true, + .is_pkg = false, + }; +} + pub fn embedFile( pt: Zcu.PerThread, cur_file: *Zcu.File, @@ -1429,20 +1614,6 @@ pub fn embedFile( return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); } -/// Cancel the creation of an anon decl and delete any references to it. -/// If other decls depend on this decl, they must be aborted first. -pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { - assert(!pt.zcu.declIsRoot(decl_index)); - pt.destroyDecl(decl_index); -} - -/// Finalize the creation of an anon decl. -pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { - if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { - try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); - } -} - /// https://github.com/ziglang/zig/issues/14307 fn newEmbedFile( pt: Zcu.PerThread, @@ -1792,6 +1963,20 @@ const ScanDeclIter = struct { } }; +/// Cancel the creation of an anon decl and delete any references to it. +/// If other decls depend on this decl, they must be aborted first. +pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { + assert(!pt.zcu.declIsRoot(decl_index)); + pt.destroyDecl(decl_index); +} + +/// Finalize the creation of an anon decl. 
+pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { + if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { + try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -2255,7 +2440,7 @@ pub fn populateTestFunctions( const gpa = zcu.gpa; const ip = &zcu.intern_pool; const builtin_mod = zcu.root_mod.getBuiltinDependency(); - const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index; + const builtin_file_index = (pt.importPkg(builtin_mod) catch unreachable).file_index; const root_decl_index = zcu.fileRootDecl(builtin_file_index); const root_decl = zcu.declPtr(root_decl_index.unwrap().?); const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); @@ -2923,7 +3108,7 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); + const std_file_imported = pt.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 40110993b93b..7152a55d4689 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2811,7 +2811,7 @@ pub const Object = struct { const zcu = pt.zcu; const std_mod = zcu.std_mod; - const std_file_imported = zcu.importPkg(std_mod) catch unreachable; + const std_file_imported = pt.importPkg(std_mod) catch unreachable; const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); From 8f292431b03055e789f75aa98888c0f49520e268 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 10:17:32 -0400 Subject: [PATCH 096/152] InternPool: fix undefined decl fully qualified name This is now possible after moving `File.Index` to `*File` mapping into intern pool. 
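
As a rough illustration of what `internFullyQualifiedName` now produces at
instance creation time, here is a toy model of the recursive rendering. The
`Ns` type and the names in the test are invented for the example; the real
code walks `InternPool` namespaces and interns the rendered string.

    const std = @import("std");

    /// Toy stand-in for a namespace chain: each node knows its parent and
    /// the name of the decl that owns it.
    const Ns = struct {
        parent: ?*const Ns,
        name: []const u8,

        /// Recurse to the root first so segments come out in
        /// "root.child.leaf" order, then append this node's name.
        fn render(ns: *const Ns, w: anytype) @TypeOf(w).Error!void {
            if (ns.parent) |p| {
                try p.render(w);
                try w.writeByte('.');
            }
            try w.writeAll(ns.name);
        }
    };

    test "fully qualified name rendering" {
        const root: Ns = .{ .parent = null, .name = "std" };
        const fs: Ns = .{ .parent = &root, .name = "fs" };
        const dir: Ns = .{ .parent = &fs, .name = "Dir" };
        var buf: [64]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try dir.render(fbs.writer());
        try std.testing.expectEqualStrings("std.fs.Dir", fbs.getWritten());
    }

With the file and namespace tables reachable from the intern pool, the same
walk can run inside `finishFuncInstance`, so `decl.fqn` no longer has to be
patched up later in `Sema.funcCommon`.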
--- src/InternPool.zig | 2 ++ src/Sema.zig | 3 --- src/Type.zig | 2 +- src/Zcu.zig | 53 +++++++++++++++++++++++-------------------- src/Zcu/PerThread.zig | 8 +++---- 5 files changed, 35 insertions(+), 33 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 37ff1352a792..4dba928d18ca 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -8030,6 +8030,8 @@ fn finishFuncInstance( decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); + decl.fqn = try ip.namespacePtr(fn_owner_decl.src_namespace) + .internFullyQualifiedName(ip, gpa, tid, decl.name); } pub const EnumTypeInit = struct { diff --git a/src/Sema.zig b/src/Sema.zig index bcae8201484f..1c485164762d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -9737,9 +9737,6 @@ fn funcCommon( .generic_owner = sema.generic_owner, .comptime_args = sema.comptime_args, }); - const func_decl = mod.declPtr(ip.indexToKey(func_index).func.owner_decl); - func_decl.fqn = - try ip.namespacePtr(func_decl.src_namespace).internFullyQualifiedName(pt, func_decl.name); return finishFunc( sema, block, diff --git a/src/Type.zig b/src/Type.zig index bea0474e9af9..65176e9a80c8 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -337,7 +337,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error try writer.print("{}", .{decl.fqn.fmt(ip)}); } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, .empty, writer); + try namespace.renderFullyQualifiedName(ip, .empty, writer); } else { try writer.writeAll("@TypeOf(.{})"); } diff --git a/src/Zcu.zig b/src/Zcu.zig index 5dcda2ea86d7..a5de69fb62e5 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -628,23 +628,27 @@ pub const Namespace = struct { return zcu.fileByIndex(ns.file_scope); } + pub fn fileScopeIp(ns: Namespace, ip: *InternPool) *File { + return ip.filePtr(ns.file_scope); + } + // This renders e.g. "std.fs.Dir.OpenOptions" pub fn renderFullyQualifiedName( ns: Namespace, - zcu: *Zcu, + ip: *InternPool, name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { if (ns.parent.unwrap()) |parent| { - try zcu.namespacePtr(parent).renderFullyQualifiedName( - zcu, - zcu.declPtr(ns.decl_index).name, + try ip.namespacePtr(parent).renderFullyQualifiedName( + ip, + ip.declPtr(ns.decl_index).name, writer, ); } else { - try ns.fileScope(zcu).renderFullyQualifiedName(writer); + try ns.fileScopeIp(ip).renderFullyQualifiedName(writer); } - if (name != .empty) try writer.print(".{}", .{name.fmt(&zcu.intern_pool)}); + if (name != .empty) try writer.print(".{}", .{name.fmt(ip)}); } /// This renders e.g. "std/fs.zig:Dir.OpenOptions" @@ -670,44 +674,43 @@ pub const Namespace = struct { pub fn internFullyQualifiedName( ns: Namespace, - pt: Zcu.PerThread, + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, name: InternPool.NullTerminatedString, ) !InternPool.NullTerminatedString { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - - const gpa = zcu.gpa; - const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const strings = ip.getLocal(tid).getMutableStrings(gpa); // Protects reads of interned strings from being reallocated during the call to // renderFullyQualifiedName. 
const slice = try strings.addManyAsSlice(count: { var count: usize = name.length(ip) + 1; var cur_ns = &ns; while (true) { - const decl = zcu.declPtr(cur_ns.decl_index); - cur_ns = zcu.namespacePtr(cur_ns.parent.unwrap() orelse { - count += ns.fileScope(zcu).fullyQualifiedNameLen(); + const decl = ip.declPtr(cur_ns.decl_index); + cur_ns = ip.namespacePtr(cur_ns.parent.unwrap() orelse { + count += ns.fileScopeIp(ip).fullyQualifiedNameLen(); break :count count; }); count += decl.name.length(ip) + 1; } }); var fbs = std.io.fixedBufferStream(slice[0]); - ns.renderFullyQualifiedName(zcu, name, fbs.writer()) catch unreachable; + ns.renderFullyQualifiedName(ip, name, fbs.writer()) catch unreachable; assert(fbs.pos == slice[0].len); // Sanitize the name for nvptx which is more restrictive. // TODO This should be handled by the backend, not the frontend. Have a // look at how the C backend does it for inspiration. - const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; - if (cpu_arch.isNvptx()) { - for (slice[0]) |*byte| switch (byte.*) { - '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', - else => {}, - }; - } - - return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls); + // FIXME This has bitrotted and is no longer able to be implemented here. + //const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; + //if (cpu_arch.isNvptx()) { + // for (slice[0]) |*byte| switch (byte.*) { + // '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', + // else => {}, + // }; + //} + + return ip.getOrPutTrailingString(gpa, tid, @intCast(slice[0].len), .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 48559b81f140..03d9b0826132 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1896,7 +1896,7 @@ const ScanDeclIter = struct { const was_exported = decl.is_exported; assert(decl.kind == kind); // ZIR tracking should preserve this decl.name = decl_name; - decl.fqn = try namespace.internFullyQualifiedName(pt, decl_name); + decl.fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, decl_name); decl.is_pub = declaration.flags.is_pub; decl.is_exported = declaration.flags.is_export; break :decl_index .{ was_exported, decl_index }; @@ -1906,7 +1906,7 @@ const ScanDeclIter = struct { const new_decl = zcu.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - new_decl.fqn = try namespace.internFullyQualifiedName(pt, decl_name); + new_decl.fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, decl_name); new_decl.is_pub = declaration.flags.is_pub; new_decl.is_exported = declaration.flags.is_export; new_decl.zir_decl_index = tracked_inst.toOptional(); @@ -2279,8 +2279,8 @@ pub fn initNewAnonDecl( const new_decl = pt.zcu.declPtr(new_decl_index); new_decl.name = name; - new_decl.fqn = fqn.unwrap() orelse - try pt.zcu.namespacePtr(new_decl.src_namespace).internFullyQualifiedName(pt, name); + new_decl.fqn = fqn.unwrap() orelse try pt.zcu.namespacePtr(new_decl.src_namespace) + .internFullyQualifiedName(&pt.zcu.intern_pool, pt.zcu.gpa, pt.tid, name); new_decl.val = val; new_decl.alignment = .none; new_decl.@"linksection" = .none; From f290b54f891a67af456529da0f4f824a1e27b4ef Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 10:43:51 -0400 Subject: [PATCH 097/152] InternPool: make `files` more thread-safe --- src/Compilation.zig | 8 ++++---- src/InternPool.zig | 47 
++++++++++++++++++++++--------------------- src/Zcu.zig | 26 ++++++++++++++---------- src/Zcu/PerThread.zig | 32 ++++++++++++++++------------- 4 files changed, 61 insertions(+), 52 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index a474d1955a1a..c9f9c65e654d 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2784,7 +2784,7 @@ const Header = extern struct { first_dependency_len: u32, dep_entries_len: u32, free_dep_entries_len: u32, - files_len: u32, + //files_len: u32, }, }; @@ -2813,7 +2813,7 @@ pub fn saveState(comp: *Compilation) !void { .first_dependency_len = @intCast(ip.first_dependency.count()), .dep_entries_len = @intCast(ip.dep_entries.items.len), .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len), - .files_len = @intCast(ip.files.entries.len), + //.files_len = @intCast(ip.files.entries.len), }, }; addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); @@ -2838,8 +2838,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values())); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys())); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values())); // TODO: compilation errors // TODO: namespaces diff --git a/src/InternPool.zig b/src/InternPool.zig index 4dba928d18ca..ab5de7c36054 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -59,17 +59,6 @@ dep_entries: std.ArrayListUnmanaged(DepEntry) = .{}, /// garbage collection pass. free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{}, -/// Elements are ordered identically to the `import_table` field of `Zcu`. -/// -/// Unlike `import_table`, this data is serialized as part of incremental -/// compilation state. -/// -/// Key is the hash of the path to this file, used to store -/// `InternPool.TrackedInst`. -/// -/// Value is the `Decl` of the struct that represents this `File`. -files: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, OptionalDeclIndex) = .{}, - /// Whether a multi-threaded intern pool is useful. /// Currently `false` until the intern pool is actually accessed /// from multiple threads to reduce the cost of this data structure. @@ -346,7 +335,7 @@ const Local = struct { extra: Extra, limbs: Limbs, strings: Strings, - files: Files, + files: List(File), decls: Decls, namespaces: Namespaces, @@ -367,7 +356,6 @@ const Local = struct { else => @compileError("unsupported host"), }; const Strings = List(struct { u8 }); - const Files = List(struct { *Zcu.File }); const decls_bucket_width = 8; const decls_bucket_mask = (1 << decls_bucket_width) - 1; @@ -600,7 +588,7 @@ const Local = struct { const View = std.MultiArrayList(Elem); /// Must be called when accessing from another thread. 
- fn acquire(list: *const ListSelf) ListSelf { + pub fn acquire(list: *const ListSelf) ListSelf { return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) }; } fn release(list: *ListSelf, new_list: ListSelf) void { @@ -614,7 +602,7 @@ const Local = struct { return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset); } - fn view(list: ListSelf) View { + pub fn view(list: ListSelf) View { const capacity = list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ @@ -675,7 +663,16 @@ const Local = struct { }; } - pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) Files.Mutable { + /// Elements are ordered identically to the `import_table` field of `Zcu`. + /// + /// Unlike `import_table`, this data is serialized as part of incremental + /// compilation state. + /// + /// Key is the hash of the path to this file, used to store + /// `InternPool.TrackedInst`. + /// + /// Value is the `Decl` of the struct that represents this `File`. + pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) List(File).Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -957,7 +954,7 @@ pub const FileIndex = enum(u32) { unwrapped.index); } }; - fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped { + pub fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped { return .{ .tid = @enumFromInt(@intFromEnum(file_index) >> ip.tid_shift_32 & ip.getTidMask()), .index = @intFromEnum(file_index) & ip.getIndexMask(u32), @@ -965,6 +962,12 @@ pub const FileIndex = enum(u32) { } }; +const File = struct { + bin_digest: Cache.BinDigest, + file: *Zcu.File, + root_decl: OptionalDeclIndex, +}; + /// An index into `strings`. pub const String = enum(u32) { /// An empty string. 
@@ -5237,7 +5240,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.Extra.empty, .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, - .files = Local.Files.empty, + .files = Local.List(File).empty, .decls = Local.Decls.empty, .namespaces = Local.Namespaces.empty, @@ -5321,8 +5324,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.dep_entries.deinit(gpa); ip.free_dep_entries.deinit(gpa); - ip.files.deinit(gpa); - gpa.free(ip.shards); for (ip.locals) |*local| { const buckets_len = local.mutate.namespaces.buckets_list.len; @@ -9790,21 +9791,21 @@ pub fn destroyNamespace( pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File { const file_index_unwrapped = file_index.unwrap(ip); const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); - return files.view().items(.@"0")[file_index_unwrapped.index]; + return files.view().items(.file)[file_index_unwrapped.index]; } pub fn createFile( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - file: *Zcu.File, + file: File, ) Allocator.Error!FileIndex { const files = ip.getLocal(tid).getMutableFiles(gpa); const file_index_unwrapped: FileIndex.Unwrapped = .{ .tid = tid, .index = files.mutate.len, }; - try files.append(.{file}); + try files.append(file); return file_index_unwrapped.wrap(ip); } diff --git a/src/Zcu.zig b/src/Zcu.zig index a5de69fb62e5..91b60c610888 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2406,8 +2406,7 @@ pub fn deinit(zcu: *Zcu) void { for (zcu.import_table.keys()) |key| { gpa.free(key); } - for (0..zcu.import_table.entries.len) |file_index_usize| { - const file_index: File.Index = @enumFromInt(file_index_usize); + for (zcu.import_table.values()) |file_index| { pt.destroyFile(file_index); } zcu.import_table.deinit(gpa); @@ -3537,23 +3536,28 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved return result; } -pub fn fileByIndex(zcu: *Zcu, i: File.Index) *File { - const ip = &zcu.intern_pool; - return ip.filePtr(i); +pub fn fileByIndex(zcu: *Zcu, file_index: File.Index) *File { + return zcu.intern_pool.filePtr(file_index); } /// Returns the `Decl` of the struct that represents this `File`. 
-pub fn fileRootDecl(zcu: *const Zcu, i: File.Index) Decl.OptionalIndex { +pub fn fileRootDecl(zcu: *const Zcu, file_index: File.Index) Decl.OptionalIndex { const ip = &zcu.intern_pool; - return ip.files.values()[@intFromEnum(i)]; + const file_index_unwrapped = file_index.unwrap(ip); + const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); + return files.view().items(.root_decl)[file_index_unwrapped.index]; } -pub fn setFileRootDecl(zcu: *Zcu, i: File.Index, root_decl: Decl.OptionalIndex) void { +pub fn setFileRootDecl(zcu: *Zcu, file_index: File.Index, root_decl: Decl.OptionalIndex) void { const ip = &zcu.intern_pool; - ip.files.values()[@intFromEnum(i)] = root_decl; + const file_index_unwrapped = file_index.unwrap(ip); + const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); + files.view().items(.root_decl)[file_index_unwrapped.index] = root_decl; } -pub fn filePathDigest(zcu: *const Zcu, i: File.Index) Cache.BinDigest { +pub fn filePathDigest(zcu: *const Zcu, file_index: File.Index) Cache.BinDigest { const ip = &zcu.intern_pool; - return ip.files.keys()[@intFromEnum(i)]; + const file_index_unwrapped = file_index.unwrap(ip); + const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); + return files.view().items(.bin_digest)[file_index_unwrapped.index]; } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 03d9b0826132..7fa9f89d4016 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1386,15 +1386,16 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { } const ip = &zcu.intern_pool; - try ip.files.ensureUnusedCapacity(gpa, 1); - if (mod.builtin_file) |builtin_file| { - const file_index = try ip.createFile(gpa, pt.tid, builtin_file); + const path_digest = Zcu.computePathDigest(zcu, mod, builtin_file.sub_file_path); + const file_index = try ip.createFile(gpa, pt.tid, .{ + .bin_digest = path_digest, + .file = builtin_file, + .root_decl = .none, + }); keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = file_index; try builtin_file.addReference(zcu, .{ .root = mod }); - const path_digest = Zcu.computePathDigest(zcu, mod, builtin_file.sub_file_path); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = builtin_file, .file_index = file_index, @@ -1409,7 +1410,12 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { const new_file = try gpa.create(Zcu.File); errdefer gpa.destroy(new_file); - const new_file_index = try ip.createFile(gpa, pt.tid, new_file); + const path_digest = zcu.computePathDigest(mod, sub_file_path); + const new_file_index = try ip.createFile(gpa, pt.tid, .{ + .bin_digest = path_digest, + .file = new_file, + .root_decl = .none, + }); keep_resolved_path = true; // It's now owned by import_table. 
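All of the `unwrap`/`wrap` calls above decode the same handle layout: a thread id in the high bits of a `u32` and a per-thread list index in the low bits. A rough standalone model, with the shift and masks passed explicitly instead of read from the pool (these helpers are hypothetical, not the real API):

const std = @import("std");

const Unwrapped = struct { tid: u32, index: u32 };

// Pack a (tid, index) pair into one 32-bit handle.
fn wrap(u: Unwrapped, tid_shift: u5, index_mask: u32) u32 {
    std.debug.assert(u.index <= index_mask);
    return (u.tid << tid_shift) | u.index;
}

// Recover which thread's Local owns the element, and where it lives.
fn unwrap(handle: u32, tid_shift: u5, tid_mask: u32, index_mask: u32) Unwrapped {
    return .{
        .tid = (handle >> tid_shift) & tid_mask,
        .index = handle & index_mask,
    };
}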
gop.value_ptr.* = new_file_index; new_file.* = .{ @@ -1425,10 +1431,7 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { .mod = mod, }; - const path_digest = zcu.computePathDigest(mod, sub_file_path); - try new_file.addReference(zcu, .{ .root = mod }); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = new_file, .file_index = new_file_index, @@ -1489,8 +1492,6 @@ pub fn importFile( const ip = &zcu.intern_pool; - try ip.files.ensureUnusedCapacity(gpa, 1); - const new_file = try gpa.create(Zcu.File); errdefer gpa.destroy(new_file); @@ -1515,7 +1516,12 @@ pub fn importFile( resolved_root_path, resolved_path, sub_file_path, import_string, }); - const new_file_index = try ip.createFile(gpa, pt.tid, new_file); + const path_digest = zcu.computePathDigest(mod, sub_file_path); + const new_file_index = try ip.createFile(gpa, pt.tid, .{ + .bin_digest = path_digest, + .file = new_file, + .root_decl = .none, + }); keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = new_file_index; new_file.* = .{ @@ -1531,8 +1537,6 @@ pub fn importFile( .mod = mod, }; - const path_digest = zcu.computePathDigest(mod, sub_file_path); - ip.files.putAssumeCapacityNoClobber(path_digest, .none); return .{ .file = new_file, .file_index = new_file_index, From afa66fa392f5a32d16da7f4705c59dad369f6d48 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 14:33:46 -0400 Subject: [PATCH 098/152] InternPool: make `tracked_insts` thread-safe --- src/Compilation.zig | 75 ++++------- src/InternPool.zig | 182 +++++++++++++++++++++++---- src/Sema.zig | 11 +- src/Zcu.zig | 43 ++----- src/Zcu/PerThread.zig | 287 ++++++++++++++++++++++++++++-------------- 5 files changed, 382 insertions(+), 216 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index c9f9c65e654d..f4674ba20c2a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2675,7 +2675,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void { .import => |import| try Zcu.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = import.file, + .inst = .main_struct_inst, + }), .offset = .{ .token_abs = import.token }, }, "imported from module {s}", @@ -2684,7 +2687,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void { .root => |pkg| try Zcu.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }), .offset = .entire_file, }, "root of module {s}", @@ -2698,7 +2704,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void { notes[num_notes] = try Zcu.ErrorMsg.init( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }), .offset = .entire_file, }, "{} more references omitted", @@ -2710,7 +2719,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void { const err = try Zcu.ErrorMsg.create( gpa, .{ - .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }), .offset = .entire_file, }, "file exists in multiple modules", @@ -2776,7 +2788,7 @@ const Header = extern struct { //extra_len: u32, //limbs_len: u32, //string_bytes_len: u32, - tracked_insts_len: u32, + 
//tracked_insts_len: u32, src_hash_deps_len: u32, decl_val_deps_len: u32, namespace_deps_len: u32, @@ -2805,7 +2817,7 @@ pub fn saveState(comp: *Compilation) !void { //.extra_len = @intCast(ip.extra.items.len), //.limbs_len = @intCast(ip.limbs.items.len), //.string_bytes_len = @intCast(ip.string_bytes.items.len), - .tracked_insts_len = @intCast(ip.tracked_insts.count()), + //.tracked_insts_len = @intCast(ip.tracked_insts.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .decl_val_deps_len = @intCast(ip.decl_val_deps.count()), .namespace_deps_len = @intCast(ip.namespace_deps.count()), @@ -2822,7 +2834,7 @@ pub fn saveState(comp: *Compilation) !void { //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys())); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values())); @@ -4134,14 +4146,6 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye }; } -const AstGenSrc = union(enum) { - root, - import: struct { - importing_file: Zcu.File.Index, - import_tok: std.zig.Ast.TokenIndex, - }, -}; - fn workerAstGenFile( tid: usize, comp: *Compilation, @@ -4151,7 +4155,7 @@ fn workerAstGenFile( root_decl: Zcu.Decl.OptionalIndex, prog_node: std.Progress.Node, wg: *WaitGroup, - src: AstGenSrc, + src: Zcu.AstGenSrc, ) void { const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); @@ -4161,7 +4165,7 @@ fn workerAstGenFile( error.AnalysisFail => return, else => { file.status = .retryable_failure; - comp.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) { + pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) { // Swallowing this error is OK because it's implied to be OOM when // there is a missing `failed_files` error message. 
error.OutOfMemory => {}, @@ -4207,7 +4211,7 @@ fn workerAstGenFile( log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ file.sub_file_path, import_path, import_result.file.sub_file_path, }); - const sub_src: AstGenSrc = .{ .import = .{ + const sub_src: Zcu.AstGenSrc = .{ .import = .{ .importing_file = file_index, .import_tok = item.data.token, } }; @@ -4560,41 +4564,6 @@ fn reportRetryableWin32ResourceError( } } -fn reportRetryableAstGenError( - comp: *Compilation, - src: AstGenSrc, - file_index: Zcu.File.Index, - err: anyerror, -) error{OutOfMemory}!void { - const zcu = comp.module.?; - const gpa = zcu.gpa; - - const file = zcu.fileByIndex(file_index); - file.status = .retryable_failure; - - const src_loc: Zcu.LazySrcLoc = switch (src) { - .root => .{ - .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst), - .offset = .entire_file, - }, - .import => |info| .{ - .base_node_inst = try zcu.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst), - .offset = .{ .token_abs = info.import_tok }, - }, - }; - - const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ - file.mod.root, file.sub_file_path, @errorName(err), - }); - errdefer err_msg.destroy(gpa); - - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, err_msg); - } -} - fn reportRetryableEmbedFileError( comp: *Compilation, embed_file: *Zcu.EmbedFile, diff --git a/src/InternPool.zig b/src/InternPool.zig index ab5de7c36054..9fef7e290f12 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -20,10 +20,6 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// These are not serialized; it is computed upon deserialization. maps: std.ArrayListUnmanaged(FieldMap) = .{}, -/// An index into `tracked_insts` gives a reference to a single ZIR instruction which -/// persists across incremental updates. -tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{}, - /// Dependencies on the source code hash associated with a ZIR instruction. /// * For a `declaration`, this is the entire declaration body. /// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations). 
@@ -76,12 +72,15 @@ pub const TrackedInst = extern struct { } pub const Index = enum(u32) { _, - pub fn resolveFull(i: TrackedInst.Index, ip: *const InternPool) TrackedInst { - return ip.tracked_insts.keys()[@intFromEnum(i)]; + pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst { + const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip); + const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire(); + return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index]; } pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index { return i.resolveFull(ip).inst; } + pub fn toOptional(i: TrackedInst.Index) Optional { return @enumFromInt(@intFromEnum(i)); } @@ -95,21 +94,124 @@ pub const TrackedInst = extern struct { }; } }; + + pub const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + pub fn wrap(unwrapped: Unwrapped, ip: *const InternPool) TrackedInst.Index { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.index); + } + }; + pub fn unwrap(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(tracked_inst_index) >> ip.tid_shift_32 & ip.getTidMask()), + .index = @intFromEnum(tracked_inst_index) & ip.getIndexMask(u32), + }; + } }; }; pub fn trackZir( ip: *InternPool, gpa: Allocator, - file: FileIndex, - inst: Zir.Inst.Index, + tid: Zcu.PerThread.Id, + key: TrackedInst, ) Allocator.Error!TrackedInst.Index { - const key: TrackedInst = .{ - .file = file, - .inst = inst, - }; - const gop = try ip.tracked_insts.getOrPut(gpa, key); - return @enumFromInt(gop.index); + const full_hash = Hash.hash(0, std.mem.asBytes(&key)); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + var map = shard.shared.tracked_inst_map.acquire(); + const Map = @TypeOf(map); + var map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire().unwrap() orelse break; + if (entry.hash != hash) continue; + if (std.meta.eql(index.resolveFull(ip), key)) return index; + } + shard.mutate.tracked_inst_map.mutex.lock(); + defer shard.mutate.tracked_inst_map.mutex.unlock(); + if (map.entries != shard.shared.tracked_inst_map.entries) { + shard.mutate.tracked_inst_map.len += 1; + map = shard.shared.tracked_inst_map; + map_mask = map.header().mask(); + map_index = hash; + } + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire().unwrap() orelse break; + if (entry.hash != hash) continue; + if (std.meta.eql(index.resolveFull(ip), key)) return index; + } + defer shard.mutate.tracked_inst_map.len += 1; + const local = ip.getLocal(tid); + local.mutate.tracked_insts.mutex.lock(); + defer local.mutate.tracked_insts.mutex.unlock(); + const list = local.getMutableTrackedInsts(gpa); + try list.ensureUnusedCapacity(1); + const map_header = map.header().*; + if (shard.mutate.tracked_inst_map.len < map_header.capacity * 3 / 5) { + const entry = &map.entries[map_index]; + entry.hash = hash; + const index = (TrackedInst.Index.Unwrapped{ + .tid = tid, + .index = list.mutate.len, + }).wrap(ip); + list.appendAssumeCapacity(.{key}); + 
entry.release(index.toOptional()); + return index; + } + const arena_state = &local.mutate.arena; + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; + const new_map_capacity = map_header.capacity * 2; + const new_map_buf = try arena.allocator().alignedAlloc( + u8, + Map.alignment, + Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry), + ); + const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) }; + new_map.header().* = .{ .capacity = new_map_capacity }; + @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined }); + const new_map_mask = new_map.header().mask(); + map_index = 0; + while (map_index < map_header.capacity) : (map_index += 1) { + const entry = &map.entries[map_index]; + const index = entry.value.unwrap() orelse continue; + const item_hash = entry.hash; + var new_map_index = item_hash; + while (true) : (new_map_index += 1) { + new_map_index &= new_map_mask; + const new_entry = &new_map.entries[new_map_index]; + if (new_entry.value != .none) continue; + new_entry.* = .{ + .value = index.toOptional(), + .hash = item_hash, + }; + break; + } + } + map = new_map; + map_index = hash; + while (true) : (map_index += 1) { + map_index &= new_map_mask; + if (map.entries[map_index].value == .none) break; + } + const index = (TrackedInst.Index.Unwrapped{ + .tid = tid, + .index = list.mutate.len, + }).wrap(ip); + list.appendAssumeCapacity(.{key}); + map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash }; + shard.shared.tracked_inst_map.release(new_map); + return index; } /// Analysis Unit. Represents a single entity which undergoes semantic analysis. @@ -324,6 +426,7 @@ const Local = struct { extra: ListMutate, limbs: ListMutate, strings: ListMutate, + tracked_insts: MutexListMutate, files: ListMutate, decls: BucketListMutate, @@ -335,6 +438,7 @@ const Local = struct { extra: Extra, limbs: Limbs, strings: Strings, + tracked_insts: TrackedInsts, files: List(File), decls: Decls, @@ -356,6 +460,7 @@ const Local = struct { else => @compileError("unsupported host"), }; const Strings = List(struct { u8 }); + const TrackedInsts = List(struct { TrackedInst }); const decls_bucket_width = 8; const decls_bucket_mask = (1 << decls_bucket_width) - 1; @@ -375,6 +480,16 @@ const Local = struct { }; }; + const MutexListMutate = struct { + mutex: std.Thread.Mutex, + list: ListMutate, + + const empty: MutexListMutate = .{ + .mutex = .{}, + .list = ListMutate.empty, + }; + }; + const BucketListMutate = struct { last_bucket_len: u32, buckets_list: ListMutate, @@ -396,7 +511,7 @@ const Local = struct { const ListSelf = @This(); const Mutable = struct { - gpa: std.mem.Allocator, + gpa: Allocator, arena: *std.heap.ArenaAllocator.State, mutate: *ListMutate, list: *ListSelf, @@ -564,7 +679,7 @@ const Local = struct { mutable.list.release(new_list); } - fn view(mutable: Mutable) View { + pub fn view(mutable: Mutable) View { const capacity = mutable.list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ @@ -614,7 +729,7 @@ const Local = struct { }; } - pub fn getMutableItems(local: *Local, gpa: std.mem.Allocator) List(Item).Mutable { + pub fn getMutableItems(local: *Local, gpa: Allocator) List(Item).Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -623,7 +738,7 @@ const Local = struct { }; } - pub fn getMutableExtra(local: *Local, gpa: std.mem.Allocator) Extra.Mutable { + pub fn getMutableExtra(local: *Local, gpa: Allocator) Extra.Mutable { return .{ .gpa 
= gpa, .arena = &local.mutate.arena, @@ -636,7 +751,7 @@ const Local = struct { /// On 64-bit systems, this array is used for big integers and associated metadata. /// Use the helper methods instead of accessing this directly in order to not /// violate the above mechanism. - pub fn getMutableLimbs(local: *Local, gpa: std.mem.Allocator) Limbs.Mutable { + pub fn getMutableLimbs(local: *Local, gpa: Allocator) Limbs.Mutable { return switch (@sizeOf(Limb)) { @sizeOf(u32) => local.getMutableExtra(gpa), @sizeOf(u64) => .{ @@ -654,7 +769,7 @@ const Local = struct { /// is referencing the data here whether they want to store both index and length, /// thus allowing null bytes, or store only index, and use null-termination. The /// `strings` array is agnostic to either usage. - pub fn getMutableStrings(local: *Local, gpa: std.mem.Allocator) Strings.Mutable { + pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -663,6 +778,17 @@ const Local = struct { }; } + /// An index into `tracked_insts` gives a reference to a single ZIR instruction which + /// persists across incremental updates. + pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator) TrackedInsts.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.tracked_insts.list, + .list = &local.shared.tracked_insts, + }; + } + /// Elements are ordered identically to the `import_table` field of `Zcu`. /// /// Unlike `import_table`, this data is serialized as part of incremental @@ -672,7 +798,7 @@ const Local = struct { /// `InternPool.TrackedInst`. /// /// Value is the `Decl` of the struct that represents this `File`. - pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) List(File).Mutable { + pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -691,7 +817,7 @@ const Local = struct { /// serialization trivial. /// * It provides a unique integer to be used for anonymous symbol names, avoiding /// multi-threaded contention on an atomic counter. - pub fn getMutableDecls(local: *Local, gpa: std.mem.Allocator) Decls.Mutable { + pub fn getMutableDecls(local: *Local, gpa: Allocator) Decls.Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -701,7 +827,7 @@ const Local = struct { } /// Same pattern as with `getMutableDecls`. 
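Every `getMutable*` accessor in this patch has the same shape; only the field pair differs. A sketch of that shape with a made-up `widgets` field (the field and type names are hypothetical; `Local` and `Allocator` are assumed from the surrounding file): a `Mutable` is just a bundle of pointers into one thread's `Local`, so the owning thread is the sole writer while other threads read through the shared list's `acquire()`.

pub fn getMutableWidgets(local: *Local, gpa: Allocator) Widgets.Mutable {
    return .{
        .gpa = gpa,
        .arena = &local.mutate.arena,
        .mutate = &local.mutate.widgets,
        .list = &local.shared.widgets,
    };
}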
- pub fn getMutableNamespaces(local: *Local, gpa: std.mem.Allocator) Namespaces.Mutable { + pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, @@ -723,11 +849,13 @@ const Shard = struct { shared: struct { map: Map(Index), string_map: Map(OptionalNullTerminatedString), + tracked_inst_map: Map(TrackedInst.Index.Optional), } align(std.atomic.cache_line), mutate: struct { // TODO: measure cost of sharing unrelated mutate state map: Mutate align(std.atomic.cache_line), string_map: Mutate align(std.atomic.cache_line), + tracked_inst_map: Mutate align(std.atomic.cache_line), }, const Mutate = struct { @@ -5240,6 +5368,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.Extra.empty, .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, + .tracked_insts = Local.TrackedInsts.empty, .files = Local.List(File).empty, .decls = Local.Decls.empty, @@ -5252,6 +5381,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .extra = Local.ListMutate.empty, .limbs = Local.ListMutate.empty, .strings = Local.ListMutate.empty, + .tracked_insts = Local.MutexListMutate.empty, .files = Local.ListMutate.empty, .decls = Local.BucketListMutate.empty, @@ -5267,10 +5397,12 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .shared = .{ .map = Shard.Map(Index).empty, .string_map = Shard.Map(OptionalNullTerminatedString).empty, + .tracked_inst_map = Shard.Map(TrackedInst.Index.Optional).empty, }, .mutate = .{ .map = Shard.Mutate.empty, .string_map = Shard.Mutate.empty, + .tracked_inst_map = Shard.Mutate.empty, }, }); @@ -5311,8 +5443,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); - ip.tracked_insts.deinit(gpa); - ip.src_hash_deps.deinit(gpa); ip.decl_val_deps.deinit(gpa); ip.func_ies_deps.deinit(gpa); @@ -9887,7 +10017,7 @@ pub fn getOrPutTrailingString( } const key: []const u8 = strings.view().items(.@"0")[start..]; const value: embedded_nulls.StringType() = - @enumFromInt(@as(u32, @intFromEnum(tid)) << ip.tid_shift_32 | start); + @enumFromInt(@intFromEnum((String.Unwrapped{ .tid = tid, .index = start }).wrap(ip))); const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; switch (embedded_nulls) { .no_embedded_nulls => assert(!has_embedded_null), diff --git a/src/Sema.zig b/src/Sema.zig index 1c485164762d..3bcc830f8cb5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -835,12 +835,11 @@ pub const Block = struct { } fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index { - const sema = block.sema; - const gpa = sema.gpa; - const zcu = sema.pt.zcu; - const ip = &zcu.intern_pool; - const file_index = block.getFileScopeIndex(zcu); - return ip.trackZir(gpa, file_index, inst); + const pt = block.sema.pt; + return pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{ + .file = block.getFileScopeIndex(pt.zcu), + .inst = inst, + }); } }; diff --git a/src/Zcu.zig b/src/Zcu.zig index 91b60c610888..fb7ee4fac2cb 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1018,6 +1018,14 @@ pub const ErrorMsg = struct { } }; +pub const AstGenSrc = union(enum) { + root, + import: struct { + importing_file: Zcu.File.Index, + import_tok: std.zig.Ast.TokenIndex, + }, +}; + /// Canonical reference to a position within a source file. 
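The `trackZir` implementation earlier in this patch is long, but its control flow reduces to a classic two-phase sharded lookup. A condensed view, using hypothetical `probe`/`insertLocked` helpers in place of the inline probing loops: an optimistic lock-free probe first, then a mutex-guarded re-probe and insert, growing the shard's map once it reaches 3/5 load.

fn getOrPut(shard: *Shard, key: TrackedInst) !TrackedInst.Index {
    // Fast path: .acquire loads only, no locking.
    if (shard.probe(key)) |index| return index;
    shard.mutex.lock();
    defer shard.mutex.unlock();
    // The map may have gained the key (or been grown) while we waited.
    if (shard.probe(key)) |index| return index;
    // Grows past 3/5 load and publishes the new map with .release.
    return shard.insertLocked(key);
}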
pub const SrcLoc = struct { file_scope: *File, @@ -3186,41 +3194,6 @@ pub fn handleUpdateExports( }; } -pub fn reportRetryableFileError( - zcu: *Zcu, - file_index: File.Index, - comptime format: []const u8, - args: anytype, -) error{OutOfMemory}!void { - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - - const file = zcu.fileByIndex(file_index); - file.status = .retryable_failure; - - const err_msg = try ErrorMsg.create( - gpa, - .{ - .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), - .offset = .entire_file, - }, - format, - args, - ); - errdefer err_msg.destroy(gpa); - - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - - const gop = try zcu.failed_files.getOrPut(gpa, file); - if (gop.found_existing) { - if (gop.value_ptr.*) |old_err_msg| { - old_err_msg.destroy(gpa); - } - } - gop.value_ptr.* = err_msg; -} - pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u8) !void { const gop = try mod.global_assembly.getOrPut(mod.gpa, decl_index); if (gop.found_existing) { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 7fa9f89d4016..b72f7cc1aea4 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -342,6 +342,7 @@ pub fn astGenFile( /// the Compilation mutex when acting on shared state. fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void { const zcu = pt.zcu; + const ip = &zcu.intern_pool; const gpa = zcu.gpa; const new_zir = file.zir; @@ -355,109 +356,117 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, // TODO: this should be done after all AstGen workers complete, to avoid // iterating over this full set for every updated file. - for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { - const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (ti.file != file_index) continue; - const old_inst = ti.inst; - ti.inst = inst_map.get(ti.inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - continue; - }; + for (ip.locals, 0..) |*local, tid| { + local.mutate.tracked_insts.mutex.lock(); + defer local.mutate.tracked_insts.mutex.unlock(); + const tracked_insts_list = local.getMutableTrackedInsts(gpa); + for (tracked_insts_list.view().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| { + if (tracked_inst.file != file_index) continue; + const old_inst = tracked_inst.inst; + const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ + .tid = @enumFromInt(tid), + .index = @intCast(tracked_inst_unwrapped_index), + }).wrap(ip); + tracked_inst.inst = inst_map.get(old_inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. 
+ zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + log.debug("tracking failed for %{d}", .{old_inst}); + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + continue; + }; - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; + } + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + tracked_inst.inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - ti.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); + // The source hash associated with this instruction changed - invalidate relevant dependencies. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - } - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, + else => false, + }, else => false, - }, - else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, + }; + if (!has_namespace) continue; + + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + try old_names.put(zcu.gpa, name_ip, {}); } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); } - } - var any_change = false; - { - var it = new_zir.declIterator(ti.inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, + var any_change = false; + { + var it = new_zir.declIterator(tracked_inst.inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added + any_change = true; + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = tracked_inst_index, + .name = name_ip, + } }); } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added + } + // The only elements remaining in `old_names` now are any names which were removed. + for (old_names.keys()) |name_ip| { any_change = true; zcu.comp.mutex.lock(); defer zcu.comp.mutex.unlock(); try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, + .namespace = tracked_inst_index, .name = name_ip, } }); } - } - // The only elements remaining in `old_names` now are any names which were removed. 
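Boiled down, the namespace diff performed here is a set difference over declaration names (helper names below are made up): names left over in `old_names` after scanning the new ZIR were removed, names not found in `old_names` were added, and either case invalidates the matching `namespace_name` dependency.

var any_change = false;
var it = new_zir.declIterator(new_inst);
while (it.next()) |decl_inst| {
    const name = declName(new_zir, decl_inst) orelse continue; // tests, comptime blocks, etc. skipped
    if (old_names.swapRemove(name)) continue; // present in both: unchanged
    any_change = true; // only in new ZIR: added
    try markNameOutdated(name);
}
for (old_names.keys()) |name| { // only in old ZIR: removed
    any_change = true;
    try markNameOutdated(name);
}
if (any_change) try markNamespaceOutdated();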
- for (old_names.keys()) |name_ip| { - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - if (any_change) { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); + if (any_change) { + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); + } } } } @@ -854,7 +863,10 @@ fn getFileRootStruct( const decls = file.zir.bodySlice(extra_index, decls_len); extra_index += decls_len; - const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst); + const tracked_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }); const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ .layout = .auto, .fields_len = fields_len, @@ -1015,7 +1027,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { switch (zcu.comp.cache_use) { .whole => |whole| if (whole.cache_manifest) |man| { const source = file.getSource(gpa) catch |err| { - try Zcu.reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)}); + try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)}); return error.AnalysisFail; }; @@ -1024,7 +1036,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { file.mod.root.sub_path, file.sub_file_path, }) catch |err| { - try Zcu.reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)}); + try pt.reportRetryableFileError(file_index, "unable to resolve path: {s}", .{@errorName(err)}); return error.AnalysisFail; }; errdefer gpa.free(resolved_path); @@ -1148,11 +1160,10 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { defer sema.deinit(); // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. - try sema.declareDependency(.{ .src_hash = try ip.trackZir( - gpa, - decl.getFileScopeIndex(zcu), - decl_inst, - ) }); + try sema.declareDependency(.{ .src_hash = try ip.trackZir(gpa, pt.tid, .{ + .file = decl.getFileScopeIndex(zcu), + .inst = decl_inst, + }) }); var block_scope: Sema.Block = .{ .parent = null, @@ -1890,7 +1901,10 @@ const ScanDeclIter = struct { } const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); - const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); + const tracked_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = parent_file_scope_index, + .inst = decl_inst, + }); // We create a Decl for it regardless of analysis status. 
@@ -2611,6 +2625,87 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { } } +pub fn reportRetryableAstGenError( + pt: Zcu.PerThread, + src: Zcu.AstGenSrc, + file_index: Zcu.File.Index, + err: anyerror, +) error{OutOfMemory}!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const file = zcu.fileByIndex(file_index); + file.status = .retryable_failure; + + const src_loc: Zcu.LazySrcLoc = switch (src) { + .root => .{ + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }), + .offset = .entire_file, + }, + .import => |info| .{ + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = info.importing_file, + .inst = .main_struct_inst, + }), + .offset = .{ .token_abs = info.import_tok }, + }, + }; + + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + file.mod.root, file.sub_file_path, @errorName(err), + }); + errdefer err_msg.destroy(gpa); + + { + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, err_msg); + } +} + +pub fn reportRetryableFileError( + pt: Zcu.PerThread, + file_index: Zcu.File.Index, + comptime format: []const u8, + args: anytype, +) error{OutOfMemory}!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const file = zcu.fileByIndex(file_index); + file.status = .retryable_failure; + + const err_msg = try Zcu.ErrorMsg.create( + gpa, + .{ + .base_node_inst = try ip.trackZir(gpa, pt.tid, .{ + .file = file_index, + .inst = .main_struct_inst, + }), + .offset = .entire_file, + }, + format, + args, + ); + errdefer err_msg.destroy(gpa); + + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + + const gop = try zcu.failed_files.getOrPut(gpa, file); + if (gop.found_existing) { + if (gop.value_ptr.*) |old_err_msg| { + old_err_msg.destroy(gpa); + } + } + gop.value_ptr.* = err_msg; +} + /// Shortcut for calling `intern_pool.get`. pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index { return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key); From 2c89f3b65427e8ca733a9977ded4e8c6f0a89650 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 15:37:04 -0400 Subject: [PATCH 099/152] InternPool: make `maps` thread-safe --- src/InternPool.zig | 170 ++++++++++++++++++++++++++++++--------------- 1 file changed, 115 insertions(+), 55 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 9fef7e290f12..c5c361f7f62e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -13,13 +13,6 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// Cached shift amount to put a `tid` in the top bits of a 32-bit value. tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, -/// Some types such as enums, structs, and unions need to store mappings from field names -/// to field index, or value to field index. In such cases, they will store the underlying -/// field names and values directly, relying on one of these maps, stored separately, -/// to provide lookup. -/// These are not serialized; it is computed upon deserialization. -maps: std.ArrayListUnmanaged(FieldMap) = .{}, - /// Dependencies on the source code hash associated with a ZIR instruction. /// * For a `declaration`, this is the entire declaration body. /// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations). 
@@ -428,6 +421,7 @@ const Local = struct { strings: ListMutate, tracked_insts: MutexListMutate, files: ListMutate, + maps: ListMutate, decls: BucketListMutate, namespaces: BucketListMutate, @@ -440,6 +434,7 @@ const Local = struct { strings: Strings, tracked_insts: TrackedInsts, files: List(File), + maps: Maps, decls: Decls, namespaces: Namespaces, @@ -461,6 +456,7 @@ const Local = struct { }; const Strings = List(struct { u8 }); const TrackedInsts = List(struct { TrackedInst }); + const Maps = List(struct { FieldMap }); const decls_bucket_width = 8; const decls_bucket_mask = (1 << decls_bucket_width) - 1; @@ -536,14 +532,17 @@ const Local = struct { .is_tuple = elem_info.is_tuple, } }); } - fn SliceElem(comptime opts: struct { is_const: bool = false }) type { + fn PtrElem(comptime opts: struct { + size: std.builtin.Type.Pointer.Size, + is_const: bool = false, + }) type { const elem_info = @typeInfo(Elem).Struct; const elem_fields = elem_info.fields; var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ .name = elem_field.name, .type = @Type(.{ .Pointer = .{ - .size = .Slice, + .size = opts.size, .is_const = opts.is_const, .is_volatile = false, .alignment = 0, @@ -564,6 +563,23 @@ const Local = struct { } }); } + pub fn addOne(mutable: Mutable) Allocator.Error!PtrElem(.{ .size = .One }) { + try mutable.ensureUnusedCapacity(1); + return mutable.addOneAssumeCapacity(); + } + + pub fn addOneAssumeCapacity(mutable: Mutable) PtrElem(.{ .size = .One }) { + const index = mutable.mutate.len; + assert(index < mutable.list.header().capacity); + mutable.mutate.len = index + 1; + const mutable_view = mutable.view().slice(); + var ptr: PtrElem(.{ .size = .One }) = undefined; + inline for (fields) |field| { + @field(ptr, @tagName(field)) = &mutable_view.items(field)[index]; + } + return ptr; + } + pub fn append(mutable: Mutable, elem: Elem) Allocator.Error!void { try mutable.ensureUnusedCapacity(1); mutable.appendAssumeCapacity(elem); @@ -577,14 +593,14 @@ const Local = struct { pub fn appendSliceAssumeCapacity( mutable: Mutable, - slice: SliceElem(.{ .is_const = true }), + slice: PtrElem(.{ .size = .Slice, .is_const = true }), ) void { if (fields.len == 0) return; const start = mutable.mutate.len; const slice_len = @field(slice, @tagName(fields[0])).len; assert(slice_len <= mutable.list.header().capacity - start); mutable.mutate.len = @intCast(start + slice_len); - const mutable_view = mutable.view(); + const mutable_view = mutable.view().slice(); inline for (fields) |field| { const field_slice = @field(slice, @tagName(field)); assert(field_slice.len == slice_len); @@ -601,7 +617,7 @@ const Local = struct { const start = mutable.mutate.len; assert(len <= mutable.list.header().capacity - start); mutable.mutate.len = @intCast(start + len); - const mutable_view = mutable.view(); + const mutable_view = mutable.view().slice(); inline for (fields) |field| { @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); } @@ -616,7 +632,7 @@ const Local = struct { const start = mutable.mutate.len; assert(len <= mutable.list.header().capacity - start); mutable.mutate.len = @intCast(start + len); - const mutable_view = mutable.view(); + const mutable_view = mutable.view().slice(); var ptr_array: PtrArrayElem(len) = undefined; inline for (fields) |field| { @field(ptr_array, @tagName(field)) = mutable_view.items(field)[start..][0..len]; @@ -624,17 +640,17 @@ const Local = struct { return ptr_array; } 
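A note on why `addOne` above returns a struct of pointers rather than a `*Elem` (illustrated with an assumed two-field element, not a real type from this file): the list stores each field in its own column, MultiArrayList-style, so a freshly added element is addressed by one pointer per field rather than one pointer to a contiguous element.

const Elem = struct { a: u32, b: u8 };

// For this Elem, PtrElem(.{ .size = .One }) is morally:
const ElemPtr = struct {
    a: *u32,
    b: *u8,
};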
- pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!SliceElem(.{}) { + pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!PtrElem(.{ .size = .Slice }) { try mutable.ensureUnusedCapacity(len); return mutable.addManyAsSliceAssumeCapacity(len); } - pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) SliceElem(.{}) { + pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) PtrElem(.{ .size = .Slice }) { const start = mutable.mutate.len; assert(len <= mutable.list.header().capacity - start); mutable.mutate.len = @intCast(start + len); - const mutable_view = mutable.view(); - var slice: SliceElem(.{}) = undefined; + const mutable_view = mutable.view().slice(); + var slice: PtrElem(.{ .size = .Slice }) = undefined; inline for (fields) |field| { @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; } @@ -807,6 +823,20 @@ const Local = struct { }; } + /// Some types such as enums, structs, and unions need to store mappings from field names + /// to field index, or value to field index. In such cases, they will store the underlying + /// field names and values directly, relying on one of these maps, stored separately, + /// to provide lookup. + /// These are not serialized; it is computed upon deserialization. + pub fn getMutableMaps(local: *Local, gpa: Allocator) Maps.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.maps, + .list = &local.shared.maps, + }; + } + /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this BucketList. This provides four advantages: /// * Stable memory so that one thread can access a Decl object while another @@ -961,9 +991,37 @@ pub const OptionalMapIndex = enum(u32) { pub const MapIndex = enum(u32) { _, + pub fn get(map_index: MapIndex, ip: *InternPool) *FieldMap { + const unwrapped_map_index = map_index.unwrap(ip); + const maps = ip.getLocalShared(unwrapped_map_index.tid).maps.acquire(); + return &maps.view().items(.@"0")[unwrapped_map_index.index]; + } + + pub fn getConst(map_index: MapIndex, ip: *const InternPool) FieldMap { + return map_index.get(@constCast(ip)).*; + } + pub fn toOptional(i: MapIndex) OptionalMapIndex { return @enumFromInt(@intFromEnum(i)); } + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) MapIndex { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.index); + } + }; + fn unwrap(map_index: MapIndex, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(map_index) >> ip.tid_shift_32 & ip.getTidMask()), + .index = @intFromEnum(map_index) & ip.getIndexMask(u32), + }; + } }; pub const RuntimeIndex = enum(u32) { @@ -1398,7 +1456,7 @@ pub const Key = union(enum) { /// Look up field index based on field name. 
pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { - const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; + const map = self.names_map.unwrap().?.getConst(ip); const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; return @intCast(field_index); @@ -2823,7 +2881,7 @@ pub const LoadedStructType = struct { if (i >= self.field_types.len) return null; return i; }; - const map = &ip.maps.items[@intFromEnum(names_map)]; + const map = names_map.getConst(ip); const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; return @intCast(field_index); @@ -3350,7 +3408,7 @@ const LoadedEnumType = struct { /// Look up field index based on field name. pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { - const map = &ip.maps.items[@intFromEnum(self.names_map)]; + const map = self.names_map.getConst(ip); const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; return @intCast(field_index); @@ -3370,7 +3428,7 @@ const LoadedEnumType = struct { else => unreachable, }; if (self.values_map.unwrap()) |values_map| { - const map = &ip.maps.items[@intFromEnum(values_map)]; + const map = values_map.getConst(ip); const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) }; const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; return @intCast(field_index); @@ -5370,6 +5428,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .strings = Local.Strings.empty, .tracked_insts = Local.TrackedInsts.empty, .files = Local.List(File).empty, + .maps = Local.Maps.empty, .decls = Local.Decls.empty, .namespaces = Local.Namespaces.empty, @@ -5383,6 +5442,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .strings = Local.ListMutate.empty, .tracked_insts = Local.MutexListMutate.empty, .files = Local.ListMutate.empty, + .maps = Local.ListMutate.empty, .decls = Local.BucketListMutate.empty, .namespaces = Local.BucketListMutate.empty, @@ -5440,9 +5500,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - for (ip.maps.items) |*map| map.deinit(gpa); - ip.maps.deinit(gpa); - ip.src_hash_deps.deinit(gpa); ip.decl_val_deps.deinit(gpa); ip.func_ies_deps.deinit(gpa); @@ -5470,6 +5527,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { namespace.usingnamespace_set.deinit(gpa); } }; + const maps = local.getMutableMaps(gpa); + if (maps.mutate.len > 0) for (maps.view().items(.@"0")) |*map| map.deinit(gpa); local.mutate.arena.promote(gpa).deinit(); } gpa.free(ip.locals); @@ -6386,8 +6445,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(error_set_type.names_map == .none); assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan)); const names = error_set_type.names.get(ip); - const names_map = try ip.addMap(gpa, names.len); - addStringsToMap(ip, names_map, names); + const names_map = try ip.addMap(gpa, tid, names.len); + ip.addStringsToMap(names_map, names); const names_len = error_set_type.names.len; try 
extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); items.appendAssumeCapacity(.{ @@ -7287,8 +7346,8 @@ pub fn getStructType( const items = local.getMutableItems(gpa); const extra = local.getMutableExtra(gpa); - const names_map = try ip.addMap(gpa, ini.fields_len); - errdefer _ = ip.maps.pop(); + const names_map = try ip.addMap(gpa, tid, ini.fields_len); + errdefer local.mutate.maps.len -= 1; const zir_index = switch (ini.key) { inline else => |x| x.zir_index, @@ -7835,17 +7894,18 @@ pub fn getErrorSetType( const extra = local.getMutableExtra(gpa); try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); + const names_map = try ip.addMap(gpa, tid, names.len); + errdefer local.mutate.maps.len -= 1; + // The strategy here is to add the type unconditionally, then to ask if it // already exists, and if so, revert the lengths of the mutated arrays. // This is similar to what `getOrPutTrailingString` does. const prev_extra_len = extra.mutate.len; errdefer extra.mutate.len = prev_extra_len; - const predicted_names_map: MapIndex = @enumFromInt(ip.maps.items.len); - const error_set_extra_index = addExtraAssumeCapacity(extra, Tag.ErrorSet{ .names_len = @intCast(names.len), - .names_map = predicted_names_map, + .names_map = names_map, }); extra.appendSliceAssumeCapacity(.{@ptrCast(names)}); errdefer extra.mutate.len = prev_extra_len; @@ -7865,11 +7925,7 @@ pub fn getErrorSetType( }); errdefer items.mutate.len -= 1; - const names_map = try ip.addMap(gpa, names.len); - assert(names_map == predicted_names_map); - errdefer _ = ip.maps.pop(); - - addStringsToMap(ip, names_map, names); + ip.addStringsToMap(names_map, names); return gop.put(); } @@ -8235,7 +8291,7 @@ pub const WipEnumType = struct { return null; } assert(ip.typeOf(value) == @as(Index, @enumFromInt(extra_items[wip.tag_ty_index]))); - const map = &ip.maps.items[@intFromEnum(wip.values_map.unwrap().?)]; + const map = wip.values_map.unwrap().?.get(ip); const field_index = map.count(); const indexes = extra_items[wip.values_start..][0..field_index]; const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) }; @@ -8281,8 +8337,8 @@ pub fn getEnumType( try items.ensureUnusedCapacity(1); const extra = local.getMutableExtra(gpa); - const names_map = try ip.addMap(gpa, ini.fields_len); - errdefer _ = ip.maps.pop(); + const names_map = try ip.addMap(gpa, tid, ini.fields_len); + errdefer local.mutate.maps.len -= 1; switch (ini.tag_mode) { .auto => { @@ -8335,11 +8391,11 @@ pub fn getEnumType( }, .explicit, .nonexhaustive => { const values_map: OptionalMapIndex = if (!ini.has_values) .none else m: { - const values_map = try ip.addMap(gpa, ini.fields_len); + const values_map = try ip.addMap(gpa, tid, ini.fields_len); break :m values_map.toOptional(); }; errdefer if (ini.has_values) { - _ = ip.maps.pop(); + local.mutate.maps.len -= 1; }; try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len + @@ -8428,8 +8484,8 @@ pub fn getGeneratedTagEnumType( try items.ensureUnusedCapacity(1); const extra = local.getMutableExtra(gpa); - const names_map = try ip.addMap(gpa, ini.names.len); - errdefer _ = ip.maps.pop(); + const names_map = try ip.addMap(gpa, tid, ini.names.len); + errdefer local.mutate.maps.len -= 1; ip.addStringsToMap(names_map, ini.names); const fields_len: u32 = @intCast(ini.names.len); @@ -8462,8 +8518,8 @@ pub fn getGeneratedTagEnumType( ini.values.len); // field values const values_map: OptionalMapIndex = if (ini.values.len != 0) m: { - const map = try 
ip.addMap(gpa, ini.values.len); - addIndexesToMap(ip, map, ini.values); + const map = try ip.addMap(gpa, tid, ini.values.len); + ip.addIndexesToMap(map, ini.values); break :m map.toOptional(); } else .none; // We don't clean up the values map on error! @@ -8494,7 +8550,9 @@ pub fn getGeneratedTagEnumType( errdefer extra.mutate.len = prev_extra_len; errdefer switch (ini.tag_mode) { .auto => {}, - .explicit, .nonexhaustive => _ = if (ini.values.len != 0) ip.maps.pop(), + .explicit, .nonexhaustive => if (ini.values.len != 0) { + local.mutate.maps.len -= 1; + }, }; var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = .{ @@ -8598,7 +8656,7 @@ fn addStringsToMap( map_index: MapIndex, strings: []const NullTerminatedString, ) void { - const map = &ip.maps.items[@intFromEnum(map_index)]; + const map = map_index.get(ip); const adapter: NullTerminatedString.Adapter = .{ .strings = strings }; for (strings) |string| { const gop = map.getOrPutAssumeCapacityAdapted(string, adapter); @@ -8611,7 +8669,7 @@ fn addIndexesToMap( map_index: MapIndex, indexes: []const Index, ) void { - const map = &ip.maps.items[@intFromEnum(map_index)]; + const map = map_index.get(ip); const adapter: Index.Adapter = .{ .indexes = indexes }; for (indexes) |index| { const gop = map.getOrPutAssumeCapacityAdapted(index, adapter); @@ -8619,12 +8677,14 @@ fn addIndexesToMap( } } -fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex { - const ptr = try ip.maps.addOne(gpa); - errdefer _ = ip.maps.pop(); - ptr.* = .{}; - try ptr.ensureTotalCapacity(gpa, cap); - return @enumFromInt(ip.maps.items.len - 1); +fn addMap(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, cap: usize) Allocator.Error!MapIndex { + const maps = ip.getLocal(tid).getMutableMaps(gpa); + const unwrapped: MapIndex.Unwrapped = .{ .tid = tid, .index = maps.mutate.len }; + const ptr = try maps.addOne(); + errdefer maps.mutate.len = unwrapped.index; + ptr[0].* = .{}; + try ptr[0].ensureTotalCapacity(gpa, cap); + return unwrapped.wrap(ip); } /// This operation only happens under compile error conditions. 
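`addMap` and `getErrorSetType` above both rely on the same rollback idiom, reduced here to its core (`list` and `initElem` are stand-ins): append speculatively, remember the old length, and rewind it if a later step fails or the value turns out to already exist.

const prev_len = list.mutate.len;
errdefer list.mutate.len = prev_len;
const ptr = try list.addOne();
try initElem(ptr); // assumed fallible init; the errdefer rewinds on failure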
@@ -10858,7 +10918,7 @@ pub fn addFieldName(
     name: NullTerminatedString,
 ) ?u32 {
     const extra_items = extra.view().items(.@"0");
-    const map = &ip.maps.items[@intFromEnum(names_map)];
+    const map = names_map.get(ip);
     const field_index = map.count();
     const strings = extra_items[names_start..][0..field_index];
     const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) };

From 98f3a262a7aec25e0a7f0872dc7fafc9008be1d2 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Wed, 10 Jul 2024 19:03:07 -0400
Subject: [PATCH 100/152] InternPool: fix extra mutation races

---
 src/InternPool.zig | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/src/InternPool.zig b/src/InternPool.zig
index c5c361f7f62e..1d23a9522502 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -416,7 +416,7 @@ const Local = struct {
         arena: std.heap.ArenaAllocator.State,

         items: ListMutate,
-        extra: ListMutate,
+        extra: MutexListMutate,
         limbs: ListMutate,
         strings: ListMutate,
         tracked_insts: MutexListMutate,
@@ -758,7 +758,7 @@ const Local = struct {
         return .{
             .gpa = gpa,
             .arena = &local.mutate.arena,
-            .mutate = &local.mutate.extra,
+            .mutate = &local.mutate.extra.list,
             .list = &local.shared.extra,
         };
     }
@@ -2999,6 +2999,9 @@ pub const LoadedStructType = struct {
     }

     pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
+        const local = ip.getLocal(s.tid);
+        local.mutate.extra.mutex.lock();
+        defer local.mutate.extra.mutex.unlock();
         return switch (s.layout) {
             .@"packed" => @as(Tag.TypeStructPacked.Flags, @bitCast(@atomicRmw(
                 u32,
@@ -5437,7 +5440,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
             .arena = .{},

             .items = Local.ListMutate.empty,
-            .extra = Local.ListMutate.empty,
+            .extra = Local.MutexListMutate.empty,
             .limbs = Local.ListMutate.empty,
             .strings = Local.ListMutate.empty,
             .tracked_insts = Local.MutexListMutate.empty,
@@ -9410,10 +9413,13 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
 /// This is only legal because the initializer is not part of the hash.
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { const unwrapped_index = index.unwrap(ip); - const extra_list = unwrapped_index.getExtra(ip); + const local = ip.getLocal(unwrapped_index.tid); + local.mutate.extra.mutex.lock(); + defer local.mutate.extra.mutex.unlock(); + const extra_items = local.shared.extra.view().items(.@"0"); const item = unwrapped_index.getItem(ip); assert(item.tag == .variable); - @atomicStore(u32, &extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release); + @atomicStore(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release); } pub fn dump(ip: *const InternPool) void { @@ -9428,7 +9434,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var decls_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; - extra_len += local.mutate.extra.len; + extra_len += local.mutate.extra.list.len; limbs_len += local.mutate.limbs.len; decls_len += local.mutate.decls.buckets_list.len; } From c2316c52285b1319d7b44a7f7135d9e79786fd77 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 21:39:11 -0400 Subject: [PATCH 101/152] InternPool: make `global_error_set` thread-safe --- src/Compilation.zig | 4 +- src/InternPool.zig | 162 +++++++++++++++++++++++++++++++++++- src/Sema.zig | 31 +++---- src/Value.zig | 10 +-- src/Zcu.zig | 22 ----- src/Zcu/PerThread.zig | 11 +++ src/arch/wasm/CodeGen.zig | 23 ++--- src/arch/x86_64/CodeGen.zig | 4 +- src/codegen.zig | 10 +-- src/codegen/c.zig | 15 ++-- src/codegen/llvm.zig | 21 ++--- src/codegen/spirv.zig | 2 +- src/link/Dwarf.zig | 4 +- src/link/SpirV.zig | 8 +- src/link/Wasm/ZigObject.zig | 21 +++-- 15 files changed, 252 insertions(+), 96 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index f4674ba20c2a..5bf6d57b88d2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2943,7 +2943,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) { + if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) { total += 1; } } @@ -3072,7 +3072,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try addModuleErrorMsg(zcu, &bundle, value.*, &all_references); } - const actual_error_count = zcu.global_error_set.entries.len - 1; + const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len; if (actual_error_count > zcu.error_limit) { try bundle.addRootErrorMessage(.{ .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{ diff --git a/src/InternPool.zig b/src/InternPool.zig index 1d23a9522502..258bc72a6de4 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -6,6 +6,8 @@ locals: []Local = &.{}, /// Length must be a power of two and represents the number of simultaneous /// writers that can mutate any single sharded data structure. shards: []Shard = &.{}, +/// Key is the error name, index is the error tag value. Index 0 has a length-0 string. +global_error_set: GlobalErrorSet = GlobalErrorSet.empty, /// Cached number of active bits in a `tid`. tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, /// Cached shift amount to put a `tid` in the top bits of a 31-bit value. 
@@ -10129,10 +10131,10 @@ pub fn getOrPutTrailingString( defer shard.mutate.string_map.len += 1; const map_header = map.header().*; if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) { + strings.appendAssumeCapacity(.{0}); const entry = &map.entries[map_index]; entry.hash = hash; entry.release(@enumFromInt(@intFromEnum(value))); - strings.appendAssumeCapacity(.{0}); return value; } const arena_state = &ip.getLocal(tid).mutate.arena; @@ -10171,12 +10173,12 @@ pub fn getOrPutTrailingString( map_index &= new_map_mask; if (map.entries[map_index].value == .none) break; } + strings.appendAssumeCapacity(.{0}); map.entries[map_index] = .{ .value = @enumFromInt(@intFromEnum(value)), .hash = hash, }; shard.shared.string_map.release(new_map); - strings.appendAssumeCapacity(.{0}); return value; } @@ -10942,3 +10944,159 @@ fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty return a_info.flags.alignment == b_info.flags.alignment and (a_info.child == b_info.child or a_info.flags.alignment != .none); } + +const GlobalErrorSet = struct { + shared: struct { + names: Names, + map: Shard.Map(GlobalErrorSet.Index), + } align(std.atomic.cache_line), + mutate: Local.MutexListMutate align(std.atomic.cache_line), + + const Names = Local.List(struct { NullTerminatedString }); + + const empty: GlobalErrorSet = .{ + .shared = .{ + .names = Names.empty, + .map = Shard.Map(GlobalErrorSet.Index).empty, + }, + .mutate = Local.MutexListMutate.empty, + }; + + const Index = enum(Zcu.ErrorInt) { + none = 0, + _, + }; + + /// Not thread-safe, may only be called from the main thread. + pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString { + return ges.shared.names.view().items(.@"0")[0..ges.mutate.list.len]; + } + + fn getErrorValue( + ges: *GlobalErrorSet, + gpa: Allocator, + arena_state: *std.heap.ArenaAllocator.State, + name: NullTerminatedString, + ) Allocator.Error!GlobalErrorSet.Index { + if (name == .empty) return .none; + const hash = std.hash.uint32(@intFromEnum(name)); + var map = ges.shared.map.acquire(); + const Map = @TypeOf(map); + var map_mask = map.header().mask(); + const names = ges.shared.names.acquire(); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire(); + if (index == .none) break; + if (entry.hash != hash) continue; + if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index; + } + ges.mutate.mutex.lock(); + defer ges.mutate.mutex.unlock(); + if (map.entries != ges.shared.map.entries) { + map = ges.shared.map; + map_mask = map.header().mask(); + map_index = hash; + } + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.value; + if (index == .none) break; + if (entry.hash != hash) continue; + if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index; + } + const mutable_names: Names.Mutable = .{ + .gpa = gpa, + .arena = arena_state, + .mutate = &ges.mutate.list, + .list = &ges.shared.names, + }; + try mutable_names.ensureUnusedCapacity(1); + const map_header = map.header().*; + if (ges.mutate.list.len < map_header.capacity * 3 / 5) { + mutable_names.appendAssumeCapacity(.{name}); + const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len); + const entry = &map.entries[map_index]; + entry.hash = hash; + entry.release(index); + return index; + } + var arena = arena_state.promote(gpa); + defer 
arena_state.* = arena.state; + const new_map_capacity = map_header.capacity * 2; + const new_map_buf = try arena.allocator().alignedAlloc( + u8, + Map.alignment, + Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry), + ); + const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) }; + new_map.header().* = .{ .capacity = new_map_capacity }; + @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined }); + const new_map_mask = new_map.header().mask(); + map_index = 0; + while (map_index < map_header.capacity) : (map_index += 1) { + const entry = &map.entries[map_index]; + const index = entry.value; + if (index == .none) continue; + const item_hash = entry.hash; + var new_map_index = item_hash; + while (true) : (new_map_index += 1) { + new_map_index &= new_map_mask; + const new_entry = &new_map.entries[new_map_index]; + if (new_entry.value != .none) continue; + new_entry.* = .{ + .value = index, + .hash = item_hash, + }; + break; + } + } + map = new_map; + map_index = hash; + while (true) : (map_index += 1) { + map_index &= new_map_mask; + if (map.entries[map_index].value == .none) break; + } + mutable_names.appendAssumeCapacity(.{name}); + const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len); + map.entries[map_index] = .{ .value = index, .hash = hash }; + ges.shared.map.release(new_map); + return index; + } + + fn getErrorValueIfExists( + ges: *const GlobalErrorSet, + name: NullTerminatedString, + ) ?GlobalErrorSet.Index { + if (name == .empty) return .none; + const hash = std.hash.uint32(@intFromEnum(name)); + const map = ges.shared.map.acquire(); + const map_mask = map.header().mask(); + const names_items = ges.shared.names.acquire().view().items(.@"0"); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.acquire(); + if (index == .none) return null; + if (entry.hash != hash) continue; + if (names_items[@intFromEnum(index) - 1] == name) return index; + } + } +}; + +pub fn getErrorValue( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + name: NullTerminatedString, +) Allocator.Error!Zcu.ErrorInt { + return @intFromEnum(try ip.global_error_set.getErrorValue(gpa, &ip.getLocal(tid).mutate.arena, name)); +} + +pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt { + return @intFromEnum(ip.global_error_set.getErrorValueIfExists(name) orelse return null); +} diff --git a/src/Sema.zig b/src/Sema.zig index 3bcc830f8cb5..1062ece2be5a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3473,7 +3473,7 @@ fn zirErrorSetDecl( const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const name = sema.code.nullTerminatedString(name_index); const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); - _ = try mod.getErrorValue(name_ip); + _ = try pt.getErrorValue(name_ip); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } @@ -8705,7 +8705,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! inst_data.get(sema.code), .no_embedded_nulls, ); - _ = try pt.zcu.getErrorValue(name); + _ = try pt.getErrorValue(name); // Create an error set type with only this error value, and return the value. 
const error_set_type = try pt.singleErrorSetType(name); return Air.internedToRef((try pt.intern(.{ .err = .{ @@ -8735,7 +8735,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const err_name = ip.indexToKey(val.toIntern()).err.name; return Air.internedToRef((try pt.intValue( err_int_ty, - try mod.getErrorValue(err_name), + try pt.getErrorValue(err_name), )).toIntern()); } @@ -8746,10 +8746,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const names = ip.indexToKey(err_set_ty_index).error_set_type.names; switch (names.len) { 0 => return Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()), - 1 => { - const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?); - return pt.intRef(err_int_ty, int); - }, + 1 => return pt.intRef(err_int_ty, ip.getErrorValueIfExists(names.get(ip)[0]).?), else => {}, } }, @@ -8765,6 +8762,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const pt = sema.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); @@ -8774,11 +8772,16 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt)); - if (int > mod.global_error_set.count() or int == 0) + if (int > len: { + const mutate = &ip.global_error_set.mutate; + mutate.mutex.lock(); + defer mutate.mutex.unlock(); + break :len mutate.list.len; + } or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = .anyerror_type, - .name = mod.global_error_set.keys()[int], + .name = ip.global_error_set.shared.names.acquire().view().items(.@"0")[int - 1], } }))); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -14005,7 +14008,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R inst_data.get(sema.code), .no_embedded_nulls, ); - _ = try mod.getErrorValue(name); + _ = try pt.getErrorValue(name); const error_set_type = try pt.singleErrorSetType(name); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), @@ -19564,7 +19567,7 @@ fn zirRetErrValue( inst_data.get(sema.code), .no_embedded_nulls, ); - _ = try mod.getErrorValue(err_name); + _ = try pt.getErrorValue(err_name); // Return the error code from the function. 
const error_set_type = try pt.singleErrorSetType(err_name); const result_inst = Air.internedToRef((try pt.intern(.{ .err = .{ @@ -21607,7 +21610,7 @@ fn zirReify( const name = try sema.sliceToIpString(block, src, name_val, .{ .needed_comptime_reason = "error set contents must be comptime-known", }); - _ = try mod.getErrorValue(name); + _ = try pt.getErrorValue(name); const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { return sema.fail(block, src, "duplicate error '{}'", .{ @@ -27485,7 +27488,7 @@ fn fieldVal( }, .simple_type => |t| { assert(t == .anyerror); - _ = try mod.getErrorValue(field_name); + _ = try pt.getErrorValue(field_name); }, else => unreachable, } @@ -27725,7 +27728,7 @@ fn fieldPtr( }, .simple_type => |t| { assert(t == .anyerror); - _ = try mod.getErrorValue(field_name); + _ = try pt.getErrorValue(field_name); }, else => unreachable, } diff --git a/src/Value.zig b/src/Value.zig index f114a2c7fa1b..da1151139f50 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -417,7 +417,7 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro var bigint_buffer: BigIntSpace = undefined; const bigint = BigIntMutable.init( &bigint_buffer.limbs, - mod.global_error_set.getIndex(name).?, + ip.getErrorValueIfExists(name).?, ).toConst(); bigint.writeTwosComplement(buffer[0..byte_count], endian); }, @@ -427,7 +427,7 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro if (val.unionTag(mod)) |union_tag| { const union_obj = mod.typeToUnion(ty).?; const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; - const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]); + const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const field_val = try val.fieldValue(pt, field_index); const byte_count: usize = @intCast(field_type.abiSize(pt)); return writeToMemory(field_val, field_type, pt, buffer[0..byte_count]); @@ -1455,9 +1455,9 @@ pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTermi }; } -pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { - return if (getErrorName(val, mod).unwrap()) |err_name| - @intCast(mod.global_error_set.getIndex(err_name).?) +pub fn getErrorInt(val: Value, zcu: *Zcu) Module.ErrorInt { + return if (getErrorName(val, zcu).unwrap()) |err_name| + zcu.intern_pool.getErrorValueIfExists(err_name).? else 0; } diff --git a/src/Zcu.zig b/src/Zcu.zig index fb7ee4fac2cb..a9d80b4fdf04 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -141,9 +141,6 @@ failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{}, /// are stored here. cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{}, -/// Key is the error name, index is the error tag value. Index 0 has a length-0 string. 
-global_error_set: GlobalErrorSet = .{}, - /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, @@ -2399,7 +2396,6 @@ pub const CompileError = error{ pub fn init(mod: *Module, thread_count: usize) !void { const gpa = mod.gpa; try mod.intern_pool.init(gpa, thread_count); - try mod.global_error_set.put(gpa, .empty, {}); } pub fn deinit(zcu: *Zcu) void { @@ -2471,8 +2467,6 @@ pub fn deinit(zcu: *Zcu) void { zcu.single_exports.deinit(gpa); zcu.multi_exports.deinit(gpa); - zcu.global_error_set.deinit(gpa); - zcu.potentially_outdated.deinit(gpa); zcu.outdated.deinit(gpa); zcu.outdated_ready.deinit(gpa); @@ -3108,22 +3102,6 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit gop.value_ptr.* = @intCast(ref_idx); } -pub fn getErrorValue( - mod: *Module, - name: InternPool.NullTerminatedString, -) Allocator.Error!ErrorInt { - const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - return @as(ErrorInt, @intCast(gop.index)); -} - -pub fn getErrorValueFromSlice( - mod: *Module, - name: []const u8, -) Allocator.Error!ErrorInt { - const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name); - return getErrorValue(mod, interned_name); -} - pub fn errorSetBits(mod: *Module) u16 { if (mod.error_limit == 0) return 0; return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b72f7cc1aea4..4a1f257ddfe2 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2287,6 +2287,17 @@ pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.D return decl_index; } +pub fn getErrorValue( + pt: Zcu.PerThread, + name: InternPool.NullTerminatedString, +) Allocator.Error!Zcu.ErrorInt { + return pt.zcu.intern_pool.getErrorValue(pt.zcu.gpa, pt.tid, name); +} + +pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Zcu.ErrorInt { + return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, name)); +} + pub fn initNewAnonDecl( pt: Zcu.PerThread, new_decl_index: Zcu.Decl.Index, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index deab261666db..32b3b42389ec 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3304,7 +3304,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { } }, .err => |err| { - const int = try mod.getErrorValue(err.name); + const int = try pt.getErrorValue(err.name); return WValue{ .imm32 = int }; }, .error_union => |error_union| { @@ -3452,30 +3452,25 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// Returns a `Value` as a signed 32 bit value. /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { +fn valueAsI32(func: *const CodeGen, val: Value) i32 { const pt = func.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; - switch (val.ip_index) { - .none => {}, + switch (val.toIntern()) { .bool_true => return 1, .bool_false => return 0, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, pt), + else => return switch (ip.indexToKey(val.ip_index)) { + .enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, pt), .int => |int| intStorageAsI32(int.storage, pt), .ptr => |ptr| { assert(ptr.base_addr == .int); return @intCast(ptr.byte_offset); }, - .err => |err| @as(i32, @bitCast(@as(Zcu.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), + .err => |err| @bitCast(ip.getErrorValueIfExists(err.name).?), else => unreachable, }, } - - return switch (ty.zigTypeTag(mod)) { - .ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))), - else => unreachable, // Programmer called this function for an illegal type - }; } fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 { @@ -4098,7 +4093,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (items, 0..) |ref, i| { const item_val = (try func.air.value(ref, pt)).?; - const int_val = func.valueAsI32(item_val, target_ty); + const int_val = func.valueAsI32(item_val); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; } @@ -7454,7 +7449,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; for (0..names.len) |name_index| { - const err_int: Zcu.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?); + const err_int = ip.getErrorValueIfExists(names.get(ip)[name_index]).?; if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 1b1c1dd7d73b..92aac552d8c7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -16435,7 +16435,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { .size = .dword, .index = err_reg.to64(), .scale = .@"4", - .disp = 4, + .disp = (1 - 1) * 4, } }, }, ); @@ -16448,7 +16448,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { .size = .dword, .index = err_reg.to64(), .scale = .@"4", - .disp = 8, + .disp = (2 - 1) * 4, } }, }, ); diff --git a/src/codegen.zig b/src/codegen.zig index 0513682d73c4..d05cb4272893 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -137,10 +137,10 @@ pub fn generateLazySymbol( if (lazy_sym.ty.isAnyError(pt.zcu)) { alignment.* = .@"4"; - const err_names = pt.zcu.global_error_set.keys(); + const err_names = ip.global_error_set.getNamesFromMainThread(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian); var offset = code.items.len; - try code.resize((1 + err_names.len + 1) * 4); + try code.resize((err_names.len + 1) * 4); for (err_names) |err_name_nts| { const err_name = err_name_nts.toSlice(ip); mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian); @@ -243,13 +243,13 @@ pub fn generateSymbol( int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, .err => |err| { - const int = try mod.getErrorValue(err.name); + const int = try pt.getErrorValue(err.name); try code.writer().writeInt(u16, @intCast(int), endian); }, .error_union => |error_union| { const payload_ty = 
ty.errorUnionPayload(mod); const err_val: u16 = switch (error_union.val) { - .err_name => |err_name| @intCast(try mod.getErrorValue(err_name)), + .err_name => |err_name| @intCast(try pt.getErrorValue(err_name)), .payload => 0, }; @@ -1058,7 +1058,7 @@ pub fn genTypedValue( }, .ErrorSet => { const err_name = ip.indexToKey(val.toIntern()).err.name; - const error_index = zcu.global_error_set.getIndex(err_name).?; + const error_index = try pt.getErrorValue(err_name); return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 6b723967a56c..60b07f0e3f9e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2622,10 +2622,11 @@ pub fn genErrDecls(o: *Object) !void { var max_name_len: usize = 0; // do not generate an invalid empty enum when the global error set is empty - if (zcu.global_error_set.keys().len > 1) { + const names = ip.global_error_set.getNamesFromMainThread(); + if (names.len > 0) { try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); - for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| { + for (names, 1..) |name_nts, value| { const name = name_nts.toSlice(ip); max_name_len = @max(name.len, max_name_len); const err_val = try pt.intern(.{ .err = .{ @@ -2644,7 +2645,7 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (zcu.global_error_set.keys()) |name| { + for (names) |name| { const name_slice = name.toSlice(ip); @memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice); const identifier = name_buf[0 .. name_prefix.len + name_slice.len]; @@ -2674,7 +2675,7 @@ pub fn genErrDecls(o: *Object) !void { } const name_array_ty = try pt.arrayType(.{ - .len = zcu.global_error_set.count(), + .len = 1 + names.len, .child = .slice_const_u8_sentinel_0_type, }); @@ -2688,9 +2689,9 @@ pub fn genErrDecls(o: *Object) !void { .complete, ); try writer.writeAll(" = {"); - for (zcu.global_error_set.keys(), 0..) |name_nts, value| { + for (names, 1..) 
|name_nts, val| { const name = name_nts.toSlice(ip); - if (value != 0) try writer.writeByte(','); + if (val > 1) try writer.writeAll(", "); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, name.len), .StaticInitializer), @@ -6873,7 +6874,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = zig_errorName["); try f.writeCValue(writer, operand, .Other); - try writer.writeAll("];\n"); + try writer.writeAll(" - 1];\n"); return local; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 7152a55d4689..abc6a9dc9d1c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1036,20 +1036,21 @@ pub const Object = struct { const pt = o.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; - const error_name_list = mod.global_error_set.keys(); - const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); + const error_name_list = ip.global_error_set.getNamesFromMainThread(); + const llvm_errors = try mod.gpa.alloc(Builder.Constant, 1 + error_name_list.len); defer mod.gpa.free(llvm_errors); // TODO: Address space const slice_ty = Type.slice_const_u8_sentinel_0; const llvm_usize_ty = try o.lowerType(Type.usize); const llvm_slice_ty = try o.lowerType(slice_ty); - const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty); + const llvm_table_ty = try o.builder.arrayType(1 + error_name_list.len, llvm_slice_ty); llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty); - for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name| { - const name_string = try o.builder.stringNull(name.toSlice(&mod.intern_pool)); + for (llvm_errors[1..], error_name_list) |*llvm_error, name| { + const name_string = try o.builder.stringNull(name.toSlice(ip)); const name_init = try o.builder.stringConst(name_string); const name_variable_index = try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default); @@ -1085,7 +1086,7 @@ pub const Object = struct { // If there is no such function in the module, it means the source code does not need it. 
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return; const llvm_fn = o.builder.getGlobal(name) orelse return; - const errors_len = o.pt.zcu.global_error_set.count(); + const errors_len = o.pt.zcu.intern_pool.global_error_set.mutate.list.len; var wip = try Builder.WipFunction.init(&o.builder, .{ .function = llvm_fn.ptrConst(&o.builder).kind.function, @@ -1096,12 +1097,12 @@ pub const Object = struct { // Example source of the following LLVM IR: // fn __zig_lt_errors_len(index: u16) bool { - // return index < total_errors_len; + // return index <= total_errors_len; // } const lhs = wip.arg(0); const rhs = try o.builder.intValue(try o.errorIntType(), errors_len); - const is_lt = try wip.icmp(.ult, lhs, rhs, ""); + const is_lt = try wip.icmp(.ule, lhs, rhs, ""); _ = try wip.ret(is_lt); try wip.finish(); } @@ -3820,7 +3821,7 @@ pub const Object = struct { return lowerBigInt(o, ty, bigint); }, .err => |err| { - const int = try mod.getErrorValue(err.name); + const int = try pt.getErrorValue(err.name); const llvm_int = try o.builder.intConst(try o.errorIntType(), int); return llvm_int; }, @@ -9658,7 +9659,7 @@ pub const FuncGen = struct { defer wip_switch.finish(&self.wip); for (0..names.len) |name_index| { - const err_int = mod.global_error_set.getIndex(names.get(ip)[name_index]).?; + const err_int = ip.getErrorValueIfExists(names.get(ip)[name_index]).?; const this_tag_int_value = try o.builder.intConst(try o.errorIntType(), err_int); try wip_switch.addCase(this_tag_int_value, valid_block, &self.wip); } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 9346a60a1fff..7a45429da606 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -963,7 +963,7 @@ const DeclGen = struct { break :cache result_id; }, .err => |err| { - const value = try mod.getErrorValue(err.name); + const value = try pt.getErrorValue(err.name); break :cache try self.constInt(ty, value, repr); }, .error_union => |error_union| { diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index caa11e4cc4e5..9f2781549c31 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2698,7 +2698,7 @@ pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void { try addDbgInfoErrorSetNames( pt, Type.anyerror, - pt.zcu.global_error_set.keys(), + pt.zcu.intern_pool.global_error_set.getNamesFromMainThread(), target, &dbg_info_buffer, ); @@ -2867,7 +2867,7 @@ fn addDbgInfoErrorSetNames( mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); for (error_names) |error_name| { - const int = try pt.zcu.getErrorValue(error_name); + const int = try pt.getErrorValue(error_name); const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool); // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64)); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 14020433bf69..ce7e25824ce7 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -227,9 +227,9 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n var error_info = std.ArrayList(u8).init(self.object.gpa); defer error_info.deinit(); - try error_info.appendSlice("zig_errors"); - const mod = self.base.comp.module.?; - for (mod.global_error_set.keys()) |name| { + try error_info.appendSlice("zig_errors:"); + const ip = &self.base.comp.module.?.intern_pool; + for (ip.global_error_set.getNamesFromMainThread()) |name| { // Errors can contain pretty much any character - to encode them in a string we must escape // them somehow. 
Easiest here is to use some established scheme; one which also preserves the
        // name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
@@ -238,7 +238,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
             try error_info.append(':');
             try std.Uri.Component.percentEncode(
                 error_info.writer(),
-                name.toSlice(&mod.intern_pool),
+                name.toSlice(ip),
                 struct {
                     fn isValidChar(c: u8) bool {
                         return switch (c) {
diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig
index 180e45d91d07..f74705e17c3e 100644
--- a/src/link/Wasm/ZigObject.zig
+++ b/src/link/Wasm/ZigObject.zig
@@ -652,13 +652,22 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
     // Addend for each relocation to the table
     var addend: u32 = 0;
     const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid };
-    for (pt.zcu.global_error_set.keys()) |error_name| {
-        const atom = wasm_file.getAtomPtr(atom_index);
+    const slice_ty = Type.slice_const_u8_sentinel_0;
+    const atom = wasm_file.getAtomPtr(atom_index);
+    {
+        // TODO: remove this unreachable entry
+        try atom.code.appendNTimes(gpa, 0, 4);
+        try atom.code.writer(gpa).writeInt(u32, 0, .little);
+        atom.size += @intCast(slice_ty.abiSize(pt));
+        addend += 1;

-        const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
+        try names_atom.code.append(gpa, 0);
+    }
+    const ip = &pt.zcu.intern_pool;
+    for (ip.global_error_set.getNamesFromMainThread()) |error_name| {
+        const error_name_slice = error_name.toSlice(ip);
         const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated

-        const slice_ty = Type.slice_const_u8_sentinel_0;
         const offset = @as(u32, @intCast(atom.code.items.len));
         // first we create the data for the slice of the name
         try atom.code.appendNTimes(gpa, 0, 4); // ptr to name, will be relocated
@@ -677,7 +686,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
         try names_atom.code.ensureUnusedCapacity(gpa, len);
         names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]);

-        log.debug("Populated error name: '{}'", .{error_name.fmt(&pt.zcu.intern_pool)});
+        log.debug("Populated error name: '{}'", .{error_name.fmt(ip)});
     }
     names_atom.size = addend;
     zig_object.error_names_atom = names_atom_index;
@@ -1042,7 +1051,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
     const gpa = wasm_file.base.comp.gpa;

     const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;
-    const errors_len = wasm_file.base.comp.module.?.global_error_set.count();
+    const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.mutate.list.len;

     // overwrite existing atom if it already exists (maybe the error set has increased)
     // if not, allocate a new atom.
const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: { From c79d3e4aab6f4fb69a1fe45ff3045a51926edfac Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 22:05:43 -0400 Subject: [PATCH 102/152] Compilation: fix leak --- src/Compilation.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Compilation.zig b/src/Compilation.zig index 5bf6d57b88d2..cc5fd1a9eba2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1877,6 +1877,7 @@ pub fn destroy(comp: *Compilation) void { if (comp.module) |zcu| zcu.deinit(); comp.cache_use.deinit(); comp.work_queue.deinit(); + if (!InternPool.single_threaded) comp.codegen_work.queue.deinit(); comp.c_object_work_queue.deinit(); if (!build_options.only_core_functionality) { comp.win32_resource_work_queue.deinit(); From 77810f288216ef3e35f3d0df4a04351297560a5e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 10 Jul 2024 22:55:54 -0400 Subject: [PATCH 103/152] InternPool: fix optimization assertion failure --- src/InternPool.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 258bc72a6de4..2b21b25a1d85 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -10969,7 +10969,8 @@ const GlobalErrorSet = struct { /// Not thread-safe, may only be called from the main thread. pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString { - return ges.shared.names.view().items(.@"0")[0..ges.mutate.list.len]; + const len = ges.mutate.list.len; + return if (len > 0) ges.shared.names.view().items(.@"0")[0..len] else &.{}; } fn getErrorValue( From 45be80364659332807b527670514332a4b835f84 Mon Sep 17 00:00:00 2001 From: Ryan Sepassi Date: Thu, 11 Jul 2024 13:20:06 -0700 Subject: [PATCH 104/152] Update `__chkstk_ms` to have weak linkage (#20138) * Update `__chkstk_ms` to have weak linkage `__chkstk_ms` was causing conflicts during linking in some circumstances (specifically with linking object files from Rust sources). This PR switches `__chkstk_ms` to have weak linkage. 
#15107

* Update stack_probe.zig to weak linkage for all symbols

---
 lib/compiler_rt/stack_probe.zig | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/compiler_rt/stack_probe.zig b/lib/compiler_rt/stack_probe.zig
index 5533464dcd36..21e41574cc3d 100644
--- a/lib/compiler_rt/stack_probe.zig
+++ b/lib/compiler_rt/stack_probe.zig
@@ -16,16 +16,16 @@ comptime {
     if (builtin.os.tag == .windows) {
         // Default stack-probe functions emitted by LLVM
         if (is_mingw) {
-            @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
-            @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
+            @export(_chkstk, .{ .name = "_alloca", .linkage = linkage });
+            @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = linkage });

             if (arch.isAARCH64()) {
-                @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+                @export(__chkstk, .{ .name = "__chkstk", .linkage = linkage });
             }
         } else if (!builtin.link_libc) {
             // These symbols are otherwise exported by MSVCRT.lib
-            @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
-            @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+            @export(_chkstk, .{ .name = "_chkstk", .linkage = linkage });
+            @export(__chkstk, .{ .name = "__chkstk", .linkage = linkage });
         }
     }

From ca752c61c08eaa06458bdc6fa3cc724c09a62f77 Mon Sep 17 00:00:00 2001
From: Igor Anić
Date: Thu, 11 Jul 2024 17:01:39 +0200
Subject: [PATCH 105/152] tls.Client: fix out of bounds panic

When calculating how much ciphertext from the stream can fit into the
user and internal buffers, we should also take into account the
ciphertext data that is already in the internal buffer.

Fixes: 15226

Tested with [this](https://github.com/ziglang/zig/issues/15226#issuecomment-2218809140).
I used the client with different read buffers until I, hopefully,
understood what is happening.

Not relevant to this fix, but this [part](https://github.com/ziglang/zig/blob/95d9292a7a09ed883e65510ec054619747315c48/lib/std/crypto/tls/Client.zig#L988-L991)
is still a mystery to me: why don't we use free_size in the buf_cap
calculation? It seems like a rudiment of the previous implementation,
which did not use iovecs.
---
 lib/std/crypto/tls/Client.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig
index 9474953748d0..b68740bf6bb1 100644
--- a/lib/std/crypto/tls/Client.zig
+++ b/lib/std/crypto/tls/Client.zig
@@ -1012,7 +1012,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.posix.iove
     // Cleartext capacity of output buffer, in records. Minimum one full record.
const buf_cap = @max(cleartext_buf_len / max_ciphertext_len, 1); const wanted_read_len = buf_cap * (max_ciphertext_len + tls.record_header_len); - const ask_len = @max(wanted_read_len, cleartext_stack_buffer.len); + const ask_len = @max(wanted_read_len, cleartext_stack_buffer.len) - c.partial_ciphertext_end; const ask_iovecs = limitVecs(&ask_iovecs_buf, ask_len); const actual_read_len = try stream.readv(ask_iovecs); if (actual_read_len == 0) { From 908c2c902ada674eae30d81a8fb5b6c43cdfe141 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Jul 2024 12:23:54 -0700 Subject: [PATCH 106/152] std.Build.Cache.Path: add eql method --- lib/std/Build/Cache/Path.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 48bb8c32beab..cb6e4c708758 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -147,6 +147,10 @@ pub fn format( } } +pub fn eql(self: Path, other: Path) bool { + return self.root_dir.eql(other.root_dir) and std.mem.eql(u8, self.sub_path, other.sub_path); +} + const Path = @This(); const std = @import("../../std.zig"); const fs = std.fs; From d2bec8f92f15ac16a0714ddc8282ab31dd5bb889 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Jul 2024 12:24:14 -0700 Subject: [PATCH 107/152] delete dead CLI usage code from main.zig --- src/main.zig | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/src/main.zig b/src/main.zig index c7bbb9883cf4..e00442f399e5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4649,31 +4649,6 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { return cleanExit(); } -const usage_build = - \\Usage: zig build [steps] [options] - \\ - \\ Build a project from build.zig. - \\ - \\Options: - \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error - \\ -fno-reference-trace Disable reference trace - \\ --summary [mode] Control the printing of the build summary - \\ all Print the build summary in its entirety - \\ failures (Default) Only print failed steps - \\ none Do not print the build summary - \\ -j Limit concurrent jobs (default is to use all CPU cores) - \\ --build-file [file] Override path to build.zig - \\ --cache-dir [path] Override path to local Zig cache directory - \\ --global-cache-dir [path] Override path to global Zig cache directory - \\ --zig-lib-dir [arg] Override path to Zig lib directory - \\ --build-runner [file] Override path to build runner - \\ --prominent-compile-errors Buffer compile errors and display at end - \\ --seed [integer] For shuffling dependency traversal order (default: random) - \\ --fetch Exit after fetching dependency tree - \\ -h, --help Print this help and exit - \\ -; - fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { var build_file: ?[]const u8 = null; var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena); From 6e025fc2e298c633ab36e9058a2cc610f57e4522 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Jul 2024 12:24:32 -0700 Subject: [PATCH 108/152] build system: add --watch flag and report source file in InstallFile This direction is not quite right because it mutates shared state in a threaded context, so the next commit will need to fix this. 
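
For example (this is the `InstallFile` change included below), a step's
make function registers each file it reads, so the watch table can map a
file system change back to the steps that consume that file (sketch;
surrounding code elided):

    fn make(step: *Step, prog_node: std.Progress.Node) !void {
        // ...
        // Record the source file in graph.watch; this is a no-op
        // unless the user passed --watch.
        step.addWatchInput(install_file.source);
        // ... copy the file as before
    }

How the build runner actually polls the file system and re-runs the
affected steps is not wired up in this commit; only the collection of
watch inputs is.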
--- lib/compiler/build_runner.zig | 66 ++++++++++++++++++++---------- lib/std/Build.zig | 55 +++++++++++++++++++++++++ lib/std/Build/Step.zig | 46 +++++++++++++++++++++ lib/std/Build/Step/InstallFile.zig | 1 + 4 files changed, 147 insertions(+), 21 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 79585086b317..cf78ad320f4f 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -74,6 +74,7 @@ pub fn main() !void { .query = .{}, .result = try std.zig.system.resolveTargetQuery(.{}), }, + .watch = null, }; graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); @@ -97,12 +98,12 @@ pub fn main() !void { var dir_list = std.Build.DirList{}; var summary: ?Summary = null; var max_rss: u64 = 0; - var skip_oom_steps: bool = false; + var skip_oom_steps = false; var color: Color = .auto; var seed: u32 = 0; - var prominent_compile_errors: bool = false; - var help_menu: bool = false; - var steps_menu: bool = false; + var prominent_compile_errors = false; + var help_menu = false; + var steps_menu = false; var output_tmp_nonce: ?[16]u8 = null; while (nextArg(args, &arg_idx)) |arg| { @@ -227,6 +228,10 @@ pub fn main() !void { builder.verbose_llvm_cpu_features = true; } else if (mem.eql(u8, arg, "--prominent-compile-errors")) { prominent_compile_errors = true; + } else if (mem.eql(u8, arg, "--watch")) { + const watch = try arena.create(std.Build.Watch); + watch.* = std.Build.Watch.init; + graph.watch = watch; } else if (mem.eql(u8, arg, "-fwine")) { builder.enable_wine = true; } else if (mem.eql(u8, arg, "-fno-wine")) { @@ -344,7 +349,7 @@ pub fn main() !void { .prominent_compile_errors = prominent_compile_errors, .claimed_rss = 0, - .summary = summary, + .summary = summary orelse if (graph.watch != null) .new else .failures, .ttyconf = ttyconf, .stderr = stderr, }; @@ -363,7 +368,10 @@ pub fn main() !void { &run, seed, ) catch |err| switch (err) { - error.UncleanExit => process.exit(1), + error.UncleanExit => { + if (graph.watch == null) + process.exit(1); + }, else => return err, }; } @@ -377,7 +385,7 @@ const Run = struct { prominent_compile_errors: bool, claimed_rss: usize, - summary: ?Summary, + summary: Summary, ttyconf: std.io.tty.Config, stderr: File, }; @@ -417,7 +425,7 @@ fn runStepNames( for (starting_steps) |s| { constructGraphAndCheckForDependencyLoop(b, s, &step_stack, rand) catch |err| switch (err) { - error.DependencyLoopDetected => return error.UncleanExit, + error.DependencyLoopDetected => return uncleanExit(), else => |e| return e, }; } @@ -442,7 +450,7 @@ fn runStepNames( if (run.max_rss_is_default) { std.debug.print("note: use --maxrss to override the default", .{}); } - return error.UncleanExit; + return uncleanExit(); } } @@ -524,13 +532,19 @@ fn runStepNames( // A proper command line application defaults to silently succeeding. // The user may request verbose mode if they have a different preference. 
-    const failures_only = run.summary != .all and run.summary != .new;
-    if (failure_count == 0 and failures_only) return cleanExit();
+    const failures_only = switch (run.summary) {
+        .failures, .none => true,
+        else => false,
+    };
+    if (failure_count == 0 and failures_only) {
+        if (b.graph.watch != null) return;
+        return cleanExit();
+    }

     const ttyconf = run.ttyconf;
     const stderr = run.stderr;

-    if (run.summary != Summary.none) {
+    if (run.summary != .none) {
         const total_count = success_count + failure_count + pending_count + skipped_count;
         ttyconf.setColor(stderr, .cyan) catch {};
         stderr.writeAll("Build Summary:") catch {};
@@ -544,11 +558,6 @@ fn runStepNames(
         if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {};
         if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {};

-        if (run.summary == null) {
-            ttyconf.setColor(stderr, .dim) catch {};
-            stderr.writeAll(" (disable with --summary none)") catch {};
-            ttyconf.setColor(stderr, .reset) catch {};
-        }
         stderr.writeAll("\n") catch {};

         // Print a fancy tree with build results.
@@ -562,7 +571,7 @@ fn runStepNames(
         while (i > 0) {
             i -= 1;
             const step = b.top_level_steps.get(step_names[i]).?.step;
-            const found = switch (run.summary orelse .failures) {
+            const found = switch (run.summary) {
                 .all, .none => unreachable,
                 .failures => step.state != .success,
                 .new => !step.result_cached,
@@ -579,7 +588,10 @@ fn runStepNames(
         }
     }

-    if (failure_count == 0) return cleanExit();
+    if (failure_count == 0) {
+        if (b.graph.watch != null) return;
+        return cleanExit();
+    }

     // Finally, render compile errors at the bottom of the terminal.
     // We use a separate compile_error_steps array list because step_stack is destructively
@@ -591,13 +603,24 @@ fn runStepNames(
         }
     }

+    if (b.graph.watch != null) return uncleanExit();
+
     // Signal to parent process that we have printed compile errors. The
     // parent process may choose to omit the "following command failed"
     // line in this case.
     process.exit(2);
 }

-    process.exit(1);
+    return uncleanExit();
+}
+
+fn uncleanExit() error{UncleanExit}!void {
+    if (builtin.mode == .Debug) {
+        return error.UncleanExit;
+    } else {
+        std.debug.lockStdErr();
+        process.exit(1);
+    }
+}

 const PrintNode = struct {
@@ -768,7 +791,7 @@ fn printTreeStep(
     step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
 ) !void {
     const first = step_stack.swapRemove(s);
-    const summary = run.summary orelse .failures;
+    const summary = run.summary;
     const skip = switch (summary) {
         .none => unreachable,
         .all => false,
@@ -1124,6 +1147,7 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
     \\  --maxrss <bytes>             Limit memory usage (default is to use available memory)
     \\  --skip-oom-steps             Instead of failing, skip steps that would exceed --maxrss
     \\  --fetch                      Exit after fetching dependency tree
+    \\  --watch                      Continuously rebuild when source files are modified
     \\
     \\Project-Specific Options:
     \\
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 0255245a3c63..bdfd71891bd0 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -120,6 +120,61 @@ pub const Graph = struct {
     needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{},
     /// Information about the native target. Computed before build() is invoked.
     host: ResolvedTarget,
+    /// When `--watch` is provided, collects the set of files that should be
+    /// watched and the state required to poll the system for changes.
+ watch: ?*Watch, +}; + +pub const Watch = struct { + table: Table, + + pub const init: Watch = .{ + .table = .{}, + }; + + /// Key is the directory to watch which contains one or more files we are + /// interested in noticing changes to. + pub const Table = std.ArrayHashMapUnmanaged(Cache.Path, ReactionSet, TableContext, false); + + const Hash = std.hash.Wyhash; + + pub const TableContext = struct { + pub fn hash(self: TableContext, a: Cache.Path) u32 { + _ = self; + const seed: u32 = @bitCast(a.root_dir.handle.fd); + return @truncate(Hash.hash(seed, a.sub_path)); + } + pub fn eql(self: TableContext, a: Cache.Path, b: Cache.Path, b_index: usize) bool { + _ = self; + _ = b_index; + return a.eql(b); + } + }; + + pub const ReactionSet = std.ArrayHashMapUnmanaged(Match, void, Match.Context, false); + + pub const Match = struct { + /// Relative to the watched directory, the file path that triggers this + /// match. + basename: []const u8, + /// The step to re-run when file corresponding to `basename` is changed. + step: *Step, + + pub const Context = struct { + pub fn hash(self: Context, a: Match) u32 { + _ = self; + var hasher = Hash.init(0); + std.hash.autoHash(&hasher, a.step); + hasher.update(a.basename); + return @truncate(hasher.final()); + } + pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool { + _ = self; + _ = b_index; + return a.step == b.step and mem.eql(u8, a.basename, b.basename); + } + }; + }; }; const AvailableDeps = []const struct { []const u8, []const u8 }; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 0813aba6897f..91fe29866696 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -562,6 +562,52 @@ pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void { } } +fn oom(err: anytype) noreturn { + switch (err) { + error.OutOfMemory => @panic("out of memory"), + } +} + +pub fn addWatchInput(step: *Step, lazy_path: std.Build.LazyPath) void { + errdefer |err| oom(err); + const w = step.owner.graph.watch orelse return; + switch (lazy_path) { + .src_path => |src_path| try addWatchInputFromBuilder(step, w, src_path.owner, src_path.sub_path), + .dependency => |d| try addWatchInputFromBuilder(step, w, d.dependency.builder, d.sub_path), + .cwd_relative => |path_string| { + try addWatchInputFromPath(w, .{ + .root_dir = .{ + .path = null, + .handle = std.fs.cwd(), + }, + .sub_path = std.fs.path.dirname(path_string) orelse "", + }, .{ + .step = step, + .basename = std.fs.path.basename(path_string), + }); + }, + // Nothing to watch because this dependency edge is modeled instead via `dependants`. 
+ .generated => {}, + } +} + +fn addWatchInputFromBuilder(step: *Step, w: *std.Build.Watch, builder: *std.Build, sub_path: []const u8) !void { + return addWatchInputFromPath(w, .{ + .root_dir = builder.build_root, + .sub_path = std.fs.path.dirname(sub_path) orelse "", + }, .{ + .step = step, + .basename = std.fs.path.basename(sub_path), + }); +} + +fn addWatchInputFromPath(w: *std.Build.Watch, path: std.Build.Cache.Path, match: std.Build.Watch.Match) !void { + const gpa = match.step.owner.allocator; + const gop = try w.table.getOrPut(gpa, path); + if (!gop.found_existing) gop.value_ptr.* = .{}; + try gop.value_ptr.put(gpa, match, {}); +} + test { _ = CheckFile; _ = CheckObject; diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index 8202a9d79648..c7f1088c48df 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -40,6 +40,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_file: *InstallFile = @fieldParentPtr("step", step); + step.addWatchInput(install_file.source); const full_src_path = install_file.source.getPath2(b, step); const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path); const cwd = std.fs.cwd(); From 26d506c0f8b5249fe29186506b82e5f515fcc56f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Jul 2024 13:05:56 -0700 Subject: [PATCH 109/152] std.Build: remove the "push installed file" mechanism Tracked by #14943 --- lib/std/Build.zig | 36 ++---------------------------- lib/std/Build/Step/InstallDir.zig | 1 - lib/std/Build/Step/InstallFile.zig | 1 - 3 files changed, 2 insertions(+), 36 deletions(-) diff --git a/lib/std/Build.zig b/lib/std/Build.zig index bdfd71891bd0..bfab90971c78 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -50,7 +50,6 @@ install_path: []const u8, sysroot: ?[]const u8 = null, search_prefixes: std.ArrayListUnmanaged([]const u8), libc_file: ?[]const u8 = null, -installed_files: ArrayList(InstalledFile), /// Path to the directory containing build.zig. build_root: Cache.Directory, cache_root: Cache.Directory, @@ -331,7 +330,6 @@ pub fn create( .exe_dir = undefined, .h_dir = undefined, .dest_dir = graph.env_map.get("DESTDIR"), - .installed_files = ArrayList(InstalledFile).init(arena), .install_tls = .{ .step = Step.init(.{ .id = TopLevelStep.base_id, @@ -433,7 +431,6 @@ fn createChildOnly( .sysroot = parent.sysroot, .search_prefixes = parent.search_prefixes, .libc_file = parent.libc_file, - .installed_files = ArrayList(InstalledFile).init(allocator), .build_root = build_root, .cache_root = parent.cache_root, .zig_lib_dir = parent.zig_lib_dir, @@ -1138,15 +1135,8 @@ fn makeUninstall(uninstall_step: *Step, prog_node: std.Progress.Node) anyerror!v const uninstall_tls: *TopLevelStep = @fieldParentPtr("step", uninstall_step); const b: *Build = @fieldParentPtr("uninstall_tls", uninstall_tls); - for (b.installed_files.items) |installed_file| { - const full_path = b.getInstallPath(installed_file.dir, installed_file.path); - if (b.verbose) { - log.info("rm {s}", .{full_path}); - } - fs.cwd().deleteTree(full_path) catch {}; - } - - // TODO remove empty directories + _ = b; + @panic("TODO implement https://github.com/ziglang/zig/issues/14943"); } /// Creates a configuration option to be passed to the build.zig script. 
@@ -1719,15 +1709,6 @@ pub fn addCheckFile( return Step.CheckFile.create(b, file_source, options); } -/// deprecated: https://github.com/ziglang/zig/issues/14943 -pub fn pushInstalledFile(b: *Build, dir: InstallDir, dest_rel_path: []const u8) void { - const file = InstalledFile{ - .dir = dir, - .path = dest_rel_path, - }; - b.installed_files.append(file.dupe(b)) catch @panic("OOM"); -} - pub fn truncateFile(b: *Build, dest_path: []const u8) !void { if (b.verbose) { log.info("truncate {s}", .{dest_path}); @@ -2567,19 +2548,6 @@ pub const InstallDir = union(enum) { } }; -pub const InstalledFile = struct { - dir: InstallDir, - path: []const u8, - - /// Duplicates the installed file path and directory. - pub fn dupe(file: InstalledFile, builder: *Build) InstalledFile { - return .{ - .dir = file.dir.dupe(builder), - .path = builder.dupe(file.path), - }; - } -}; - /// This function is intended to be called in the `configure` phase only. /// It returns an absolute directory path, which is potentially going to be a /// source of API breakage in the future, so keep that in mind when using this diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index 0a6edafb338e..ca38e09ec199 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -41,7 +41,6 @@ pub const Options = struct { }; pub fn create(owner: *std.Build, options: Options) *InstallDir { - owner.pushInstalledFile(options.install_dir, options.install_subdir); const install_dir = owner.allocator.create(InstallDir) catch @panic("OOM"); install_dir.* = .{ .step = Step.init(.{ diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index c7f1088c48df..dd6aa2d1c4a6 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -19,7 +19,6 @@ pub fn create( dest_rel_path: []const u8, ) *InstallFile { assert(dest_rel_path.len != 0); - owner.pushInstalledFile(dir, dest_rel_path); const install_file = owner.allocator.create(InstallFile) catch @panic("OOM"); install_file.* = .{ .step = Step.init(.{ From 6c64090e7af56ccbf374e1a927fe232b340ed681 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Jul 2024 23:35:00 -0700 Subject: [PATCH 110/152] std.os.linux: fanotify_init, fanotify_mark, name_to_handle_at * Delete existing `FAN` struct in favor of a `fanotify` struct which has type-safe bindings (breaking). * Add name_to_handle_at syscall wrapper. * Add file_handle * Add kernel_fsid_t * Add fsid_t * Add and update std.posix wrappers. 
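
As a rough usage sketch (illustrative only; error handling elided, and
the raw syscall wrappers return an errno-encoding usize), the packed
structs replace manual bit arithmetic at call sites:

    const linux = std.os.linux;

    // Previously: fanotify_init(FAN.CLOEXEC | FAN.CLASS_NOTIF, 0).
    const fan_fd = linux.fanotify_init(.{ .CLOEXEC = true, .CLASS = .NOTIF }, 0);

    // Watch a directory for modified/closed-for-write children.
    _ = linux.fanotify_mark(
        @intCast(fan_fd),
        .{ .ADD = true, .ONLYDIR = true },
        .{ .MODIFY = true, .CLOSE_WRITE = true, .EVENT_ON_CHILD = true },
        linux.AT.FDCWD,
        "some/dir",
    );

Because the flag types are `packed struct(u32)` (and the event mask a
`packed struct(u64)`), the wrappers pass them to the kernel with a plain
@bitCast, so the syscall ABI is unchanged.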
--- lib/std/os/linux.zig | 232 +++++++++++++++++++++++++++++++++---------- lib/std/posix.zig | 60 ++++++++++- 2 files changed, 235 insertions(+), 57 deletions(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 8ef74dec7b1f..91a050ee22b5 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -698,12 +698,42 @@ pub fn inotify_rm_watch(fd: i32, wd: i32) usize { return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd)))); } -pub fn fanotify_init(flags: u32, event_f_flags: u32) usize { - return syscall2(.fanotify_init, flags, event_f_flags); +pub fn fanotify_init(flags: fanotify.InitFlags, event_f_flags: u32) usize { + return syscall2(.fanotify_init, @as(u32, @bitCast(flags)), event_f_flags); } -pub fn fanotify_mark(fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[*:0]const u8) usize { - return syscall5(.fanotify_mark, @as(usize, @bitCast(@as(isize, fd))), flags, mask, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(pathname)); +pub fn fanotify_mark( + fd: fd_t, + flags: fanotify.MarkFlags, + mask: fanotify.MarkMask, + dirfd: fd_t, + pathname: ?[*:0]const u8, +) usize { + return syscall5( + .fanotify_mark, + @bitCast(@as(isize, fd)), + @as(u32, @bitCast(flags)), + @bitCast(mask), + @bitCast(@as(isize, dirfd)), + @intFromPtr(pathname), + ); +} + +pub fn name_to_handle_at( + dirfd: fd_t, + pathname: [*:0]const u8, + handle: *std.os.linux.file_handle, + mount_id: *i32, + flags: u32, +) usize { + return syscall5( + .name_to_handle_at, + @as(u32, @bitCast(dirfd)), + @intFromPtr(pathname), + @intFromPtr(handle), + @intFromPtr(mount_id), + flags, + ); } pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { @@ -4135,58 +4165,156 @@ pub const IN = struct { pub const ONESHOT = 0x80000000; }; -pub const FAN = struct { - pub const ACCESS = 0x00000001; - pub const MODIFY = 0x00000002; - pub const CLOSE_WRITE = 0x00000008; - pub const CLOSE_NOWRITE = 0x00000010; - pub const OPEN = 0x00000020; - pub const Q_OVERFLOW = 0x00004000; - pub const OPEN_PERM = 0x00010000; - pub const ACCESS_PERM = 0x00020000; - pub const ONDIR = 0x40000000; - pub const EVENT_ON_CHILD = 0x08000000; - pub const CLOSE = CLOSE_WRITE | CLOSE_NOWRITE; - pub const CLOEXEC = 0x00000001; - pub const NONBLOCK = 0x00000002; - pub const CLASS_NOTIF = 0x00000000; - pub const CLASS_CONTENT = 0x00000004; - pub const CLASS_PRE_CONTENT = 0x00000008; - pub const ALL_CLASS_BITS = CLASS_NOTIF | CLASS_CONTENT | CLASS_PRE_CONTENT; - pub const UNLIMITED_QUEUE = 0x00000010; - pub const UNLIMITED_MARKS = 0x00000020; - pub const ALL_INIT_FLAGS = CLOEXEC | NONBLOCK | ALL_CLASS_BITS | UNLIMITED_QUEUE | UNLIMITED_MARKS; - pub const MARK_ADD = 0x00000001; - pub const MARK_REMOVE = 0x00000002; - pub const MARK_DONT_FOLLOW = 0x00000004; - pub const MARK_ONLYDIR = 0x00000008; - pub const MARK_MOUNT = 0x00000010; - pub const MARK_IGNORED_MASK = 0x00000020; - pub const MARK_IGNORED_SURV_MODIFY = 0x00000040; - pub const MARK_FLUSH = 0x00000080; - pub const ALL_MARK_FLAGS = MARK_ADD | MARK_REMOVE | MARK_DONT_FOLLOW | MARK_ONLYDIR | MARK_MOUNT | MARK_IGNORED_MASK | MARK_IGNORED_SURV_MODIFY | MARK_FLUSH; - pub const ALL_EVENTS = ACCESS | MODIFY | CLOSE | OPEN; - pub const ALL_PERM_EVENTS = OPEN_PERM | ACCESS_PERM; - pub const ALL_OUTGOING_EVENTS = ALL_EVENTS | ALL_PERM_EVENTS | Q_OVERFLOW; - pub const ALLOW = 0x01; - pub const DENY = 0x02; -}; - -pub const fanotify_event_metadata = extern struct { - event_len: u32, - vers: u8, - reserved: u8, - 
metadata_len: u16, - mask: u64 align(8), - fd: i32, - pid: i32, +pub const fanotify = struct { + pub const InitFlags = packed struct(u32) { + CLOEXEC: bool = false, + NONBLOCK: bool = false, + CLASS: enum(u2) { + NOTIF = 0, + CONTENT = 1, + PRE_CONTENT = 2, + } = .NOTIF, + UNLIMITED_QUEUE: bool = false, + UNLIMITED_MARKS: bool = false, + ENABLE_AUDIT: bool = false, + REPORT_PIDFD: bool = false, + REPORT_TID: bool = false, + REPORT_FID: bool = false, + REPORT_DIR_FID: bool = false, + REPORT_NAME: bool = false, + REPORT_TARGET_FID: bool = false, + _: u19 = 0, + }; + + pub const MarkFlags = packed struct(u32) { + ADD: bool = false, + REMOVE: bool = false, + DONT_FOLLOW: bool = false, + ONLYDIR: bool = false, + MOUNT: bool = false, + /// Mutually exclusive with `IGNORE` + IGNORED_MASK: bool = false, + IGNORED_SURV_MODIFY: bool = false, + FLUSH: bool = false, + FILESYSTEM: bool = false, + EVICTABLE: bool = false, + /// Mutually exclusive with `IGNORED_MASK` + IGNORE: bool = false, + _: u21 = 0, + }; + + pub const MarkMask = packed struct(u64) { + /// File was accessed + ACCESS: bool = false, + /// File was modified + MODIFY: bool = false, + /// Metadata changed + ATTRIB: bool = false, + /// Writtable file closed + CLOSE_WRITE: bool = false, + /// Unwrittable file closed + CLOSE_NOWRITE: bool = false, + /// File was opened + OPEN: bool = false, + /// File was moved from X + MOVED_FROM: bool = false, + /// File was moved to Y + MOVED_TO: bool = false, + + /// Subfile was created + CREATE: bool = false, + /// Subfile was deleted + DELETE: bool = false, + /// Self was deleted + DELETE_SELF: bool = false, + /// Self was moved + MOVE_SELF: bool = false, + /// File was opened for exec + OPEN_EXEC: bool = false, + reserved13: u1 = 0, + /// Event queued overflowed + Q_OVERFLOW: bool = false, + /// Filesystem error + FS_ERROR: bool = false, + + /// File open in perm check + OPEN_PERM: bool = false, + /// File accessed in perm check + ACCESS_PERM: bool = false, + /// File open/exec in perm check + OPEN_EXEC_PERM: bool = false, + reserved19: u8 = 0, + /// Interested in child events + EVENT_ON_CHILD: bool = false, + /// File was renamed + RENAME: bool = false, + reserved30: u1 = 0, + /// Event occurred against dir + ONDIR: bool = false, + reserved31: u33 = 0, + }; + + pub const event_metadata = extern struct { + event_len: u32, + vers: u8, + reserved: u8, + metadata_len: u16, + mask: u64 align(8), + fd: i32, + pid: i32, + + pub const VERSION = 3; + }; + + pub const response = extern struct { + fd: i32, + response: u32, + }; + + /// Unique file identifier info record. + /// + /// This structure is used for records of types `EVENT_INFO_TYPE.FID`. + /// `EVENT_INFO_TYPE.DFID` and `EVENT_INFO_TYPE.DFID_NAME`. + /// + /// For `EVENT_INFO_TYPE.DFID_NAME` there is additionally a null terminated + /// name immediately after the file handle. + pub const event_info_fid = extern struct { + hdr: event_info_header, + fsid: kernel_fsid_t, + /// Following is an opaque struct file_handle that can be passed as + /// an argument to open_by_handle_at(2). + handle: [0]u8, + }; + + /// Variable length info record following event metadata. 
+ pub const event_info_header = extern struct { + info_type: EVENT_INFO_TYPE, + pad: u8, + len: u16, + }; + + pub const EVENT_INFO_TYPE = enum(u8) { + FID = 1, + DFID_NAME = 2, + DFID = 3, + PIDFD = 4, + ERROR = 5, + OLD_DFID_NAME = 10, + OLD_DFID = 11, + NEW_DFID_NAME = 12, + NEW_DFID = 13, + }; }; -pub const fanotify_response = extern struct { - fd: i32, - response: u32, +pub const file_handle = extern struct { + handle_bytes: u32, + handle_type: i32, + f_handle: [0]u8, }; +pub const kernel_fsid_t = fsid_t; +pub const fsid_t = [2]i32; + pub const S = struct { pub const IFMT = 0o170000; diff --git a/lib/std/posix.zig b/lib/std/posix.zig index d244dad51d10..199b4d117d46 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -4501,7 +4501,7 @@ pub const FanotifyInitError = error{ PermissionDenied, } || UnexpectedError; -pub fn fanotify_init(flags: u32, event_f_flags: u32) FanotifyInitError!i32 { +pub fn fanotify_init(flags: std.os.linux.fanotify.InitFlags, event_f_flags: u32) FanotifyInitError!i32 { const rc = system.fanotify_init(flags, event_f_flags); switch (errno(rc)) { .SUCCESS => return @intCast(rc), @@ -4530,16 +4530,28 @@ pub const FanotifyMarkError = error{ NameTooLong, } || UnexpectedError; -pub fn fanotify_mark(fanotify_fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[]const u8) FanotifyMarkError!void { +pub fn fanotify_mark( + fanotify_fd: fd_t, + flags: std.os.linux.fanotify.MarkFlags, + mask: std.os.linux.fanotify.MarkMask, + dirfd: fd_t, + pathname: ?[]const u8, +) FanotifyMarkError!void { if (pathname) |path| { const path_c = try toPosixPath(path); return fanotify_markZ(fanotify_fd, flags, mask, dirfd, &path_c); + } else { + return fanotify_markZ(fanotify_fd, flags, mask, dirfd, null); } - - return fanotify_markZ(fanotify_fd, flags, mask, dirfd, null); } -pub fn fanotify_markZ(fanotify_fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[*:0]const u8) FanotifyMarkError!void { +pub fn fanotify_markZ( + fanotify_fd: fd_t, + flags: std.os.linux.fanotify.MarkFlags, + mask: std.os.linux.fanotify.MarkMask, + dirfd: fd_t, + pathname: ?[*:0]const u8, +) FanotifyMarkError!void { const rc = system.fanotify_mark(fanotify_fd, flags, mask, dirfd, pathname); switch (errno(rc)) { .SUCCESS => return, @@ -7274,6 +7286,44 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError! 
}; } +pub const NameToFileHandleAtError = error{ + FileNotFound, + NotDir, + OperationNotSupported, + NameTooLong, + Unexpected, +}; + +pub fn name_to_handle_at( + dirfd: fd_t, + pathname: []const u8, + handle: *std.os.linux.file_handle, + mount_id: *i32, + flags: u32, +) NameToFileHandleAtError!void { + const pathname_c = try toPosixPath(pathname); + return name_to_handle_atZ(dirfd, &pathname_c, handle, mount_id, flags); +} + +pub fn name_to_handle_atZ( + dirfd: fd_t, + pathname_z: [*:0]const u8, + handle: *std.os.linux.file_handle, + mount_id: *i32, + flags: u32, +) NameToFileHandleAtError!void { + switch (errno(system.name_to_handle_at(dirfd, pathname_z, handle, mount_id, flags))) { + .SUCCESS => {}, + .FAULT => unreachable, // pathname, mount_id, or handle outside accessible address space + .INVAL => unreachable, // bad flags, or handle_bytes too big + .NOENT => return error.FileNotFound, + .NOTDIR => return error.NotDir, + .OPNOTSUPP => return error.OperationNotSupported, + .OVERFLOW => return error.NameTooLong, + else => |err| return unexpectedErrno(err), + } +} + pub const IoCtl_SIOCGIFINDEX_Error = error{ FileSystem, InterfaceNotFound, From deea36250ffe458d92b32b1ad090b8a958ba8082 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Jul 2024 23:41:08 -0700 Subject: [PATCH 111/152] std.Build.Cache.Path: add `subPathOpt` and `TableAdapter` Helpful methods when using one of these structs as a hash table key. --- lib/std/Build/Cache/Path.zig | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index cb6e4c708758..89dba6b577dc 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -151,6 +151,26 @@ pub fn eql(self: Path, other: Path) bool { return self.root_dir.eql(other.root_dir) and std.mem.eql(u8, self.sub_path, other.sub_path); } +pub fn subPathOpt(self: Path) ?[]const u8 { + return if (self.sub_path.len == 0) null else self.sub_path; +} + +/// Useful to make `Path` a key in `std.ArrayHashMap`. +pub const TableAdapter = struct { + pub const Hash = std.hash.Wyhash; + + pub fn hash(self: TableAdapter, a: Cache.Path) u32 { + _ = self; + const seed: u32 = @bitCast(a.root_dir.handle.fd); + return @truncate(Hash.hash(seed, a.sub_path)); + } + pub fn eql(self: TableAdapter, a: Cache.Path, b: Cache.Path, b_index: usize) bool { + _ = self; + _ = b_index; + return a.eql(b); + } +}; + const Path = @This(); const std = @import("../../std.zig"); const fs = std.fs; From bbd90a562efd6e802ed41df2649a05fad763a4de Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Jul 2024 23:42:20 -0700 Subject: [PATCH 112/152] build runner: implement --watch (work-in-progress) I'm still learning how the fanotify API works but I think after playing with it in this commit, I finally know how to implement it, at least on Linux. This commit does not accomplish the goal but I want to take the code in a different direction and still be able to reference this point in time by viewing a source control diff. I think the move is going to be saving the file_handle for the parent directory, which combined with the dirent names is how we can correlate the events back to the Step instances that have registered file system inputs. I predict this to be similar to implementations on other operating systems. 
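To make the correlation idea concrete, here is an illustrative sketch (the
helper and its name are hypothetical, not part of this commit) of capturing a
parent directory's kernel-issued handle via the `name_to_handle_at` wrapper
added earlier in this series. The opaque handle bytes become the hash-map key,
and the dirent name reported alongside each fanotify event selects the watched
file within that directory:

    const std = @import("std");

    /// Hypothetical helper: duplicate the opaque handle bytes identifying
    /// `dir_fd`, for use as a hash-map key. fanotify FID/DFID records carry
    /// these same bytes, independent of the directory's path.
    fn dupeDirHandle(gpa: std.mem.Allocator, dir_fd: std.posix.fd_t) ![]u8 {
        var buf: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined;
        const fh: *std.os.linux.file_handle = @ptrCast(&buf);
        fh.handle_bytes = buf.len - @sizeOf(std.os.linux.file_handle);
        var mount_id: i32 = undefined;
        try std.posix.name_to_handle_at(dir_fd, ".", fh, &mount_id, 0);
        const bytes: [*]u8 = &fh.f_handle;
        return gpa.dupe(u8, bytes[0..fh.handle_bytes]);
    }
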
--- lib/compiler/build_runner.zig | 418 ++++++++++++++++++++++++----- lib/std/Build.zig | 55 ---- lib/std/Build/Step.zig | 73 +++-- lib/std/Build/Step/InstallFile.zig | 5 +- 4 files changed, 407 insertions(+), 144 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index cf78ad320f4f..d1c6af6189a2 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -8,6 +8,7 @@ const process = std.process; const ArrayList = std.ArrayList; const File = std.fs.File; const Step = std.Build.Step; +const Allocator = std.mem.Allocator; pub const root = @import("@build"); pub const dependencies = @import("@dependencies"); @@ -74,7 +75,6 @@ pub fn main() !void { .query = .{}, .result = try std.zig.system.resolveTargetQuery(.{}), }, - .watch = null, }; graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); @@ -105,6 +105,7 @@ pub fn main() !void { var help_menu = false; var steps_menu = false; var output_tmp_nonce: ?[16]u8 = null; + var watch = false; while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { @@ -229,9 +230,7 @@ pub fn main() !void { } else if (mem.eql(u8, arg, "--prominent-compile-errors")) { prominent_compile_errors = true; } else if (mem.eql(u8, arg, "--watch")) { - const watch = try arena.create(std.Build.Watch); - watch.* = std.Build.Watch.init; - graph.watch = watch; + watch = true; } else if (mem.eql(u8, arg, "-fwine")) { builder.enable_wine = true; } else if (mem.eql(u8, arg, "-fno-wine")) { @@ -297,6 +296,7 @@ pub fn main() !void { const main_progress_node = std.Progress.start(.{ .disable_printing = (color == .off), }); + defer main_progress_node.end(); builder.debug_log_scopes = debug_log_scopes.items; builder.resolveInstallPrefix(install_prefix, dir_list); @@ -345,13 +345,16 @@ pub fn main() !void { .max_rss_is_default = false, .max_rss_mutex = .{}, .skip_oom_steps = skip_oom_steps, + .watch = watch, .memory_blocked_steps = std.ArrayList(*Step).init(arena), + .step_stack = .{}, .prominent_compile_errors = prominent_compile_errors, .claimed_rss = 0, - .summary = summary orelse if (graph.watch != null) .new else .failures, + .summary = summary orelse if (watch) .new else .failures, .ttyconf = ttyconf, .stderr = stderr, + .thread_pool = undefined, }; if (run.max_rss == 0) { @@ -359,30 +362,311 @@ pub fn main() !void { run.max_rss_is_default = true; } - runStepNames( - arena, - builder, - targets.items, - main_progress_node, - thread_pool_options, - &run, - seed, - ) catch |err| switch (err) { - error.UncleanExit => { - if (graph.watch == null) - process.exit(1); - }, + const gpa = arena; + prepare(gpa, arena, builder, targets.items, &run, seed) catch |err| switch (err) { + error.UncleanExit => process.exit(1), else => return err, }; + + var w = Watch.init; + if (watch) { + w.fan_fd = try std.posix.fanotify_init(.{ + .CLASS = .NOTIF, + .CLOEXEC = true, + .NONBLOCK = true, + .REPORT_NAME = true, + .REPORT_DIR_FID = true, + .REPORT_FID = true, + .REPORT_TARGET_FID = true, + }, 0); + } + + try run.thread_pool.init(thread_pool_options); + defer run.thread_pool.deinit(); + + rebuild: while (true) { + runStepNames( + gpa, + builder, + targets.items, + main_progress_node, + &run, + ) catch |err| switch (err) { + error.UncleanExit => { + assert(!run.watch); + process.exit(1); + }, + else => return err, + }; + if (!watch) return cleanExit(); + + // Clear all file handles. 
+ for (w.handle_table.keys(), w.handle_table.values()) |lfh, *step_set| { + lfh.destroy(gpa); + step_set.clearAndFree(gpa); + } + w.handle_table.clearRetainingCapacity(); + + // Add missing marks and note persisted ones. + for (run.step_stack.keys()) |step| { + for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| { + { + const gop = try w.dir_table.getOrPut(gpa, path); + gop.value_ptr.* = w.generation; + if (!gop.found_existing) { + try std.posix.fanotify_mark(w.fan_fd, .{ + .ADD = true, + .ONLYDIR = true, + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); + } + } + for (files.items) |basename| { + const file_handle = try Watch.getFileHandle(gpa, path, basename); + std.debug.print("watching file_handle '{}{s}' = {}\n", .{ + path, basename, std.fmt.fmtSliceHexLower(file_handle.slice()), + }); + const gop = try w.handle_table.getOrPut(gpa, file_handle); + if (!gop.found_existing) gop.value_ptr.* = .{}; + try gop.value_ptr.put(gpa, step, {}); + } + } + } + + { + // Remove marks for files that are no longer inputs. + var i: usize = 0; + while (i < w.dir_table.entries.len) { + const generations = w.dir_table.values(); + if (generations[i] == w.generation) { + i += 1; + continue; + } + + const path = w.dir_table.keys()[i]; + + try std.posix.fanotify_mark(w.fan_fd, .{ + .REMOVE = true, + .ONLYDIR = true, + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); + + w.dir_table.swapRemoveAt(i); + } + w.generation +%= 1; + } + + // Wait until a file system notification arrives. Read all such events + // until the buffer is empty. Then wait for a debounce interval, resetting + // if any more events come in. After the debounce interval has passed, + // trigger a rebuild on all steps with modified inputs, as well as their + // recursive dependants. 
+ const debounce_interval_ms = 10; + var poll_fds: [1]std.posix.pollfd = .{ + .{ + .fd = w.fan_fd, + .events = std.posix.POLL.IN, + .revents = undefined, + }, + }; + var caption_buf: [40]u8 = undefined; + const caption = std.fmt.bufPrint(&caption_buf, "Watching {d} Directories", .{ + w.dir_table.entries.len, + }) catch &caption_buf; + var debouncing_node = main_progress_node.start(caption, 0); + var debouncing = false; + while (true) { + const timeout: i32 = if (debouncing) debounce_interval_ms else -1; + const events_len = try std.posix.poll(&poll_fds, timeout); + if (events_len == 0) { + debouncing_node.end(); + continue :rebuild; + } + if (try markDirtySteps(&w)) { + if (!debouncing) { + debouncing = true; + debouncing_node.end(); + debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0); + } + } + } + } } +fn markDirtySteps(w: *Watch) !bool { + const fanotify = std.os.linux.fanotify; + const M = fanotify.event_metadata; + var events_buf: [256 + 4096]u8 = undefined; + var any_dirty = false; + while (true) { + var len = std.posix.read(w.fan_fd, &events_buf) catch |err| switch (err) { + error.WouldBlock => return any_dirty, + else => |e| return e, + }; + //std.debug.dump_hex(events_buf[0..len]); + var meta: [*]align(1) M = @ptrCast(&events_buf); + while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ + len -= meta[0].event_len; + meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); + }) { + assert(meta[0].vers == M.VERSION); + std.debug.print("meta = {any}\n", .{meta[0]}); + const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); + switch (fid.hdr.info_type) { + .DFID_NAME => { + const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); + const file_name = mem.span(file_name_z); + std.debug.print("DFID_NAME file_handle = {any}, found: '{s}'\n", .{ file_handle.*, file_name }); + const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + if (w.handle_table.get(lfh)) |step_set| { + for (step_set.keys()) |step| { + std.debug.print("DFID_NAME marking step '{s}' dirty\n", .{step.name}); + step.state = .precheck_done; + any_dirty = true; + } + } else { + std.debug.print("DFID_NAME changed file did not match any steps: '{}'\n", .{ + std.fmt.fmtSliceHexLower(lfh.slice()), + }); + } + }, + .FID => { + const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + if (w.handle_table.get(lfh)) |step_set| { + for (step_set.keys()) |step| { + std.debug.print("FID marking step '{s}' dirty\n", .{step.name}); + step.state = .precheck_done; + any_dirty = true; + } + } else { + std.debug.print("FID changed file did not match any steps: '{}'\n", .{ + std.fmt.fmtSliceHexLower(lfh.slice()), + }); + } + }, + .DFID => { + const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + if (w.handle_table.get(lfh)) |step_set| { + for (step_set.keys()) |step| { + std.debug.print("DFID marking step '{s}' dirty\n", .{step.name}); + step.state = .precheck_done; + any_dirty = true; + } + } else { + std.debug.print("DFID changed file did not match any steps\n", .{}); + } + }, + else => |t| { + std.debug.panic("TODO: received event type '{s}'", .{@tagName(t)}); + }, + } + } + } +} + +const Watch = struct { + dir_table: DirTable, + handle_table: 
HandleTable, + fan_fd: std.posix.fd_t, + generation: u8, + + const fan_mask: std.os.linux.fanotify.MarkMask = .{ + .CLOSE_WRITE = true, + .DELETE = true, + .MOVED_FROM = true, + .MOVED_TO = true, + .EVENT_ON_CHILD = true, + }; + + const init: Watch = .{ + .dir_table = .{}, + .handle_table = .{}, + .fan_fd = -1, + .generation = 0, + }; + + /// Key is the directory to watch which contains one or more files we are + /// interested in noticing changes to. + /// + /// Value is generation. + const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, u8, Cache.Path.TableAdapter, false); + + const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, StepSet, LinuxFileHandle.Adapter, false); + const StepSet = std.AutoArrayHashMapUnmanaged(*Step, void); + + const Hash = std.hash.Wyhash; + const Cache = std.Build.Cache; + + const LinuxFileHandle = struct { + handle: *align(1) std.os.linux.file_handle, + + fn clone(lfh: LinuxFileHandle, gpa: Allocator) Allocator.Error!LinuxFileHandle { + const bytes = lfh.slice(); + const new_ptr = try gpa.alignedAlloc( + u8, + @alignOf(std.os.linux.file_handle), + @sizeOf(std.os.linux.file_handle) + bytes.len, + ); + const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr); + new_header.* = lfh.handle.*; + const new: LinuxFileHandle = .{ .handle = new_header }; + @memcpy(new.slice(), lfh.slice()); + return new; + } + + fn destroy(lfh: LinuxFileHandle, gpa: Allocator) void { + const ptr: [*]u8 = @ptrCast(lfh.handle); + const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes]; + return gpa.free(allocated_slice); + } + + fn slice(lfh: LinuxFileHandle) []u8 { + const ptr: [*]u8 = &lfh.handle.f_handle; + return ptr[0..lfh.handle.handle_bytes]; + } + + const Adapter = struct { + pub fn hash(self: Adapter, a: LinuxFileHandle) u32 { + _ = self; + const unsigned_type: u32 = @bitCast(a.handle.handle_type); + return @truncate(Hash.hash(unsigned_type, a.slice())); + } + pub fn eql(self: Adapter, a: LinuxFileHandle, b: LinuxFileHandle, b_index: usize) bool { + _ = self; + _ = b_index; + return a.handle.handle_type == b.handle.handle_type and mem.eql(u8, a.slice(), b.slice()); + } + }; + }; + + fn getFileHandle(gpa: Allocator, path: std.Build.Cache.Path, basename: []const u8) !LinuxFileHandle { + var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; + var mount_id: i32 = undefined; + var buf: [std.fs.max_path_bytes]u8 = undefined; + const joined_path = if (path.sub_path.len == 0) basename else path: { + break :path std.fmt.bufPrint(&buf, "{s}" ++ std.fs.path.sep_str ++ "{s}", .{ + path.sub_path, basename, + }) catch return error.NameTooLong; + }; + const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); + stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); + try std.posix.name_to_handle_at(path.root_dir.handle.fd, joined_path, stack_ptr, &mount_id, 0); + const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; + return stack_lfh.clone(gpa); + } +}; + const Run = struct { max_rss: u64, max_rss_is_default: bool, max_rss_mutex: std.Thread.Mutex, skip_oom_steps: bool, + watch: bool, memory_blocked_steps: std.ArrayList(*Step), + step_stack: std.AutoArrayHashMapUnmanaged(*Step, void), prominent_compile_errors: bool, + thread_pool: std.Thread.Pool, claimed_rss: usize, summary: Summary, @@ -390,18 +674,15 @@ const Run = struct { stderr: File, }; -fn runStepNames( - arena: std.mem.Allocator, +fn prepare( + gpa: Allocator, 
+ arena: Allocator, b: *std.Build, step_names: []const []const u8, - parent_prog_node: std.Progress.Node, - thread_pool_options: std.Thread.Pool.Options, run: *Run, seed: u32, ) !void { - const gpa = b.allocator; - var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - defer step_stack.deinit(gpa); + const step_stack = &run.step_stack; if (step_names.len == 0) { try step_stack.put(gpa, b.default_step, {}); @@ -424,7 +705,7 @@ fn runStepNames( rand.shuffle(*Step, starting_steps); for (starting_steps) |s| { - constructGraphAndCheckForDependencyLoop(b, s, &step_stack, rand) catch |err| switch (err) { + constructGraphAndCheckForDependencyLoop(b, s, &run.step_stack, rand) catch |err| switch (err) { error.DependencyLoopDetected => return uncleanExit(), else => |e| return e, }; @@ -453,14 +734,19 @@ fn runStepNames( return uncleanExit(); } } +} - var thread_pool: std.Thread.Pool = undefined; - try thread_pool.init(thread_pool_options); - defer thread_pool.deinit(); +fn runStepNames( + gpa: Allocator, + b: *std.Build, + step_names: []const []const u8, + parent_prog_node: std.Progress.Node, + run: *Run, +) !void { + const step_stack = &run.step_stack; + const thread_pool = &run.thread_pool; { - defer parent_prog_node.end(); - const step_prog = parent_prog_node.start("steps", step_stack.count()); defer step_prog.end(); @@ -476,7 +762,7 @@ fn runStepNames( if (step.state == .skipped_oom) continue; thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{ - &wait_group, &thread_pool, b, step, step_prog, run, + &wait_group, b, step, step_prog, run, }); } } @@ -493,8 +779,6 @@ fn runStepNames( var failure_count: usize = 0; var pending_count: usize = 0; var total_compile_errors: usize = 0; - var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{}; - defer compile_error_steps.deinit(gpa); for (step_stack.keys()) |s| { test_fail_count += s.test_results.fail_count; @@ -524,7 +808,6 @@ fn runStepNames( const compile_errors_len = s.result_error_bundle.errorMessageCount(); if (compile_errors_len > 0) { total_compile_errors += compile_errors_len; - try compile_error_steps.append(gpa, s); } }, } @@ -537,8 +820,8 @@ fn runStepNames( else => false, }; if (failure_count == 0 and failures_only) { - if (b.graph.watch != null) return; - return cleanExit(); + if (!run.watch) cleanExit(); + return; } const ttyconf = run.ttyconf; @@ -561,10 +844,13 @@ fn runStepNames( stderr.writeAll("\n") catch {}; // Print a fancy tree with build results. + var step_stack_copy = try step_stack.clone(gpa); + defer step_stack_copy.deinit(gpa); + var print_node: PrintNode = .{ .parent = null }; if (step_names.len == 0) { print_node.last = true; - printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack) catch {}; + printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {}; } else { const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: { var i: usize = step_names.len; @@ -583,44 +869,34 @@ fn runStepNames( for (step_names, 0..) |step_name, i| { const tls = b.top_level_steps.get(step_name).?; print_node.last = i + 1 == last_index; - printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack) catch {}; + printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {}; } } } if (failure_count == 0) { - if (b.graph.watch != null) return; - return cleanExit(); + if (!run.watch) cleanExit(); + return; } // Finally, render compile errors at the bottom of the terminal. 
- // We use a separate compile_error_steps array list because step_stack is destructively - // mutated in printTreeStep above. if (run.prominent_compile_errors and total_compile_errors > 0) { - for (compile_error_steps.items) |s| { + for (step_stack.keys()) |s| { if (s.result_error_bundle.errorMessageCount() > 0) { s.result_error_bundle.renderToStdErr(renderOptions(ttyconf)); } } - if (b.graph.watch != null) return uncleanExit(); - - // Signal to parent process that we have printed compile errors. The - // parent process may choose to omit the "following command failed" - // line in this case. - process.exit(2); + if (!run.watch) { + // Signal to parent process that we have printed compile errors. The + // parent process may choose to omit the "following command failed" + // line in this case. + std.debug.lockStdErr(); + process.exit(2); + } } - return uncleanExit(); -} - -fn uncleanExit() error{UncleanExit}!void { - if (builtin.mode == .Debug) { - return error.UncleanExit; - } else { - std.debug.lockStdErr(); - process.exit(1); - } + if (!run.watch) return uncleanExit(); } const PrintNode = struct { @@ -912,12 +1188,13 @@ fn constructGraphAndCheckForDependencyLoop( fn workerMakeOneStep( wg: *std.Thread.WaitGroup, - thread_pool: *std.Thread.Pool, b: *std.Build, s: *Step, prog_node: std.Progress.Node, run: *Run, ) void { + const thread_pool = &run.thread_pool; + // First, check the conditions for running this step. If they are not met, // then we return without doing the step, relying on another worker to // queue this step up again when dependencies are met. @@ -997,7 +1274,7 @@ fn workerMakeOneStep( // Successful completion of a step, so we queue up its dependants as well. for (s.dependants.items) |dep| { thread_pool.spawnWg(wg, workerMakeOneStep, .{ - wg, thread_pool, b, dep, prog_node, run, + wg, b, dep, prog_node, run, }); } } @@ -1022,7 +1299,7 @@ fn workerMakeOneStep( remaining -= dep.max_rss; thread_pool.spawnWg(wg, workerMakeOneStep, .{ - wg, thread_pool, b, dep, prog_node, run, + wg, b, dep, prog_node, run, }); } else { run.memory_blocked_steps.items[i] = dep; @@ -1242,13 +1519,22 @@ fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 { return args[idx..]; } +/// Perhaps in the future there could be an Advanced Options flag such as +/// --debug-build-runner-leaks which would make this function return instead of +/// calling exit. fn cleanExit() void { - // Perhaps in the future there could be an Advanced Options flag such as - // --debug-build-runner-leaks which would make this function return instead - // of calling exit. + std.debug.lockStdErr(); process.exit(0); } +/// Perhaps in the future there could be an Advanced Options flag such as +/// --debug-build-runner-leaks which would make this function return instead of +/// calling exit. +fn uncleanExit() error{UncleanExit} { + std.debug.lockStdErr(); + process.exit(1); +} + const Color = std.zig.Color; const Summary = enum { all, new, failures, none }; diff --git a/lib/std/Build.zig b/lib/std/Build.zig index bfab90971c78..e46f3ea0bac4 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -119,61 +119,6 @@ pub const Graph = struct { needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{}, /// Information about the native target. Computed before build() is invoked. host: ResolvedTarget, - /// When `--watch` is provided, collects the set of files that should be - /// watched and the state to required to poll the system for changes. 
- watch: ?*Watch, -}; - -pub const Watch = struct { - table: Table, - - pub const init: Watch = .{ - .table = .{}, - }; - - /// Key is the directory to watch which contains one or more files we are - /// interested in noticing changes to. - pub const Table = std.ArrayHashMapUnmanaged(Cache.Path, ReactionSet, TableContext, false); - - const Hash = std.hash.Wyhash; - - pub const TableContext = struct { - pub fn hash(self: TableContext, a: Cache.Path) u32 { - _ = self; - const seed: u32 = @bitCast(a.root_dir.handle.fd); - return @truncate(Hash.hash(seed, a.sub_path)); - } - pub fn eql(self: TableContext, a: Cache.Path, b: Cache.Path, b_index: usize) bool { - _ = self; - _ = b_index; - return a.eql(b); - } - }; - - pub const ReactionSet = std.ArrayHashMapUnmanaged(Match, void, Match.Context, false); - - pub const Match = struct { - /// Relative to the watched directory, the file path that triggers this - /// match. - basename: []const u8, - /// The step to re-run when file corresponding to `basename` is changed. - step: *Step, - - pub const Context = struct { - pub fn hash(self: Context, a: Match) u32 { - _ = self; - var hasher = Hash.init(0); - std.hash.autoHash(&hasher, a.step); - hasher.update(a.basename); - return @truncate(hasher.final()); - } - pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool { - _ = self; - _ = b_index; - return a.step == b.step and mem.eql(u8, a.basename, b.basename); - } - }; - }; }; const AvailableDeps = []const struct { []const u8, []const u8 }; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 91fe29866696..fcab831f676d 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -7,6 +7,16 @@ dependencies: std.ArrayList(*Step), /// This field is empty during execution of the user's build script, and /// then populated during dependency loop checking in the build runner. dependants: std.ArrayListUnmanaged(*Step), +/// Collects the set of files that retrigger this step to run. +/// +/// This is used by the build system's implementation of `--watch` but it can +/// also be potentially useful for IDEs to know what effects editing a +/// particular file has. +/// +/// Populated within `make`. Implementation may choose to clear and repopulate, +/// retain previous value, or update. +inputs: Inputs, + state: State, /// Set this field to declare an upper bound on the amount of bytes of memory it will /// take to run the step. Zero means no limit. @@ -63,6 +73,11 @@ pub const MakeFn = *const fn (step: *Step, prog_node: std.Progress.Node) anyerro pub const State = enum { precheck_unstarted, precheck_started, + /// This is also used to indicate "dirty" steps that have been modified + /// after a previous build completed, in which case, the step may or may + /// not have been completed before. Either way, one or more of its direct + /// file system inputs have been modified, meaning that the step needs to + /// be re-evaluated. 
precheck_done, running, dependency_failure, @@ -134,6 +149,26 @@ pub const Run = @import("Step/Run.zig"); pub const TranslateC = @import("Step/TranslateC.zig"); pub const WriteFile = @import("Step/WriteFile.zig"); +pub const Inputs = struct { + table: Table, + + pub const init: Inputs = .{ + .table = .{}, + }; + + pub const Table = std.ArrayHashMapUnmanaged(Build.Cache.Path, Files, Build.Cache.Path.TableAdapter, false); + pub const Files = std.ArrayListUnmanaged([]const u8); + + pub fn populated(inputs: *Inputs) bool { + return inputs.table.count() != 0; + } + + pub fn clear(inputs: *Inputs, gpa: Allocator) void { + for (inputs.table.values()) |*files| files.deinit(gpa); + inputs.table.clearRetainingCapacity(); + } +}; + pub const StepOptions = struct { id: Id, name: []const u8, @@ -153,6 +188,7 @@ pub fn init(options: StepOptions) Step { .makeFn = options.makeFn, .dependencies = std.ArrayList(*Step).init(arena), .dependants = .{}, + .inputs = Inputs.init, .state = .precheck_unstarted, .max_rss = options.max_rss, .debug_stack_trace = blk: { @@ -542,19 +578,19 @@ pub fn allocPrintCmd2( return buf.toOwnedSlice(arena); } -pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool { +pub fn cacheHit(s: *Step, man: *Build.Cache.Manifest) !bool { s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err); return s.result_cached; } -fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror { +fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: anyerror) anyerror { const i = man.failed_file_index orelse return err; const pp = man.files.keys()[i].prefixed_path; const prefix = man.cache.prefixes()[pp.prefix].path orelse ""; return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, pp.sub_path }); } -pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void { +pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void { if (s.test_results.isSuccess()) { man.writeManifest() catch |err| { try s.addError("unable to write cache manifest: {s}", .{@errorName(err)}); @@ -568,44 +604,37 @@ fn oom(err: anytype) noreturn { } } -pub fn addWatchInput(step: *Step, lazy_path: std.Build.LazyPath) void { +pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) void { errdefer |err| oom(err); - const w = step.owner.graph.watch orelse return; switch (lazy_path) { - .src_path => |src_path| try addWatchInputFromBuilder(step, w, src_path.owner, src_path.sub_path), - .dependency => |d| try addWatchInputFromBuilder(step, w, d.dependency.builder, d.sub_path), + .src_path => |src_path| try addWatchInputFromBuilder(step, src_path.owner, src_path.sub_path), + .dependency => |d| try addWatchInputFromBuilder(step, d.dependency.builder, d.sub_path), .cwd_relative => |path_string| { - try addWatchInputFromPath(w, .{ + try addWatchInputFromPath(step, .{ .root_dir = .{ .path = null, .handle = std.fs.cwd(), }, .sub_path = std.fs.path.dirname(path_string) orelse "", - }, .{ - .step = step, - .basename = std.fs.path.basename(path_string), - }); + }, std.fs.path.basename(path_string)); }, // Nothing to watch because this dependency edge is modeled instead via `dependants`. 
.generated => {}, } } -fn addWatchInputFromBuilder(step: *Step, w: *std.Build.Watch, builder: *std.Build, sub_path: []const u8) !void { - return addWatchInputFromPath(w, .{ +fn addWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void { + return addWatchInputFromPath(step, .{ .root_dir = builder.build_root, .sub_path = std.fs.path.dirname(sub_path) orelse "", - }, .{ - .step = step, - .basename = std.fs.path.basename(sub_path), - }); + }, std.fs.path.basename(sub_path)); } -fn addWatchInputFromPath(w: *std.Build.Watch, path: std.Build.Cache.Path, match: std.Build.Watch.Match) !void { - const gpa = match.step.owner.allocator; - const gop = try w.table.getOrPut(gpa, path); +fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const u8) !void { + const gpa = step.owner.allocator; + const gop = try step.inputs.table.getOrPut(gpa, path); if (!gop.found_existing) gop.value_ptr.* = .{}; - try gop.value_ptr.put(gpa, match, {}); + try gop.value_ptr.append(gpa, basename); } test { diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index dd6aa2d1c4a6..f68e6e8aa023 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -39,7 +39,10 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_file: *InstallFile = @fieldParentPtr("step", step); - step.addWatchInput(install_file.source); + + // Inputs never change when re-running `make`. + if (!step.inputs.populated()) step.addWatchInput(install_file.source); + const full_src_path = install_file.source.getPath2(b, step); const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path); const cwd = std.fs.cwd(); From c5a4177140f417b80c3d2e86f247ee5af769a39a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 18:17:01 -0700 Subject: [PATCH 113/152] std.os.linux: add AT.HANDLE_FID --- lib/std/os/linux.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 91a050ee22b5..56930c6b290f 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -2946,6 +2946,8 @@ pub const AT = struct { /// Apply to the entire subtree pub const RECURSIVE = 0x8000; + + pub const HANDLE_FID = REMOVEDIR; }; pub const FALLOC = struct { From 5ee3971b1828468c89104cb01e19edc87edf35a6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 18:17:27 -0700 Subject: [PATCH 114/152] proof-of-concept --watch implementation based on fanotify So far, only implemented for InstallFile steps. Default debounce interval bumped to 50ms. I think it should be configurable. Next I have an idea to simplify the fanotify implementation, but other OS implementations might want to refer back to this commit before I make those changes. 
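Distilled, the debounce described above is a two-state poll loop (names taken
from the diff below; progress reporting and the rebuild jump are omitted):

    // A -1 timeout blocks until the first file system event; once any
    // event dirties a step, waiting switches to the debounce interval,
    // and a poll that returns 0 events means a full quiet interval
    // passed, so the rebuild can start.
    var debouncing = false;
    while (true) {
        const timeout: i32 = if (debouncing) debounce_interval_ms else -1;
        if (try std.posix.poll(&poll_fds, timeout) == 0) {
            break; // quiet for a whole interval: rebuild dirty steps
        }
        if (try markDirtySteps(&w)) debouncing = true;
    }
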
--- lib/compiler/build_runner.zig | 220 +++++++++++----------------------- lib/std/Build.zig | 1 + lib/std/Build/Watch.zig | 135 +++++++++++++++++++++ 3 files changed, 208 insertions(+), 148 deletions(-) create mode 100644 lib/std/Build/Watch.zig diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index d1c6af6189a2..c9fd00e0e098 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -8,6 +8,7 @@ const process = std.process; const ArrayList = std.ArrayList; const File = std.fs.File; const Step = std.Build.Step; +const Watch = std.Build.Watch; const Allocator = std.mem.Allocator; pub const root = @import("@build"); @@ -400,34 +401,26 @@ pub fn main() !void { }; if (!watch) return cleanExit(); - // Clear all file handles. - for (w.handle_table.keys(), w.handle_table.values()) |lfh, *step_set| { - lfh.destroy(gpa); - step_set.clearAndFree(gpa); - } - w.handle_table.clearRetainingCapacity(); - // Add missing marks and note persisted ones. for (run.step_stack.keys()) |step| { for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| { - { + const reaction_set = rs: { const gop = try w.dir_table.getOrPut(gpa, path); - gop.value_ptr.* = w.generation; if (!gop.found_existing) { try std.posix.fanotify_mark(w.fan_fd, .{ .ADD = true, .ONLYDIR = true, }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); + + const dir_handle = try Watch.getDirHandle(gpa, path); + try w.handle_table.putNoClobber(gpa, dir_handle, .{}); } - } + break :rs &w.handle_table.values()[gop.index]; + }; for (files.items) |basename| { - const file_handle = try Watch.getFileHandle(gpa, path, basename); - std.debug.print("watching file_handle '{}{s}' = {}\n", .{ - path, basename, std.fmt.fmtSliceHexLower(file_handle.slice()), - }); - const gop = try w.handle_table.getOrPut(gpa, file_handle); + const gop = try reaction_set.getOrPut(gpa, basename); if (!gop.found_existing) gop.value_ptr.* = .{}; - try gop.value_ptr.put(gpa, step, {}); + try gop.value_ptr.put(gpa, step, w.generation); } } } @@ -435,11 +428,31 @@ pub fn main() !void { { // Remove marks for files that are no longer inputs. var i: usize = 0; - while (i < w.dir_table.entries.len) { - const generations = w.dir_table.values(); - if (generations[i] == w.generation) { - i += 1; - continue; + while (i < w.handle_table.entries.len) { + { + const reaction_set = &w.handle_table.values()[i]; + var step_set_i: usize = 0; + while (step_set_i < reaction_set.entries.len) { + const step_set = &reaction_set.values()[step_set_i]; + var dirent_i: usize = 0; + while (dirent_i < step_set.entries.len) { + const generations = step_set.values(); + if (generations[dirent_i] == w.generation) { + dirent_i += 1; + continue; + } + step_set.swapRemoveAt(dirent_i); + } + if (step_set.entries.len > 0) { + step_set_i += 1; + continue; + } + reaction_set.swapRemoveAt(step_set_i); + } + if (reaction_set.entries.len > 0) { + i += 1; + continue; + } } const path = w.dir_table.keys()[i]; @@ -450,6 +463,7 @@ pub fn main() !void { }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); w.dir_table.swapRemoveAt(i); + w.handle_table.swapRemoveAt(i); } w.generation +%= 1; } @@ -459,7 +473,7 @@ pub fn main() !void { // if any more events come in. After the debounce interval has passed, // trigger a rebuild on all steps with modified inputs, as well as their // recursive dependants. 
- const debounce_interval_ms = 10; + const debounce_interval_ms = 50; var poll_fds: [1]std.posix.pollfd = .{ .{ .fd = w.fan_fd, @@ -517,46 +531,48 @@ fn markDirtySteps(w: *Watch) !bool { const file_name = mem.span(file_name_z); std.debug.print("DFID_NAME file_handle = {any}, found: '{s}'\n", .{ file_handle.*, file_name }); const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - if (w.handle_table.get(lfh)) |step_set| { - for (step_set.keys()) |step| { - std.debug.print("DFID_NAME marking step '{s}' dirty\n", .{step.name}); - step.state = .precheck_done; - any_dirty = true; + if (w.handle_table.getPtr(lfh)) |reaction_set| { + if (reaction_set.getPtr(file_name)) |step_set| { + for (step_set.keys()) |step| { + std.debug.print("DFID_NAME marking step '{s}' dirty\n", .{step.name}); + step.state = .precheck_done; + any_dirty = true; + } } } else { - std.debug.print("DFID_NAME changed file did not match any steps: '{}'\n", .{ + std.debug.print("DFID_NAME changed file did not match any directories: '{}'\n", .{ std.fmt.fmtSliceHexLower(lfh.slice()), }); } }, - .FID => { - const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - if (w.handle_table.get(lfh)) |step_set| { - for (step_set.keys()) |step| { - std.debug.print("FID marking step '{s}' dirty\n", .{step.name}); - step.state = .precheck_done; - any_dirty = true; - } - } else { - std.debug.print("FID changed file did not match any steps: '{}'\n", .{ - std.fmt.fmtSliceHexLower(lfh.slice()), - }); - } - }, - .DFID => { - const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - if (w.handle_table.get(lfh)) |step_set| { - for (step_set.keys()) |step| { - std.debug.print("DFID marking step '{s}' dirty\n", .{step.name}); - step.state = .precheck_done; - any_dirty = true; - } - } else { - std.debug.print("DFID changed file did not match any steps\n", .{}); - } - }, + //.FID => { + // const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + // const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + // if (w.handle_table.get(lfh)) |step_set| { + // for (step_set.keys()) |step| { + // std.debug.print("FID marking step '{s}' dirty\n", .{step.name}); + // step.state = .precheck_done; + // any_dirty = true; + // } + // } else { + // std.debug.print("FID changed file did not match any steps: '{}'\n", .{ + // std.fmt.fmtSliceHexLower(lfh.slice()), + // }); + // } + //}, + //.DFID => { + // const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + // const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + // if (w.handle_table.get(lfh)) |step_set| { + // for (step_set.keys()) |step| { + // std.debug.print("DFID marking step '{s}' dirty\n", .{step.name}); + // step.state = .precheck_done; + // any_dirty = true; + // } + // } else { + // std.debug.print("DFID changed file did not match any steps\n", .{}); + // } + //}, else => |t| { std.debug.panic("TODO: received event type '{s}'", .{@tagName(t)}); }, @@ -565,98 +581,6 @@ fn markDirtySteps(w: *Watch) !bool { } } -const Watch = struct { - dir_table: DirTable, - handle_table: HandleTable, - fan_fd: std.posix.fd_t, - generation: u8, - - const fan_mask: std.os.linux.fanotify.MarkMask = .{ - .CLOSE_WRITE = true, - .DELETE = true, - .MOVED_FROM = true, - .MOVED_TO = true, - .EVENT_ON_CHILD = true, - }; - - const init: Watch = .{ - .dir_table = .{}, - .handle_table = 
.{}, - .fan_fd = -1, - .generation = 0, - }; - - /// Key is the directory to watch which contains one or more files we are - /// interested in noticing changes to. - /// - /// Value is generation. - const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, u8, Cache.Path.TableAdapter, false); - - const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, StepSet, LinuxFileHandle.Adapter, false); - const StepSet = std.AutoArrayHashMapUnmanaged(*Step, void); - - const Hash = std.hash.Wyhash; - const Cache = std.Build.Cache; - - const LinuxFileHandle = struct { - handle: *align(1) std.os.linux.file_handle, - - fn clone(lfh: LinuxFileHandle, gpa: Allocator) Allocator.Error!LinuxFileHandle { - const bytes = lfh.slice(); - const new_ptr = try gpa.alignedAlloc( - u8, - @alignOf(std.os.linux.file_handle), - @sizeOf(std.os.linux.file_handle) + bytes.len, - ); - const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr); - new_header.* = lfh.handle.*; - const new: LinuxFileHandle = .{ .handle = new_header }; - @memcpy(new.slice(), lfh.slice()); - return new; - } - - fn destroy(lfh: LinuxFileHandle, gpa: Allocator) void { - const ptr: [*]u8 = @ptrCast(lfh.handle); - const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes]; - return gpa.free(allocated_slice); - } - - fn slice(lfh: LinuxFileHandle) []u8 { - const ptr: [*]u8 = &lfh.handle.f_handle; - return ptr[0..lfh.handle.handle_bytes]; - } - - const Adapter = struct { - pub fn hash(self: Adapter, a: LinuxFileHandle) u32 { - _ = self; - const unsigned_type: u32 = @bitCast(a.handle.handle_type); - return @truncate(Hash.hash(unsigned_type, a.slice())); - } - pub fn eql(self: Adapter, a: LinuxFileHandle, b: LinuxFileHandle, b_index: usize) bool { - _ = self; - _ = b_index; - return a.handle.handle_type == b.handle.handle_type and mem.eql(u8, a.slice(), b.slice()); - } - }; - }; - - fn getFileHandle(gpa: Allocator, path: std.Build.Cache.Path, basename: []const u8) !LinuxFileHandle { - var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; - var mount_id: i32 = undefined; - var buf: [std.fs.max_path_bytes]u8 = undefined; - const joined_path = if (path.sub_path.len == 0) basename else path: { - break :path std.fmt.bufPrint(&buf, "{s}" ++ std.fs.path.sep_str ++ "{s}", .{ - path.sub_path, basename, - }) catch return error.NameTooLong; - }; - const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); - stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try std.posix.name_to_handle_at(path.root_dir.handle.fd, joined_path, stack_ptr, &mount_id, 0); - const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; - return stack_lfh.clone(gpa); - } -}; - const Run = struct { max_rss: u64, max_rss_is_default: bool, diff --git a/lib/std/Build.zig b/lib/std/Build.zig index e46f3ea0bac4..30aa5b72db1e 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -20,6 +20,7 @@ const Build = @This(); pub const Cache = @import("Build/Cache.zig"); pub const Step = @import("Build/Step.zig"); pub const Module = @import("Build/Module.zig"); +pub const Watch = @import("Build/Watch.zig"); /// Shared state among all Build instances. 
graph: *Graph, diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig new file mode 100644 index 000000000000..d85b0f0a9223 --- /dev/null +++ b/lib/std/Build/Watch.zig @@ -0,0 +1,135 @@ +const std = @import("../std.zig"); +const Watch = @This(); +const Step = std.Build.Step; +const Allocator = std.mem.Allocator; + +dir_table: DirTable, +/// Keyed differently but indexes correspond 1:1 with `dir_table`. +handle_table: HandleTable, +fan_fd: std.posix.fd_t, +generation: Generation, + +pub const fan_mask: std.os.linux.fanotify.MarkMask = .{ + .CLOSE_WRITE = true, + .DELETE = true, + .MOVED_FROM = true, + .MOVED_TO = true, + .EVENT_ON_CHILD = true, +}; + +pub const init: Watch = .{ + .dir_table = .{}, + .handle_table = .{}, + .fan_fd = -1, + .generation = 0, +}; + +/// Key is the directory to watch which contains one or more files we are +/// interested in noticing changes to. +/// +/// Value is generation. +const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, void, Cache.Path.TableAdapter, false); + +const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, ReactionSet, LinuxFileHandle.Adapter, false); +const ReactionSet = std.StringArrayHashMapUnmanaged(StepSet); +const StepSet = std.AutoArrayHashMapUnmanaged(*Step, Generation); + +const Generation = u8; + +const Hash = std.hash.Wyhash; +const Cache = std.Build.Cache; + +pub const Match = struct { + /// Relative to the watched directory, the file path that triggers this + /// match. + basename: []const u8, + /// The step to re-run when file corresponding to `basename` is changed. + step: *Step, + + pub const Context = struct { + pub fn hash(self: Context, a: Match) u32 { + _ = self; + var hasher = Hash.init(0); + std.hash.autoHash(&hasher, a.step); + hasher.update(a.basename); + return @truncate(hasher.final()); + } + pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool { + _ = self; + _ = b_index; + return a.step == b.step and std.mem.eql(u8, a.basename, b.basename); + } + }; +}; + +pub const LinuxFileHandle = struct { + handle: *align(1) std.os.linux.file_handle, + + pub fn clone(lfh: LinuxFileHandle, gpa: Allocator) Allocator.Error!LinuxFileHandle { + const bytes = lfh.slice(); + const new_ptr = try gpa.alignedAlloc( + u8, + @alignOf(std.os.linux.file_handle), + @sizeOf(std.os.linux.file_handle) + bytes.len, + ); + const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr); + new_header.* = lfh.handle.*; + const new: LinuxFileHandle = .{ .handle = new_header }; + @memcpy(new.slice(), lfh.slice()); + return new; + } + + pub fn destroy(lfh: LinuxFileHandle, gpa: Allocator) void { + const ptr: [*]u8 = @ptrCast(lfh.handle); + const allocated_slice = ptr[0 .. 
@sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes]; + return gpa.free(allocated_slice); + } + + pub fn slice(lfh: LinuxFileHandle) []u8 { + const ptr: [*]u8 = &lfh.handle.f_handle; + return ptr[0..lfh.handle.handle_bytes]; + } + + pub const Adapter = struct { + pub fn hash(self: Adapter, a: LinuxFileHandle) u32 { + _ = self; + const unsigned_type: u32 = @bitCast(a.handle.handle_type); + return @truncate(Hash.hash(unsigned_type, a.slice())); + } + pub fn eql(self: Adapter, a: LinuxFileHandle, b: LinuxFileHandle, b_index: usize) bool { + _ = self; + _ = b_index; + return a.handle.handle_type == b.handle.handle_type and std.mem.eql(u8, a.slice(), b.slice()); + } + }; +}; + +pub fn getFileHandle(gpa: Allocator, path: std.Build.Cache.Path, basename: []const u8) !LinuxFileHandle { + var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; + var mount_id: i32 = undefined; + var buf: [std.fs.max_path_bytes]u8 = undefined; + const joined_path = if (path.sub_path.len == 0) basename else path: { + break :path std.fmt.bufPrint(&buf, "{s}/{s}", .{ + path.sub_path, basename, + }) catch return error.NameTooLong; + }; + const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); + stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); + try std.posix.name_to_handle_at(path.root_dir.handle.fd, joined_path, stack_ptr, &mount_id, 0); + const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; + return stack_lfh.clone(gpa); +} + +pub fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !LinuxFileHandle { + var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; + var mount_id: i32 = undefined; + var buf: [std.fs.max_path_bytes]u8 = undefined; + const adjusted_path = if (path.sub_path.len == 0) "./" else std.fmt.bufPrint(&buf, "{s}/", .{ + path.sub_path, + }) catch return error.NameTooLong; + const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); + stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); + try std.posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, &mount_id, std.os.linux.AT.HANDLE_FID); + const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; + return stack_lfh.clone(gpa); +} From 6f89824c22b57e14951870c8f589df33e180f552 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 18:37:20 -0700 Subject: [PATCH 115/152] build system: make debounce interval CLI-configurable --- lib/compiler/build_runner.zig | 53 ++++++++--------------------------- lib/std/Build/Watch.zig | 16 ----------- 2 files changed, 12 insertions(+), 57 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index c9fd00e0e098..cf883a581157 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -107,6 +107,7 @@ pub fn main() !void { var steps_menu = false; var output_tmp_nonce: ?[16]u8 = null; var watch = false; + var debounce_interval_ms: u16 = 50; while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { @@ -194,7 +195,15 @@ pub fn main() !void { const next_arg = nextArg(args, &arg_idx) orelse fatalWithHint("expected u32 after '{s}'", .{arg}); seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { - fatal("unable to parse seed '{s}' as 32-bit integer: {s}\n", .{ + fatal("unable to parse seed '{s}' as unsigned 32-bit integer: {s}\n", .{ + next_arg, 
@errorName(err), + }); + }; + } else if (mem.eql(u8, arg, "--debounce")) { + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected u16 after '{s}'", .{arg}); + debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| { + fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {s}\n", .{ next_arg, @errorName(err), }); }; @@ -473,7 +482,6 @@ pub fn main() !void { // if any more events come in. After the debounce interval has passed, // trigger a rebuild on all steps with modified inputs, as well as their // recursive dependants. - const debounce_interval_ms = 50; var poll_fds: [1]std.posix.pollfd = .{ .{ .fd = w.fan_fd, @@ -515,67 +523,29 @@ fn markDirtySteps(w: *Watch) !bool { error.WouldBlock => return any_dirty, else => |e| return e, }; - //std.debug.dump_hex(events_buf[0..len]); var meta: [*]align(1) M = @ptrCast(&events_buf); while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ len -= meta[0].event_len; meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); }) { assert(meta[0].vers == M.VERSION); - std.debug.print("meta = {any}\n", .{meta[0]}); const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); switch (fid.hdr.info_type) { .DFID_NAME => { const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); const file_name = mem.span(file_name_z); - std.debug.print("DFID_NAME file_handle = {any}, found: '{s}'\n", .{ file_handle.*, file_name }); const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; if (w.handle_table.getPtr(lfh)) |reaction_set| { if (reaction_set.getPtr(file_name)) |step_set| { for (step_set.keys()) |step| { - std.debug.print("DFID_NAME marking step '{s}' dirty\n", .{step.name}); step.state = .precheck_done; any_dirty = true; } } - } else { - std.debug.print("DFID_NAME changed file did not match any directories: '{}'\n", .{ - std.fmt.fmtSliceHexLower(lfh.slice()), - }); } }, - //.FID => { - // const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - // const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - // if (w.handle_table.get(lfh)) |step_set| { - // for (step_set.keys()) |step| { - // std.debug.print("FID marking step '{s}' dirty\n", .{step.name}); - // step.state = .precheck_done; - // any_dirty = true; - // } - // } else { - // std.debug.print("FID changed file did not match any steps: '{}'\n", .{ - // std.fmt.fmtSliceHexLower(lfh.slice()), - // }); - // } - //}, - //.DFID => { - // const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - // const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - // if (w.handle_table.get(lfh)) |step_set| { - // for (step_set.keys()) |step| { - // std.debug.print("DFID marking step '{s}' dirty\n", .{step.name}); - // step.state = .precheck_done; - // any_dirty = true; - // } - // } else { - // std.debug.print("DFID changed file did not match any steps\n", .{}); - // } - //}, - else => |t| { - std.debug.panic("TODO: received event type '{s}'", .{@tagName(t)}); - }, + else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), } } } @@ -1349,6 +1319,7 @@ fn usage(b: *std.Build, out_stream: anytype) !void { \\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss \\ --fetch Exit after fetching dependency tree \\ --watch Continuously rebuild when source files are modified + \\ --debounce Delay 
before rebuilding after watched file detection \\ \\Project-Specific Options: \\ diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index d85b0f0a9223..b04bfcb475d1 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -104,22 +104,6 @@ pub const LinuxFileHandle = struct { }; }; -pub fn getFileHandle(gpa: Allocator, path: std.Build.Cache.Path, basename: []const u8) !LinuxFileHandle { - var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; - var mount_id: i32 = undefined; - var buf: [std.fs.max_path_bytes]u8 = undefined; - const joined_path = if (path.sub_path.len == 0) basename else path: { - break :path std.fmt.bufPrint(&buf, "{s}/{s}", .{ - path.sub_path, basename, - }) catch return error.NameTooLong; - }; - const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); - stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try std.posix.name_to_handle_at(path.root_dir.handle.fd, joined_path, stack_ptr, &mount_id, 0); - const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; - return stack_lfh.clone(gpa); -} - pub fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !LinuxFileHandle { var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; var mount_id: i32 = undefined; From 001ff7b3b2176467a9169069938396dcca75e93c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 20:05:37 -0700 Subject: [PATCH 116/152] std.Build.Watch: make dirty steps invalidate each other and make failed steps always be invalidated and make steps that don't need to be reevaluated marked as cached --- lib/compiler/build_runner.zig | 43 ++-------------------------- lib/std/Build/Step.zig | 25 ++++++++++++++++ lib/std/Build/Watch.zig | 54 +++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 40 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index cf883a581157..ea1cfda2668a 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -500,9 +500,10 @@ pub fn main() !void { const events_len = try std.posix.poll(&poll_fds, timeout); if (events_len == 0) { debouncing_node.end(); + Watch.markFailedStepsDirty(gpa, run.step_stack.keys()); continue :rebuild; } - if (try markDirtySteps(&w)) { + if (try w.markDirtySteps(gpa)) { if (!debouncing) { debouncing = true; debouncing_node.end(); @@ -513,44 +514,6 @@ pub fn main() !void { } } -fn markDirtySteps(w: *Watch) !bool { - const fanotify = std.os.linux.fanotify; - const M = fanotify.event_metadata; - var events_buf: [256 + 4096]u8 = undefined; - var any_dirty = false; - while (true) { - var len = std.posix.read(w.fan_fd, &events_buf) catch |err| switch (err) { - error.WouldBlock => return any_dirty, - else => |e| return e, - }; - var meta: [*]align(1) M = @ptrCast(&events_buf); - while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ - len -= meta[0].event_len; - meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); - }) { - assert(meta[0].vers == M.VERSION); - const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); - switch (fid.hdr.info_type) { - .DFID_NAME => { - const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); - const file_name = mem.span(file_name_z); - const lfh: 
Watch.LinuxFileHandle = .{ .handle = file_handle }; - if (w.handle_table.getPtr(lfh)) |reaction_set| { - if (reaction_set.getPtr(file_name)) |step_set| { - for (step_set.keys()) |step| { - step.state = .precheck_done; - any_dirty = true; - } - } - } - }, - else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), - } - } - } -} - const Run = struct { max_rss: u64, max_rss_is_default: bool, @@ -1319,7 +1282,7 @@ fn usage(b: *std.Build, out_stream: anytype) !void { \\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss \\ --fetch Exit after fetching dependency tree \\ --watch Continuously rebuild when source files are modified - \\ --debounce Delay before rebuilding after watched file detection + \\ --debounce Delay before rebuilding after changed file detected \\ \\Project-Specific Options: \\ diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index fcab831f676d..4b958be284e0 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -637,6 +637,31 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const try gop.value_ptr.append(gpa, basename); } +fn reset(step: *Step, gpa: Allocator) void { + assert(step.state == .precheck_done); + + step.result_error_msgs.clearRetainingCapacity(); + step.result_stderr = ""; + step.result_cached = false; + step.result_duration_ns = null; + step.result_peak_rss = 0; + step.test_results = .{}; + + step.result_error_bundle.deinit(gpa); + step.result_error_bundle = std.zig.ErrorBundle.empty; +} + +/// Implementation detail of file watching. Prepares the step for being re-evaluated. +pub fn recursiveReset(step: *Step, gpa: Allocator) void { + assert(step.state != .precheck_done); + step.state = .precheck_done; + step.reset(gpa); + for (step.dependants.items) |dep| { + if (dep.state == .precheck_done) continue; + dep.recursiveReset(gpa); + } +} + test { _ = CheckFile; _ = CheckObject; diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index b04bfcb475d1..b58e524cab38 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -2,6 +2,7 @@ const std = @import("../std.zig"); const Watch = @This(); const Step = std.Build.Step; const Allocator = std.mem.Allocator; +const assert = std.debug.assert; dir_table: DirTable, /// Keyed differently but indexes correspond 1:1 with `dir_table`. 
@@ -117,3 +118,56 @@ pub fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !LinuxFileHandle const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; return stack_lfh.clone(gpa); } + +pub fn markDirtySteps(w: *Watch, gpa: Allocator) !bool { + const fanotify = std.os.linux.fanotify; + const M = fanotify.event_metadata; + var events_buf: [256 + 4096]u8 = undefined; + var any_dirty = false; + while (true) { + var len = std.posix.read(w.fan_fd, &events_buf) catch |err| switch (err) { + error.WouldBlock => return any_dirty, + else => |e| return e, + }; + var meta: [*]align(1) M = @ptrCast(&events_buf); + while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ + len -= meta[0].event_len; + meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); + }) { + assert(meta[0].vers == M.VERSION); + const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); + switch (fid.hdr.info_type) { + .DFID_NAME => { + const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); + const file_name = std.mem.span(file_name_z); + const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; + if (w.handle_table.getPtr(lfh)) |reaction_set| { + if (reaction_set.getPtr(file_name)) |step_set| { + for (step_set.keys()) |step| { + if (step.state != .precheck_done) { + step.recursiveReset(gpa); + any_dirty = true; + } + } + } + } + }, + else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), + } + } + } +} + +pub fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { + for (all_steps) |step| switch (step.state) { + .dependency_failure, .failure, .skipped => step.recursiveReset(gpa), + else => continue, + }; + // Now that all dirty steps have been found, the remaining steps that + // succeeded from last run shall be marked "cached". 
+ for (all_steps) |step| switch (step.state) { + .success => step.result_cached = true, + else => continue, + }; +} From e6b6a728b36335d9ac32dd216082aaf1faa6327e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 20:06:56 -0700 Subject: [PATCH 117/152] build runner: fix build summary painting over CLI progress by obtaining the stderr lock when printing the build summary --- lib/compiler/build_runner.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index ea1cfda2668a..7cc3d2bcd7d0 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -682,9 +682,12 @@ fn runStepNames( } const ttyconf = run.ttyconf; - const stderr = run.stderr; if (run.summary != .none) { + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + const stderr = run.stderr; + const total_count = success_count + failure_count + pending_count + skipped_count; ttyconf.setColor(stderr, .cyan) catch {}; stderr.writeAll("Build Summary:") catch {}; From e712ca595f3ac2e25911fd0d647bfb8c97019ce8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 20:25:53 -0700 Subject: [PATCH 118/152] std.os.linux: type safety for fanotify metadata event mask field --- lib/std/os/linux.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 56930c6b290f..43abbcc1a94d 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -4261,7 +4261,7 @@ pub const fanotify = struct { vers: u8, reserved: u8, metadata_len: u16, - mask: u64 align(8), + mask: MarkMask align(8), fd: i32, pid: i32, From 956f1ebc707f8a2530e49b80357768f3bf1235ac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 20:26:16 -0700 Subject: [PATCH 119/152] std.Build.Watch: gracefully handle fanotify queue overflow --- lib/std/Build/Watch.zig | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index b58e524cab38..2ffcd27ca7a8 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -135,6 +135,12 @@ pub fn markDirtySteps(w: *Watch, gpa: Allocator) !bool { meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); }) { assert(meta[0].vers == M.VERSION); + if (meta[0].mask.Q_OVERFLOW) { + any_dirty = true; + std.log.warn("file system watch queue overflowed; falling back to fstat", .{}); + markAllFilesDirty(w, gpa); + return true; + } const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); switch (fid.hdr.info_type) { .DFID_NAME => { @@ -171,3 +177,13 @@ pub fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { else => continue, }; } + +fn markAllFilesDirty(w: *Watch, gpa: Allocator) void { + for (w.handle_table.values()) |reaction_set| { + for (reaction_set.values()) |step_set| { + for (step_set.keys()) |step| { + step.recursiveReset(gpa); + } + } + } +} From 0cc492a272ef9c03a34b57a26bf570b242615ddf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 21:08:20 -0700 Subject: [PATCH 120/152] make more build steps integrate with the watch system --- lib/std/Build.zig | 2 +- lib/std/Build/Step.zig | 15 +++++++++------ lib/std/Build/Step/CheckFile.zig | 1 + lib/std/Build/Step/CheckObject.zig | 1 + lib/std/Build/Step/ConfigHeader.zig | 2 ++ lib/std/Build/Step/InstallFile.zig | 4 +--- lib/std/Build/Step/ObjCopy.zig | 1 + lib/std/Build/Step/Options.zig | 3 +++ lib/std/Build/Step/RemoveDir.zig | 20 +++++++++++++------- test/tests.zig | 6 +++--- 
10 files changed, 35 insertions(+), 20 deletions(-) diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 30aa5b72db1e..72108adaf52b 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1052,7 +1052,7 @@ pub fn addWriteFiles(b: *Build) *Step.WriteFile { return Step.WriteFile.create(b); } -pub fn addRemoveDirTree(b: *Build, dir_path: []const u8) *Step.RemoveDir { +pub fn addRemoveDirTree(b: *Build, dir_path: LazyPath) *Step.RemoveDir { return Step.RemoveDir.create(b, dir_path); } diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 4b958be284e0..91d3924611ae 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -598,14 +598,17 @@ pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void { } } -fn oom(err: anytype) noreturn { - switch (err) { - error.OutOfMemory => @panic("out of memory"), - } +/// For steps that have a single input that never changes when re-running `make`. +pub fn singleUnchangingWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!void { + if (!step.inputs.populated()) try step.addWatchInput(lazy_path); +} + +pub fn clearWatchInputs(step: *Step) void { + const gpa = step.owner.allocator; + step.inputs.clear(gpa); } -pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) void { - errdefer |err| oom(err); +pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!void { switch (lazy_path) { .src_path => |src_path| try addWatchInputFromBuilder(step, src_path.owner, src_path.sub_path), .dependency => |d| try addWatchInputFromBuilder(step, d.dependency.builder, d.sub_path), diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index b7ce2ded6136..c7a2046c1f63 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -50,6 +50,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const check_file: *CheckFile = @fieldParentPtr("step", step); + try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| { diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index f25708c55b36..93ee57e3b47b 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -555,6 +555,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const gpa = b.allocator; const check_object: *CheckObject = @fieldParentPtr("step", step); + try step.singleUnchangingWatchInput(check_object.source); const src_path = check_object.source.getPath2(b, step); const contents = fs.cwd().readFileAllocOptions( diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 6390a88da79d..fd655125cf11 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -168,6 +168,8 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const config_header: *ConfigHeader = @fieldParentPtr("step", step); + if (config_header.style.getPath()) |lp| try step.singleUnchangingWatchInput(lp); + const gpa = b.allocator; const arena = b.allocator; diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index f68e6e8aa023..d29ac21c1cd4 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -39,9 +39,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) 
!void { _ = prog_node; const b = step.owner; const install_file: *InstallFile = @fieldParentPtr("step", step); - - // Inputs never change when re-running `make`. - if (!step.inputs.populated()) step.addWatchInput(install_file.source); + try step.singleUnchangingWatchInput(install_file.source); const full_src_path = install_file.source.getPath2(b, step); const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path); diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 06c9a8ef0d8d..908341aefb47 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -93,6 +93,7 @@ pub fn getOutputSeparatedDebug(objcopy: *const ObjCopy) ?std.Build.LazyPath { fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const objcopy: *ObjCopy = @fieldParentPtr("step", step); + try step.singleUnchangingWatchInput(objcopy.input_file); var man = b.graph.cache.obtain(); defer man.deinit(); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 2937cf70e1ee..9ce23e0802a6 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -424,6 +424,9 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { item.path.getPath2(b, step), ); } + if (!step.inputs.populated()) for (options.args.items) |item| { + try step.addWatchInput(item.path); + }; const basename = "options.zig"; diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig index 6483a684aaea..1b7dc7feb8be 100644 --- a/lib/std/Build/Step/RemoveDir.zig +++ b/lib/std/Build/Step/RemoveDir.zig @@ -2,22 +2,23 @@ const std = @import("std"); const fs = std.fs; const Step = std.Build.Step; const RemoveDir = @This(); +const LazyPath = std.Build.LazyPath; pub const base_id: Step.Id = .remove_dir; step: Step, -dir_path: []const u8, +doomed_path: LazyPath, -pub fn create(owner: *std.Build, dir_path: []const u8) *RemoveDir { +pub fn create(owner: *std.Build, doomed_path: LazyPath) *RemoveDir { const remove_dir = owner.allocator.create(RemoveDir) catch @panic("OOM"); remove_dir.* = .{ .step = Step.init(.{ .id = base_id, - .name = owner.fmt("RemoveDir {s}", .{dir_path}), + .name = owner.fmt("RemoveDir {s}", .{doomed_path.getDisplayName()}), .owner = owner, .makeFn = make, }), - .dir_path = owner.dupePath(dir_path), + .doomed_path = doomed_path.dupe(owner), }; return remove_dir; } @@ -30,14 +31,19 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const remove_dir: *RemoveDir = @fieldParentPtr("step", step); - b.build_root.handle.deleteTree(remove_dir.dir_path) catch |err| { + step.clearWatchInputs(); + try step.addWatchInput(remove_dir.doomed_path); + + const full_doomed_path = remove_dir.doomed_path.getPath2(b, step); + + b.build_root.handle.deleteTree(full_doomed_path) catch |err| { if (b.build_root.path) |base| { return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{ - base, remove_dir.dir_path, @errorName(err), + base, full_doomed_path, @errorName(err), }); } else { return step.fail("unable to recursively delete path '{s}': {s}", .{ - remove_dir.dir_path, @errorName(err), + full_doomed_path, @errorName(err), }); } }; diff --git a/test/tests.zig b/test/tests.zig index 95a86c68f66d..e19a9efccfde 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -771,7 +771,7 @@ pub fn addCliTests(b: *std.Build) *Step { run_run.expectStdErrEqual("All your codebase are belong to us.\n"); run_run.step.dependOn(&init_exe.step); - const cleanup = 
b.addRemoveDirTree(tmp_path); + const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path }); cleanup.step.dependOn(&run_test.step); cleanup.step.dependOn(&run_run.step); cleanup.step.dependOn(&run_bad.step); @@ -816,7 +816,7 @@ pub fn addCliTests(b: *std.Build) *Step { }); checkfile.setName("check godbolt.org CLI usage generating valid asm"); - const cleanup = b.addRemoveDirTree(tmp_path); + const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path }); cleanup.step.dependOn(&checkfile.step); step.dependOn(&cleanup.step); @@ -902,7 +902,7 @@ pub fn addCliTests(b: *std.Build) *Step { }); check6.step.dependOn(&run6.step); - const cleanup = b.addRemoveDirTree(tmp_path); + const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path }); cleanup.step.dependOn(&check6.step); step.dependOn(&cleanup.step); From d1c14f2f52ddec476eca6d605b985a27f4d4fe28 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 21:47:26 -0700 Subject: [PATCH 121/152] std.Build.Step.WriteFile: extract UpdateSourceFiles This has been planned for quite some time; this commit finally does it. Also implements file system watching integration in the make() implementation for UpdateSourceFiles and fixes the reporting of step caching for both. WriteFile does not yet have file system watching integration. --- build.zig | 2 +- lib/std/Build.zig | 4 + lib/std/Build/Step.zig | 4 + lib/std/Build/Step/UpdateSourceFiles.zig | 114 +++++++++++++++++++++++ lib/std/Build/Step/WriteFile.zig | 89 +----------------- test/tests.zig | 2 +- 6 files changed, 128 insertions(+), 87 deletions(-) create mode 100644 lib/std/Build/Step/UpdateSourceFiles.zig diff --git a/build.zig b/build.zig index 0f0d7d4d67a2..a364982ce993 100644 --- a/build.zig +++ b/build.zig @@ -595,7 +595,7 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void { run_opt.addArg("-o"); run_opt.addFileArg(b.path("stage1/zig1.wasm")); - const copy_zig_h = b.addWriteFiles(); + const copy_zig_h = b.addUpdateSourceFiles(); copy_zig_h.addCopyFileToSource(b.path("lib/zig.h"), "stage1/zig.h"); const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm"); diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 72108adaf52b..87bb0eeeda68 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1052,6 +1052,10 @@ pub fn addWriteFiles(b: *Build) *Step.WriteFile { return Step.WriteFile.create(b); } +pub fn addUpdateSourceFiles(b: *Build) *Step.UpdateSourceFiles { + return Step.UpdateSourceFiles.create(b); +} + pub fn addRemoveDirTree(b: *Build, dir_path: LazyPath) *Step.RemoveDir { return Step.RemoveDir.create(b, dir_path); } diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 91d3924611ae..e41912d54843 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -102,6 +102,7 @@ pub const Id = enum { fmt, translate_c, write_file, + update_source_files, run, check_file, check_object, @@ -122,6 +123,7 @@ pub const Id = enum { .fmt => Fmt, .translate_c => TranslateC, .write_file => WriteFile, + .update_source_files => UpdateSourceFiles, .run => Run, .check_file => CheckFile, .check_object => CheckObject, @@ -148,6 +150,7 @@ pub const RemoveDir = @import("Step/RemoveDir.zig"); pub const Run = @import("Step/Run.zig"); pub const TranslateC = @import("Step/TranslateC.zig"); pub const WriteFile = @import("Step/WriteFile.zig"); +pub const UpdateSourceFiles = @import("Step/UpdateSourceFiles.zig"); pub const Inputs = struct { table: Table, @@ -680,4 +683,5 @@ test { _ = Run; _ = TranslateC; _ = WriteFile; + _ = UpdateSourceFiles; 
}

diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig
new file mode 100644
index 000000000000..9d1c8e20fe62
--- /dev/null
+++ b/lib/std/Build/Step/UpdateSourceFiles.zig
@@ -0,0 +1,114 @@
+//! Writes data to paths relative to the package root, effectively mutating the
+//! package's source files. Be careful with this functionality; it should
+//! not be used during the normal build process, but as a utility run by a
+//! developer with intention to update source files, which will then be
+//! committed to version control.
+const std = @import("std");
+const Step = std.Build.Step;
+const fs = std.fs;
+const ArrayList = std.ArrayList;
+const UpdateSourceFiles = @This();
+
+step: Step,
+output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
+
+pub const base_id: Step.Id = .update_source_files;
+
+pub const OutputSourceFile = struct {
+    contents: Contents,
+    sub_path: []const u8,
+};
+
+pub const Contents = union(enum) {
+    bytes: []const u8,
+    copy: std.Build.LazyPath,
+};
+
+pub fn create(owner: *std.Build) *UpdateSourceFiles {
+    const usf = owner.allocator.create(UpdateSourceFiles) catch @panic("OOM");
+    usf.* = .{
+        .step = Step.init(.{
+            .id = base_id,
+            .name = "UpdateSourceFiles",
+            .owner = owner,
+            .makeFn = make,
+        }),
+        .output_source_files = .{},
+    };
+    return usf;
+}
+
+/// A path relative to the package root.
+///
+/// Be careful with this because it updates source files. This should not be
+/// used as part of the normal build process, but as a utility occasionally
+/// run by a developer with intent to modify source files and then commit
+/// those changes to version control.
+pub fn addCopyFileToSource(usf: *UpdateSourceFiles, source: std.Build.LazyPath, sub_path: []const u8) void {
+    const b = usf.step.owner;
+    usf.output_source_files.append(b.allocator, .{
+        .contents = .{ .copy = source },
+        .sub_path = sub_path,
+    }) catch @panic("OOM");
+    source.addStepDependencies(&usf.step);
+}
+
+/// A path relative to the package root.
+///
+/// Be careful with this because it updates source files. This should not be
+/// used as part of the normal build process, but as a utility occasionally
+/// run by a developer with intent to modify source files and then commit
+/// those changes to version control.
+pub fn addBytesToSource(usf: *UpdateSourceFiles, bytes: []const u8, sub_path: []const u8) void { + const b = usf.step.owner; + usf.output_source_files.append(b.allocator, .{ + .contents = .{ .bytes = bytes }, + .sub_path = sub_path, + }) catch @panic("OOM"); +} + +fn make(step: *Step, prog_node: std.Progress.Node) !void { + _ = prog_node; + const b = step.owner; + const usf: *UpdateSourceFiles = @fieldParentPtr("step", step); + + var any_miss = false; + for (usf.output_source_files.items) |output_source_file| { + if (fs.path.dirname(output_source_file.sub_path)) |dirname| { + b.build_root.handle.makePath(dirname) catch |err| { + return step.fail("unable to make path '{}{s}': {s}", .{ + b.build_root, dirname, @errorName(err), + }); + }; + } + switch (output_source_file.contents) { + .bytes => |bytes| { + b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { + return step.fail("unable to write file '{}{s}': {s}", .{ + b.build_root, output_source_file.sub_path, @errorName(err), + }); + }; + any_miss = true; + }, + .copy => |file_source| { + if (!step.inputs.populated()) try step.addWatchInput(file_source); + + const source_path = file_source.getPath2(b, step); + const prev_status = fs.Dir.updateFile( + fs.cwd(), + source_path, + b.build_root.handle, + output_source_file.sub_path, + .{}, + ) catch |err| { + return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{ + source_path, b.build_root, output_source_file.sub_path, @errorName(err), + }); + }; + any_miss = any_miss or prev_status == .stale; + }, + } + } + + step.result_cached = !any_miss; +} diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 013c58890a2d..f35bf09b7e83 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -1,13 +1,6 @@ -//! WriteFile is primarily used to create a directory in an appropriate -//! location inside the local cache which has a set of files that have either -//! been generated during the build, or are copied from the source package. -//! -//! However, this step has an additional capability of writing data to paths -//! relative to the package root, effectively mutating the package's source -//! files. Be careful with the latter functionality; it should not be used -//! during the normal build process, but as a utility run by a developer with -//! intention to update source files, which will then be committed to version -//! control. +//! WriteFile is used to create a directory in an appropriate location inside +//! the local cache which has a set of files that have either been generated +//! during the build, or are copied from the source package. const std = @import("std"); const Step = std.Build.Step; const fs = std.fs; @@ -19,8 +12,6 @@ step: Step, // The elements here are pointers because we need stable pointers for the GeneratedFile field. 
files: std.ArrayListUnmanaged(File), directories: std.ArrayListUnmanaged(Directory), - -output_source_files: std.ArrayListUnmanaged(OutputSourceFile), generated_directory: std.Build.GeneratedFile, pub const base_id: Step.Id = .write_file; @@ -52,11 +43,6 @@ pub const Directory = struct { }; }; -pub const OutputSourceFile = struct { - contents: Contents, - sub_path: []const u8, -}; - pub const Contents = union(enum) { bytes: []const u8, copy: std.Build.LazyPath, @@ -73,7 +59,6 @@ pub fn create(owner: *std.Build) *WriteFile { }), .files = .{}, .directories = .{}, - .output_source_files = .{}, .generated_directory = .{ .step = &write_file.step }, }; return write_file; @@ -150,33 +135,6 @@ pub fn addCopyDirectory( }; } -/// A path relative to the package root. -/// Be careful with this because it updates source files. This should not be -/// used as part of the normal build process, but as a utility occasionally -/// run by a developer with intent to modify source files and then commit -/// those changes to version control. -pub fn addCopyFileToSource(write_file: *WriteFile, source: std.Build.LazyPath, sub_path: []const u8) void { - const b = write_file.step.owner; - write_file.output_source_files.append(b.allocator, .{ - .contents = .{ .copy = source }, - .sub_path = sub_path, - }) catch @panic("OOM"); - source.addStepDependencies(&write_file.step); -} - -/// A path relative to the package root. -/// Be careful with this because it updates source files. This should not be -/// used as part of the normal build process, but as a utility occasionally -/// run by a developer with intent to modify source files and then commit -/// those changes to version control. -pub fn addBytesToSource(write_file: *WriteFile, bytes: []const u8, sub_path: []const u8) void { - const b = write_file.step.owner; - write_file.output_source_files.append(b.allocator, .{ - .contents = .{ .bytes = bytes }, - .sub_path = sub_path, - }) catch @panic("OOM"); -} - /// Returns a `LazyPath` representing the base directory that contains all the /// files from this `WriteFile`. pub fn getDirectory(write_file: *WriteFile) std.Build.LazyPath { @@ -202,46 +160,6 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const write_file: *WriteFile = @fieldParentPtr("step", step); - // Writing to source files is kind of an extra capability of this - // WriteFile - arguably it should be a different step. But anyway here - // it is, it happens unconditionally and does not interact with the other - // files here. 
- var any_miss = false; - for (write_file.output_source_files.items) |output_source_file| { - if (fs.path.dirname(output_source_file.sub_path)) |dirname| { - b.build_root.handle.makePath(dirname) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ - b.build_root, dirname, @errorName(err), - }); - }; - } - switch (output_source_file.contents) { - .bytes => |bytes| { - b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { - return step.fail("unable to write file '{}{s}': {s}", .{ - b.build_root, output_source_file.sub_path, @errorName(err), - }); - }; - any_miss = true; - }, - .copy => |file_source| { - const source_path = file_source.getPath2(b, step); - const prev_status = fs.Dir.updateFile( - fs.cwd(), - source_path, - b.build_root.handle, - output_source_file.sub_path, - .{}, - ) catch |err| { - return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{ - source_path, b.build_root, output_source_file.sub_path, @errorName(err), - }); - }; - any_miss = any_miss or prev_status == .stale; - }, - } - } - // The cache is used here not really as a way to speed things up - because writing // the data to a file would probably be very fast - but as a way to find a canonical // location to put build artifacts. @@ -278,6 +196,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { if (try step.cacheHit(&man)) { const digest = man.final(); write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest }); + step.result_cached = true; return; } diff --git a/test/tests.zig b/test/tests.zig index e19a9efccfde..0862f8deb0e5 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -882,7 +882,7 @@ pub fn addCliTests(b: *std.Build) *Step { const unformatted_code_utf16 = "\xff\xfe \x00 \x00 \x00 \x00/\x00/\x00 \x00n\x00o\x00 \x00r\x00e\x00a\x00s\x00o\x00n\x00"; const fmt6_path = std.fs.path.join(b.allocator, &.{ tmp_path, "fmt6.zig" }) catch @panic("OOM"); - const write6 = b.addWriteFiles(); + const write6 = b.addUpdateSourceFiles(); write6.addBytesToSource(unformatted_code_utf16, fmt6_path); write6.step.dependOn(&run5.step); From dcbb3aa1f329ba0f12f8a039df62cd347b8f2559 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 23:13:03 -0700 Subject: [PATCH 122/152] std.Build.Cache.Path: fix format function for absolute paths --- lib/std/Build/Cache/Path.zig | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 89dba6b577dc..f6e469c254ad 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -58,6 +58,20 @@ pub fn openFile( return p.root_dir.handle.openFile(joined_path, flags); } +pub fn openDir( + p: Path, + sub_path: []const u8, + args: fs.Dir.OpenOptions, +) fs.Dir.OpenError!fs.Dir { + var buf: [fs.max_path_bytes]u8 = undefined; + const joined_path = if (p.sub_path.len == 0) sub_path else p: { + break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ + p.sub_path, sub_path, + }) catch return error.NameTooLong; + }; + return p.root_dir.handle.openDir(joined_path, args); +} + pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenOptions) !fs.Dir { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { @@ -137,6 +151,10 @@ pub fn format( } if (fmt_string.len > 0) std.fmt.invalidFmtError(fmt_string, self); + if (std.fs.path.isAbsolute(self.sub_path)) { + try writer.writeAll(self.sub_path); + return; + 
} if (self.root_dir.path) |p| { try writer.writeAll(p); try writer.writeAll(fs.path.sep_str); From 0994e22a646a797aa91602a56a6a0cddf8cdbd28 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 23:13:28 -0700 Subject: [PATCH 123/152] build runner: more useful failure handling for fanotify_mark --- lib/compiler/build_runner.zig | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 7cc3d2bcd7d0..990eb1b4ba1e 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -416,10 +416,12 @@ pub fn main() !void { const reaction_set = rs: { const gop = try w.dir_table.getOrPut(gpa, path); if (!gop.found_existing) { - try std.posix.fanotify_mark(w.fan_fd, .{ + std.posix.fanotify_mark(w.fan_fd, .{ .ADD = true, .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()) catch |err| { + fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); + }; const dir_handle = try Watch.getDirHandle(gpa, path); try w.handle_table.putNoClobber(gpa, dir_handle, .{}); @@ -489,7 +491,7 @@ pub fn main() !void { .revents = undefined, }, }; - var caption_buf: [40]u8 = undefined; + var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined; const caption = std.fmt.bufPrint(&caption_buf, "Watching {d} Directories", .{ w.dir_table.entries.len, }) catch &caption_buf; From 26bdc836d2d9b2654f7f95fec34c6276070f2a59 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 23:14:18 -0700 Subject: [PATCH 124/152] std.Build.LazyPath: add getPath3; deprecate getPath2 and getPath The goal is to move towards using `std.Build.Cache.Path` instead of absolute path names. This was helpful for implementing file watching integration to the InstallDir Step --- lib/std/Build.zig | 53 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 87bb0eeeda68..556ed89e8d5a 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -2327,36 +2327,52 @@ pub const LazyPath = union(enum) { } } - /// Returns an absolute path. - /// Intended to be used during the make phase only. + /// Deprecated, see `getPath3`. pub fn getPath(lazy_path: LazyPath, src_builder: *Build) []const u8 { return getPath2(lazy_path, src_builder, null); } - /// Returns an absolute path. + /// Deprecated, see `getPath3`. + pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 { + const p = getPath3(lazy_path, src_builder, asking_step); + return src_builder.pathResolve(&.{ p.root_dir.path orelse ".", p.sub_path }); + } + /// Intended to be used during the make phase only. /// /// `asking_step` is only used for debugging purposes; it's the step being /// run that is asking for the path. 
- pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 { + pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path { switch (lazy_path) { - .src_path => |sp| return sp.owner.pathFromRoot(sp.sub_path), - .cwd_relative => |p| return src_builder.pathFromCwd(p), + .src_path => |sp| return .{ + .root_dir = sp.owner.build_root, + .sub_path = sp.sub_path, + }, + .cwd_relative => |sub_path| return .{ + .root_dir = Cache.Directory.cwd(), + .sub_path = sub_path, + }, .generated => |gen| { - var file_path: []const u8 = gen.file.step.owner.pathFromRoot(gen.file.path orelse { - std.debug.lockStdErr(); - const stderr = std.io.getStdErr(); - dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {}; - std.debug.unlockStdErr(); - @panic("misconfigured build script"); - }); + // TODO make gen.file.path not be absolute and use that as the + // basis for not traversing up too many directories. + + var file_path: Cache.Path = .{ + .root_dir = gen.file.step.owner.build_root, + .sub_path = gen.file.path orelse { + std.debug.lockStdErr(); + const stderr = std.io.getStdErr(); + dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {}; + std.debug.unlockStdErr(); + @panic("misconfigured build script"); + }, + }; if (gen.up > 0) { const cache_root_path = src_builder.cache_root.path orelse (src_builder.cache_root.join(src_builder.allocator, &.{"."}) catch @panic("OOM")); for (0..gen.up) |_| { - if (mem.eql(u8, file_path, cache_root_path)) { + if (mem.eql(u8, file_path.sub_path, cache_root_path)) { // If we hit the cache root and there's still more to go, // the script attempted to go too far. dumpBadDirnameHelp(gen.file.step, asking_step, @@ -2370,7 +2386,7 @@ pub const LazyPath = union(enum) { // path is absolute. // dirname will return null only if we're at root. // Typically, we'll stop well before that at the cache root. - file_path = fs.path.dirname(file_path) orelse { + file_path.sub_path = fs.path.dirname(file_path.sub_path) orelse { dumpBadDirnameHelp(gen.file.step, asking_step, \\dirname() reached root. \\No more directories left to go up. @@ -2381,9 +2397,12 @@ pub const LazyPath = union(enum) { } } - return src_builder.pathResolve(&.{ file_path, gen.sub_path }); + return file_path.join(src_builder.allocator, gen.sub_path) catch @panic("OOM"); + }, + .dependency => |dep| return .{ + .root_dir = dep.dependency.builder.build_root, + .sub_path = dep.sub_path, }, - .dependency => |dep| return dep.dependency.builder.pathFromRoot(dep.sub_path), } } From 7bccef3e4e4d48c2e1d34ec28754d8c33902f2bb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 23:15:22 -0700 Subject: [PATCH 125/152] std.Build.Watch: introduce special file "." to watch entire dir And use it to implement InstallDir Step watch integration. I'm not seeing any events triggered when I run `mkdir` in the watched directory, however, and I have not yet figured out why. 
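
For orientation, a minimal sketch of how a step's make() implementation
is expected to use these helpers (the `MyStep` type and its `source_dir`
field are hypothetical; `addDirectoryWatchInput` and
`addDirectoryWatchInputFromPath` are the helpers added in this commit,
and `getPath3` comes from the previous commit):

    fn make(step: *Step, prog_node: std.Progress.Node) !void {
        _ = prog_node;
        const b = step.owner;
        const self: *MyStep = @fieldParentPtr("step", step);
        // Registers the directory under the special basename ".", so any
        // change inside it invalidates this step.
        try step.addDirectoryWatchInput(self.source_dir);
        // Paths derived from that directory (e.g. subdirectories found
        // while walking it) go through the Cache.Path variant.
        const sub_path = try self.source_dir.getPath3(b, step).join(b.allocator, "subdir");
        try step.addDirectoryWatchInputFromPath(sub_path);
        // ... do the actual work of the step ...
    }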
--- lib/std/Build/Step.zig | 41 +++++++++++++++++++++++++++++-- lib/std/Build/Step/InstallDir.zig | 26 ++++++++++++-------- lib/std/Build/Watch.zig | 29 +++++++++++++++------- 3 files changed, 75 insertions(+), 21 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index e41912d54843..a2640492ba8d 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -160,6 +160,7 @@ pub const Inputs = struct { }; pub const Table = std.ArrayHashMapUnmanaged(Build.Cache.Path, Files, Build.Cache.Path.TableAdapter, false); + /// The special file name "." means any changes inside the directory. pub const Files = std.ArrayListUnmanaged([]const u8); pub fn populated(inputs: *Inputs) bool { @@ -611,8 +612,9 @@ pub fn clearWatchInputs(step: *Step) void { step.inputs.clear(gpa); } -pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!void { - switch (lazy_path) { +/// Places a *file* dependency on the path. +pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!void { + switch (lazy_file) { .src_path => |src_path| try addWatchInputFromBuilder(step, src_path.owner, src_path.sub_path), .dependency => |d| try addWatchInputFromBuilder(step, d.dependency.builder, d.sub_path), .cwd_relative => |path_string| { @@ -629,6 +631,34 @@ pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!voi } } +/// Any changes inside the directory will trigger invalidation. +/// +/// See also `addDirectoryWatchInputFromPath` which takes a `Build.Cache.Path` instead. +pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Allocator.Error!void { + switch (lazy_directory) { + .src_path => |src_path| try addDirectoryWatchInputFromBuilder(step, src_path.owner, src_path.sub_path), + .dependency => |d| try addDirectoryWatchInputFromBuilder(step, d.dependency.builder, d.sub_path), + .cwd_relative => |path_string| { + try addDirectoryWatchInputFromPath(step, .{ + .root_dir = .{ + .path = null, + .handle = std.fs.cwd(), + }, + .sub_path = path_string, + }); + }, + // Nothing to watch because this dependency edge is modeled instead via `dependants`. + .generated => {}, + } +} + +/// Any changes inside the directory will trigger invalidation. +/// +/// See also `addDirectoryWatchInput` which takes a `Build.LazyPath` instead. 
+pub fn addDirectoryWatchInputFromPath(step: *Step, path: Build.Cache.Path) !void { + return addWatchInputFromPath(step, path, "."); +} + fn addWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void { return addWatchInputFromPath(step, .{ .root_dir = builder.build_root, @@ -636,6 +666,13 @@ fn addWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) }, std.fs.path.basename(sub_path)); } +fn addDirectoryWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void { + return addDirectoryWatchInputFromPath(step, .{ + .root_dir = builder.build_root, + .sub_path = sub_path, + }); +} + fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const u8) !void { const gpa = step.owner.allocator; const gop = try step.inputs.table.getOrPut(gpa, path); diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index ca38e09ec199..9cd71e7828db 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -59,12 +59,14 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_dir: *InstallDir = @fieldParentPtr("step", step); + step.clearWatchInputs(); const arena = b.allocator; const dest_prefix = b.getInstallPath(install_dir.options.install_dir, install_dir.options.install_subdir); - const src_dir_path = install_dir.options.source_dir.getPath2(b, step); - var src_dir = b.build_root.handle.openDir(src_dir_path, .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{}{s}': {s}", .{ - b.build_root, src_dir_path, @errorName(err), + const src_dir_path = install_dir.options.source_dir.getPath3(b, step); + try step.addDirectoryWatchInput(install_dir.options.source_dir); + var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOpt() orelse ".", .{ .iterate = true }) catch |err| { + return step.fail("unable to open source directory '{}': {s}", .{ + src_dir_path, @errorName(err), }); }; defer src_dir.close(); @@ -88,12 +90,16 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { } // relative to src build root - const src_sub_path = b.pathJoin(&.{ src_dir_path, entry.path }); + const src_sub_path = try src_dir_path.join(arena, entry.path); const dest_path = b.pathJoin(&.{ dest_prefix, entry.path }); const cwd = fs.cwd(); switch (entry.kind) { - .directory => try cwd.makePath(dest_path), + .directory => { + const subdir_path = try src_dir_path.join(arena, entry.path); + try step.addDirectoryWatchInputFromPath(subdir_path); + try cwd.makePath(dest_path); + }, .file => { for (install_dir.options.blank_extensions) |ext| { if (mem.endsWith(u8, entry.path, ext)) { @@ -103,14 +109,14 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { } const prev_status = fs.Dir.updateFile( - b.build_root.handle, - src_sub_path, + src_sub_path.root_dir.handle, + src_sub_path.sub_path, cwd, dest_path, .{}, ) catch |err| { - return step.fail("unable to update file from '{}{s}' to '{s}': {s}", .{ - b.build_root, src_sub_path, dest_path, @errorName(err), + return step.fail("unable to update file from '{}' to '{s}': {s}", .{ + src_sub_path, dest_path, @errorName(err), }); }; all_cached = all_cached and prev_status == .fresh; diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index 2ffcd27ca7a8..e9b4449748b6 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -12,10 +12,13 @@ generation: Generation, pub const fan_mask: std.os.linux.fanotify.MarkMask = .{ 
.CLOSE_WRITE = true, + .CREATE = true, .DELETE = true, + .DELETE_SELF = true, + .EVENT_ON_CHILD = true, .MOVED_FROM = true, .MOVED_TO = true, - .EVENT_ON_CHILD = true, + .MOVE_SELF = true, }; pub const init: Watch = .{ @@ -32,6 +35,7 @@ pub const init: Watch = .{ const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, void, Cache.Path.TableAdapter, false); const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, ReactionSet, LinuxFileHandle.Adapter, false); +/// Special key of "." means any changes in this directory trigger the steps. const ReactionSet = std.StringArrayHashMapUnmanaged(StepSet); const StepSet = std.AutoArrayHashMapUnmanaged(*Step, Generation); @@ -149,14 +153,10 @@ pub fn markDirtySteps(w: *Watch, gpa: Allocator) !bool { const file_name = std.mem.span(file_name_z); const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; if (w.handle_table.getPtr(lfh)) |reaction_set| { - if (reaction_set.getPtr(file_name)) |step_set| { - for (step_set.keys()) |step| { - if (step.state != .precheck_done) { - step.recursiveReset(gpa); - any_dirty = true; - } - } - } + if (reaction_set.getPtr(".")) |glob_set| + any_dirty = markStepSetDirty(gpa, glob_set, any_dirty); + if (reaction_set.getPtr(file_name)) |step_set| + any_dirty = markStepSetDirty(gpa, step_set, any_dirty); } }, else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), @@ -187,3 +187,14 @@ fn markAllFilesDirty(w: *Watch, gpa: Allocator) void { } } } + +fn markStepSetDirty(gpa: Allocator, step_set: *StepSet, any_dirty: bool) bool { + var this_any_dirty = false; + for (step_set.keys()) |step| { + if (step.state != .precheck_done) { + step.recursiveReset(gpa); + this_any_dirty = true; + } + } + return any_dirty or this_any_dirty; +} From 2ebf021061b8789226fa8bafe36f7827925d7022 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 9 Jul 2024 23:34:08 -0700 Subject: [PATCH 126/152] build runner: don't pass a dirfd + null to fanotify_mark Otherwise it reports EBADF. --- lib/compiler/build_runner.zig | 4 ++-- lib/std/Build/Cache/Path.zig | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 990eb1b4ba1e..b11ca783cc64 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -419,7 +419,7 @@ pub fn main() !void { std.posix.fanotify_mark(w.fan_fd, .{ .ADD = true, .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()) catch |err| { + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); }; @@ -471,7 +471,7 @@ pub fn main() !void { try std.posix.fanotify_mark(w.fan_fd, .{ .REMOVE = true, .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt()); + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()); w.dir_table.swapRemoveAt(i); w.handle_table.swapRemoveAt(i); diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index f6e469c254ad..c7160ea14350 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -173,6 +173,10 @@ pub fn subPathOpt(self: Path) ?[]const u8 { return if (self.sub_path.len == 0) null else self.sub_path; } +pub fn subPathOrDot(self: Path) []const u8 { + return if (self.sub_path.len == 0) "." else self.sub_path; +} + /// Useful to make `Path` a key in `std.ArrayHashMap`. 
pub const TableAdapter = struct { pub const Hash = std.hash.Wyhash; From b6ed833083e5174d7b616de7c9205596d1aa0a9f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 00:41:35 -0700 Subject: [PATCH 127/152] build runner: ignore ENOENT of fanotify_mark REMOVE This happens when deleting watched directories and is harmless. --- lib/compiler/build_runner.zig | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index b11ca783cc64..b3bdd8804d93 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -468,10 +468,13 @@ pub fn main() !void { const path = w.dir_table.keys()[i]; - try std.posix.fanotify_mark(w.fan_fd, .{ + std.posix.fanotify_mark(w.fan_fd, .{ .REMOVE = true, .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()); + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { + error.FileNotFound => {}, // Expected, harmless. + else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }), + }; w.dir_table.swapRemoveAt(i); w.handle_table.swapRemoveAt(i); From 61d2234743e192bbee1d05c1d231427c3338d2af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 00:42:16 -0700 Subject: [PATCH 128/152] std.Build.Watch: add ONDIR to fanotify event mask This makes mkdir/rmdir events show up. --- lib/std/Build/Watch.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index e9b4449748b6..e4bef7ca4ef6 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -19,6 +19,7 @@ pub const fan_mask: std.os.linux.fanotify.MarkMask = .{ .MOVED_FROM = true, .MOVED_TO = true, .MOVE_SELF = true, + .ONDIR = true, }; pub const init: Watch = .{ From 5c3fae3a329bed39f780827b4c5bfc494dee1c6b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 00:42:41 -0700 Subject: [PATCH 129/152] td.Build.Step.InstallDir: leave hint for wrong cached status Since I spent a couple minutes debugging this, hopefully this saves someone some future trouble doing the same. --- lib/std/Build/Step/InstallDir.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index 9cd71e7828db..8cb06641ec5f 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -99,6 +99,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const subdir_path = try src_dir_path.join(arena, entry.path); try step.addDirectoryWatchInputFromPath(subdir_path); try cwd.makePath(dest_path); + // TODO: set result_cached=false if the directory did not already exist. }, .file => { for (install_dir.options.blank_extensions) |ext| { From f2856403c6997ff1317c968abed0871df9586c7c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 15:08:23 -0700 Subject: [PATCH 130/152] introduce std.Build.Cache.Manifest.addFilePath and deprecate `addFile`. Part of an effort to move towards using `std.Build.Cache.Path` abstraction in more places, which makes it easier to avoid absolute paths and path resolution. 
--- lib/std/Build/Cache.zig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 6d43361ae457..e78353fa455b 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -354,6 +354,19 @@ pub const Manifest = struct { /// ``` /// var file_contents = cache_hash.files.keys()[file_index].contents.?; /// ``` + pub fn addFilePath(m: *Manifest, file_path: Path, max_file_size: ?usize) !usize { + const gpa = m.cache.gpa; + try m.files.ensureUnusedCapacity(gpa, 1); + const resolved_path = try fs.path.resolve(gpa, &.{ + file_path.root_dir.path orelse ".", + file_path.subPathOrDot(), + }); + errdefer gpa.free(resolved_path); + const prefixed_path = try m.cache.findPrefixResolved(resolved_path); + return addFileInner(m, prefixed_path, max_file_size); + } + + /// Deprecated; use `addFilePath`. pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize { assert(self.manifest_file == null); @@ -362,6 +375,10 @@ pub const Manifest = struct { const prefixed_path = try self.cache.findPrefix(file_path); errdefer gpa.free(prefixed_path.sub_path); + return addFileInner(self, prefixed_path, max_file_size); + } + + fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, max_file_size: ?usize) !usize { const gop = self.files.getOrPutAssumeCapacityAdapted(prefixed_path, FilesAdapter{}); if (gop.found_existing) { gop.key_ptr.updateMaxSize(max_file_size); From a966eee090d55c7d61484333af675c80115bf188 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 15:09:46 -0700 Subject: [PATCH 131/152] std.Build.Step.WriteFile: fix handling of directories and add file system watching integration. `addDirectoryWatchInput` now returns a `bool` which helps remind the caller to 1. call addDirectoryWatchInputFromPath on any derived paths 2. but only if the dependency is not already captured by a step dependency edge. The make function now recursively walks all directories and adds the found files to the cache hash rather than incorrectly only adding the directory name to the cache hash. closes #20571 --- lib/std/Build/Step.zig | 14 +++- lib/std/Build/Step/InstallDir.zig | 7 +- lib/std/Build/Step/WriteFile.zig | 117 ++++++++++++++++++++---------- 3 files changed, 95 insertions(+), 43 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index a2640492ba8d..5c77bd3367b6 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -634,7 +634,11 @@ pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!voi /// Any changes inside the directory will trigger invalidation. /// /// See also `addDirectoryWatchInputFromPath` which takes a `Build.Cache.Path` instead. -pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Allocator.Error!void { +/// +/// Paths derived from this directory should also be manually added via +/// `addDirectoryWatchInputFromPath` if and only if this function returns +/// `true`. +pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Allocator.Error!bool { switch (lazy_directory) { .src_path => |src_path| try addDirectoryWatchInputFromBuilder(step, src_path.owner, src_path.sub_path), .dependency => |d| try addDirectoryWatchInputFromBuilder(step, d.dependency.builder, d.sub_path), @@ -648,13 +652,19 @@ pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Alloc }); }, // Nothing to watch because this dependency edge is modeled instead via `dependants`. 
- .generated => {}, + .generated => return false, } + return true; } /// Any changes inside the directory will trigger invalidation. /// /// See also `addDirectoryWatchInput` which takes a `Build.LazyPath` instead. +/// +/// This function should only be called when it has been verified that the +/// dependency on `path` is not already accounted for by a `Step` dependency. +/// In other words, before calling this function, first check that the +/// `Build.LazyPath` which this `path` is derived from is not `generated`. pub fn addDirectoryWatchInputFromPath(step: *Step, path: Build.Cache.Path) !void { return addWatchInputFromPath(step, path, "."); } diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index 8cb06641ec5f..78281e56d9df 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -63,8 +63,8 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const arena = b.allocator; const dest_prefix = b.getInstallPath(install_dir.options.install_dir, install_dir.options.install_subdir); const src_dir_path = install_dir.options.source_dir.getPath3(b, step); - try step.addDirectoryWatchInput(install_dir.options.source_dir); - var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOpt() orelse ".", .{ .iterate = true }) catch |err| { + const need_derived_inputs = try step.addDirectoryWatchInput(install_dir.options.source_dir); + var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { return step.fail("unable to open source directory '{}': {s}", .{ src_dir_path, @errorName(err), }); @@ -96,8 +96,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { switch (entry.kind) { .directory => { - const subdir_path = try src_dir_path.join(arena, entry.path); - try step.addDirectoryWatchInputFromPath(subdir_path); + if (need_derived_inputs) try step.addDirectoryWatchInputFromPath(src_sub_path); try cwd.makePath(dest_path); // TODO: set result_cached=false if the directory did not already exist. }, diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index f35bf09b7e83..6c0770e45866 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -40,6 +40,22 @@ pub const Directory = struct { .include_extensions = if (opts.include_extensions) |incs| b.dupeStrings(incs) else null, }; } + + pub fn pathIncluded(opts: Options, path: []const u8) bool { + for (opts.exclude_extensions) |ext| { + if (std.mem.endsWith(u8, path, ext)) + return false; + } + if (opts.include_extensions) |incs| { + for (incs) |inc| { + if (std.mem.endsWith(u8, path, inc)) + return true; + } else { + return false; + } + } + return true; + } }; }; @@ -158,7 +174,10 @@ fn maybeUpdateName(write_file: *WriteFile) void { fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; + const arena = b.allocator; + const gpa = arena; const write_file: *WriteFile = @fieldParentPtr("step", step); + step.clearWatchInputs(); // The cache is used here not really as a way to speed things up - because writing // the data to a file would probably be very fast - but as a way to find a canonical @@ -173,29 +192,67 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { // Random bytes to make WriteFile unique. Refresh this with // new random bytes when WriteFile implementation is modified // in a non-backwards-compatible way. 
- man.hash.add(@as(u32, 0xd767ee59)); + man.hash.add(@as(u32, 0xc2a287d0)); for (write_file.files.items) |file| { man.hash.addBytes(file.sub_path); + switch (file.contents) { .bytes => |bytes| { man.hash.addBytes(bytes); }, - .copy => |file_source| { - _ = try man.addFile(file_source.getPath2(b, step), null); + .copy => |lazy_path| { + const path = lazy_path.getPath3(b, step); + _ = try man.addFilePath(path, null); + try step.addWatchInput(lazy_path); }, } } - for (write_file.directories.items) |dir| { - man.hash.addBytes(dir.source.getPath2(b, step)); + + const open_dir_cache = try arena.alloc(fs.Dir, write_file.directories.items.len); + var open_dirs_count: usize = 0; + defer closeDirs(open_dir_cache[0..open_dirs_count]); + + for (write_file.directories.items, open_dir_cache) |dir, *open_dir_cache_elem| { man.hash.addBytes(dir.sub_path); for (dir.options.exclude_extensions) |ext| man.hash.addBytes(ext); if (dir.options.include_extensions) |incs| for (incs) |inc| man.hash.addBytes(inc); + + const need_derived_inputs = try step.addDirectoryWatchInput(dir.source); + const src_dir_path = dir.source.getPath3(b, step); + + var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { + return step.fail("unable to open source directory '{}': {s}", .{ + src_dir_path, @errorName(err), + }); + }; + open_dir_cache_elem.* = src_dir; + open_dirs_count += 1; + + var it = try src_dir.walk(gpa); + defer it.deinit(); + while (try it.next()) |entry| { + if (!dir.options.pathIncluded(entry.path)) continue; + + switch (entry.kind) { + .directory => { + if (need_derived_inputs) { + const entry_path = try src_dir_path.join(arena, entry.path); + try step.addDirectoryWatchInputFromPath(entry_path); + } + }, + .file => { + const entry_path = try src_dir_path.join(arena, entry.path); + _ = try man.addFilePath(entry_path, null); + }, + else => continue, + } + } } if (try step.cacheHit(&man)) { const digest = man.final(); - write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest }); + write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest }); step.result_cached = true; return; } @@ -203,7 +260,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { const digest = man.final(); const cache_path = "o" ++ fs.path.sep_str ++ digest; - write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest }); + write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest }); var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| { return step.fail("unable to make path '{}{s}': {s}", .{ @@ -256,8 +313,9 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { }, } } - for (write_file.directories.items) |dir| { - const full_src_dir_path = dir.source.getPath2(b, step); + + for (write_file.directories.items, open_dir_cache) |dir, already_open_dir| { + const src_dir_path = dir.source.getPath3(b, step); const dest_dirname = dir.sub_path; if (dest_dirname.len != 0) { @@ -268,44 +326,25 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { }; } - var src_dir = b.build_root.handle.openDir(full_src_dir_path, .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{s}': {s}", .{ - full_src_dir_path, @errorName(err), - }); - }; - defer src_dir.close(); + var it = try already_open_dir.walk(gpa); + defer it.deinit(); + while (try it.next()) |entry| { + if (!dir.options.pathIncluded(entry.path)) 
continue; - var it = try src_dir.walk(b.allocator); - next_entry: while (try it.next()) |entry| { - for (dir.options.exclude_extensions) |ext| { - if (std.mem.endsWith(u8, entry.path, ext)) continue :next_entry; - } - if (dir.options.include_extensions) |incs| { - for (incs) |inc| { - if (std.mem.endsWith(u8, entry.path, inc)) break; - } else { - continue :next_entry; - } - } - const full_src_entry_path = b.pathJoin(&.{ full_src_dir_path, entry.path }); + const src_entry_path = try src_dir_path.join(arena, entry.path); const dest_path = b.pathJoin(&.{ dest_dirname, entry.path }); switch (entry.kind) { .directory => try cache_dir.makePath(dest_path), .file => { const prev_status = fs.Dir.updateFile( - cwd, - full_src_entry_path, + src_entry_path.root_dir.handle, + src_entry_path.sub_path, cache_dir, dest_path, .{}, ) catch |err| { - return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{ - full_src_entry_path, - b.cache_root, - cache_path, - fs.path.sep, - dest_path, - @errorName(err), + return step.fail("unable to update file from '{}' to '{}{s}{c}{s}': {s}", .{ + src_entry_path, b.cache_root, cache_path, fs.path.sep, dest_path, @errorName(err), }); }; _ = prev_status; @@ -317,3 +356,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { try step.writeManifest(&man); } + +fn closeDirs(dirs: []fs.Dir) void { + for (dirs) |*d| d.close(); +} From 6fcb1897d263130b5ff7a25dd12f027bddd75b2f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 17:11:53 -0700 Subject: [PATCH 132/152] std.Build.Step.WriteFile: remove random bytes from cache hash The cache hash already has the zig version in there, so it's not really needed. --- lib/std/Build/Step/WriteFile.zig | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 6c0770e45866..c1488a23d2ed 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -189,11 +189,6 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { var man = b.graph.cache.obtain(); defer man.deinit(); - // Random bytes to make WriteFile unique. Refresh this with - // new random bytes when WriteFile implementation is modified - // in a non-backwards-compatible way. - man.hash.add(@as(u32, 0xc2a287d0)); - for (write_file.files.items) |file| { man.hash.addBytes(file.sub_path); From 2e429697865ebcadc001a7d167ef964b3f1393a2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 17:14:54 -0700 Subject: [PATCH 133/152] std.Build.Step.Run: integrate with --watch --- lib/std/Build/Step.zig | 40 ++++++++++++++++++++++++++++++++++++++ lib/std/Build/Step/Run.zig | 8 ++++---- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 5c77bd3367b6..13cd47981bb2 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -582,11 +582,26 @@ pub fn allocPrintCmd2( return buf.toOwnedSlice(arena); } +/// Prefer `cacheHitAndWatch` unless you already added watch inputs +/// separately from using the cache system. pub fn cacheHit(s: *Step, man: *Build.Cache.Manifest) !bool { s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err); return s.result_cached; } +/// Clears previous watch inputs, if any, and then populates watch inputs from +/// the full set of files picked up by the cache manifest. +/// +/// Must be accompanied with `writeManifestAndWatch`. 
+pub fn cacheHitAndWatch(s: *Step, man: *Build.Cache.Manifest) !bool { + const is_hit = man.hit() catch |err| return failWithCacheError(s, man, err); + s.result_cached = is_hit; + // The above call to hit() populates the manifest with files, so in case of + // a hit, we need to populate watch inputs. + if (is_hit) try setWatchInputsFromManifest(s, man); + return is_hit; +} + fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: anyerror) anyerror { const i = man.failed_file_index orelse return err; const pp = man.files.keys()[i].prefixed_path; @@ -594,6 +609,8 @@ fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: anyerror) return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, pp.sub_path }); } +/// Prefer `writeManifestAndWatch` unless you already added watch inputs +/// separately from using the cache system. pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void { if (s.test_results.isSuccess()) { man.writeManifest() catch |err| { @@ -602,6 +619,29 @@ pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void { } } +/// Clears previous watch inputs, if any, and then populates watch inputs from +/// the full set of files picked up by the cache manifest. +/// +/// Must be accompanied with `cacheHitAndWatch`. +pub fn writeManifestAndWatch(s: *Step, man: *Build.Cache.Manifest) !void { + try writeManifest(s, man); + try setWatchInputsFromManifest(s, man); +} + +fn setWatchInputsFromManifest(s: *Step, man: *Build.Cache.Manifest) !void { + const arena = s.owner.allocator; + const prefixes = man.cache.prefixes(); + clearWatchInputs(s); + for (man.files.keys()) |file| { + // The file path data is freed when the cache manifest is cleaned up at the end of `make`. + const sub_path = try arena.dupe(u8, file.prefixed_path.sub_path); + try addWatchInputFromPath(s, .{ + .root_dir = prefixes[file.prefixed_path.prefix], + .sub_path = std.fs.path.dirname(sub_path) orelse "", + }, std.fs.path.basename(sub_path)); + } +} + /// For steps that have a single input that never changes when re-running `make`. 
pub fn singleUnchangingWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!void { if (!step.inputs.populated()) try step.addWatchInput(lazy_path); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 69d6b393fd41..b1e7060f3e95 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -615,7 +615,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { // On Windows we don't have rpaths so we have to add .dll search paths to PATH run.addPathForDynLibs(artifact); } - const file_path = artifact.installed_path orelse artifact.generated_bin.?.path.?; // the path is guaranteed to be set + const file_path = artifact.installed_path orelse artifact.generated_bin.?.path.?; try argv_list.append(file_path); @@ -665,7 +665,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = try man.addFile(lazy_path.getPath2(b, step), null); } - if (!has_side_effects and try step.cacheHit(&man)) { + if (!has_side_effects and try step.cacheHitAndWatch(&man)) { // cache hit, skip running command const digest = man.final(); @@ -719,7 +719,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { } try runCommand(run, argv_list.items, has_side_effects, output_dir_path, prog_node); - if (!has_side_effects) try step.writeManifest(&man); + if (!has_side_effects) try step.writeManifestAndWatch(&man); return; }; @@ -795,7 +795,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { }; } - if (!has_side_effects) try step.writeManifest(&man); + if (!has_side_effects) try step.writeManifestAndWatch(&man); try populateGeneratedPaths( arena, From dad07fb6f33d9024dc6528ca3de37715fbc13182 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Jul 2024 17:27:30 -0700 Subject: [PATCH 134/152] std.Build.Cache.Path: fix hash impl on windows --- lib/std/Build/Cache/Path.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index c7160ea14350..0abe79d373d0 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -183,7 +183,11 @@ pub const TableAdapter = struct { pub fn hash(self: TableAdapter, a: Cache.Path) u32 { _ = self; - const seed: u32 = @bitCast(a.root_dir.handle.fd); + const seed = switch (@typeInfo(@TypeOf(a.root_dir.handle.fd))) { + .Pointer => @intFromPtr(a.root_dir.handle.fd), + .Int => @as(u32, @bitCast(a.root_dir.handle.fd)), + else => @compileError("unimplemented hash function"), + }; return @truncate(Hash.hash(seed, a.sub_path)); } pub fn eql(self: TableAdapter, a: Cache.Path, b: Cache.Path, b_index: usize) bool { From 768cb7e406fe26711167cbb2d67efdc10f830fdc Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 11 Jul 2024 16:23:49 -0700 Subject: [PATCH 135/152] objcopy: use the fatal helper method --- lib/compiler/objcopy.zig | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index ff69c4b28750..f3360c8108ea 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -198,20 +198,14 @@ fn cmdObjCopy( return std.process.cleanExit(); }, .update => { - if (seen_update) { - std.debug.print("zig objcopy only supports 1 update for now\n", .{}); - std.process.exit(1); - } + if (seen_update) fatal("zig objcopy only supports 1 update for now", .{}); seen_update = true; try server.serveEmitBinPath(output, .{ .flags = .{ .cache_hit = false }, }); }, - else => { - std.debug.print("unsupported message: {s}", .{@tagName(hdr.tag)}); - 
std.process.exit(1); - }, + else => fatal("unsupported message: {s}", .{@tagName(hdr.tag)}), } } } From 818f9cb5a07bec1273d3fc1ecb54e7189a9e6106 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 11 Jul 2024 16:24:48 -0700 Subject: [PATCH 136/152] std.Build.Step.ObjCopy: remove random bytes from cache hash The cache hash already has the zig version in there, so it's not really needed. --- lib/std/Build/Step/ObjCopy.zig | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 908341aefb47..d314550f601d 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -98,10 +98,6 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { var man = b.graph.cache.obtain(); defer man.deinit(); - // Random bytes to make ObjCopy unique. Refresh this with new random - // bytes when ObjCopy implementation is modified incompatibly. - man.hash.add(@as(u32, 0xe18b7baf)); - const full_src_path = objcopy.input_file.getPath2(b, step); _ = try man.addFile(full_src_path, null); man.hash.addOptionalBytes(objcopy.only_section); From fd4d366009e92c79137ee681334f216bbfc9b5f5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 11 Jul 2024 16:25:21 -0700 Subject: [PATCH 137/152] std.Build.Cache.Path: fix the format method This function previously wrote a trailing directory separator, but that's not correct if the path refers to a file. --- lib/std/Build/Cache/Path.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 0abe79d373d0..b81786d0a8b6 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -157,12 +157,17 @@ pub fn format( } if (self.root_dir.path) |p| { try writer.writeAll(p); - try writer.writeAll(fs.path.sep_str); + if (self.sub_path.len > 0) { + try writer.writeAll(fs.path.sep_str); + try writer.writeAll(self.sub_path); + } + return; } if (self.sub_path.len > 0) { try writer.writeAll(self.sub_path); - try writer.writeAll(fs.path.sep_str); + return; } + try writer.writeByte('.'); } pub fn eql(self: Path, other: Path) bool { From a3c20dffaed77727494d34f7b4b03c0d10771270 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 11 Jul 2024 16:26:04 -0700 Subject: [PATCH 138/152] integrate Compile steps with file watching Updates the build runner to unconditionally require a zig lib directory parameter. This parameter is needed in order to correctly understand file system inputs from zig compiler subprocesses, since they will refer to "the zig lib directory", and the build runner needs to place file system watches on directories in there. The build runner's fanotify file watching implementation now accounts for when two or more Cache.Path instances compare unequal but ultimately refer to the same directory in the file system. Breaking change: std.Build no longer has a zig_lib_dir field. Instead, there is the Graph zig_lib_directory field, and individual Compile steps can still have their zig lib directories overridden. I think this is unlikely to break anyone's build in practice. The compiler now sends a "file_system_inputs" message to the build runner which shares the full set of files that were added to the cache system with the build system, so that the build runner can watch properly and redo the Compile step. This is implemented for whole cache mode but not yet for incremental cache mode. 
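For reference, the build runner decodes the message body roughly like this (a condensed sketch of the `Step.evalZigProcess` change below; `body` is the raw message payload and error handling is elided):

    var it = std.mem.splitScalar(u8, body, 0);
    while (it.next()) |prefixed_path| {
        // The prefix byte is sent incremented by one so that a zero byte
        // always means "entry separator" and never "prefix index".
        const prefix: std.zig.Server.Message.PathPrefix = @enumFromInt(prefixed_path[0] - 1);
        const sub_path = prefixed_path[1..];
        // ...map the prefix to a root directory, then register a watch input
        // on dirname(sub_path) for basename(sub_path).
        _ = prefix;
        _ = sub_path;
    }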
--- build.zig | 9 +++--- lib/compiler/build_runner.zig | 55 +++++++++++++++++++---------------- lib/std/Build.zig | 16 +++++----- lib/std/Build/Cache.zig | 16 ++++++++++ lib/std/Build/Step.zig | 38 ++++++++++++++++++++++++ lib/std/zig/Server.zig | 16 +++++++++- src/Compilation.zig | 14 +++++++++ src/main.zig | 32 +++++++++++++++++--- 8 files changed, 152 insertions(+), 44 deletions(-) diff --git a/build.zig b/build.zig index a364982ce993..d234c68c8eea 100644 --- a/build.zig +++ b/build.zig @@ -1261,7 +1261,9 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath { }); var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| { - std.debug.panic("unable to open 'doc/langref' directory: {s}", .{@errorName(err)}); + std.debug.panic("unable to open '{}doc/langref' directory: {s}", .{ + b.build_root, @errorName(err), + }); }; defer dir.close(); @@ -1280,10 +1282,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath { // in a temporary directory "--cache-root", b.cache_root.path orelse ".", }); - if (b.zig_lib_dir) |p| { - cmd.addArg("--zig-lib-dir"); - cmd.addDirectoryArg(p); - } + cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) }); cmd.addArgs(&.{"-i"}); cmd.addFileArg(b.path(b.fmt("doc/langref/{s}", .{entry.name}))); diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index b3bdd8804d93..384c16438046 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -31,21 +31,15 @@ pub fn main() !void { // skip my own exe name var arg_idx: usize = 1; - const zig_exe = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected path to zig compiler\n", .{}); - return error.InvalidArgs; - }; - const build_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected build root directory path\n", .{}); - return error.InvalidArgs; - }; - const cache_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected cache root directory path\n", .{}); - return error.InvalidArgs; - }; - const global_cache_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected global cache root directory path\n", .{}); - return error.InvalidArgs; + const zig_exe = nextArg(args, &arg_idx) orelse fatal("missing zig compiler path", .{}); + const zig_lib_dir = nextArg(args, &arg_idx) orelse fatal("missing zig lib directory path", .{}); + const build_root = nextArg(args, &arg_idx) orelse fatal("missing build root directory path", .{}); + const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{}); + const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{}); + + const zig_lib_directory: std.Build.Cache.Directory = .{ + .path = zig_lib_dir, + .handle = try std.fs.cwd().openDir(zig_lib_dir, .{}), }; const build_root_directory: std.Build.Cache.Directory = .{ @@ -72,6 +66,7 @@ pub fn main() !void { .zig_exe = zig_exe, .env_map = try process.getEnvMap(arena), .global_cache_root = global_cache_directory, + .zig_lib_directory = zig_lib_directory, .host = .{ .query = .{}, .result = try std.zig.system.resolveTargetQuery(.{}), @@ -189,8 +184,6 @@ pub fn main() !void { arg, next_arg, }); }; - } else if (mem.eql(u8, arg, "--zig-lib-dir")) { - builder.zig_lib_dir = .{ .cwd_relative = nextArgOrFatal(args, &arg_idx) }; } else if (mem.eql(u8, arg, "--seed")) { const next_arg = nextArg(args, &arg_idx) orelse fatalWithHint("expected u32 after '{s}'", .{arg}); @@ -416,15 +409,27 @@ pub fn main() !void { const reaction_set = rs: { 
const gop = try w.dir_table.getOrPut(gpa, path); if (!gop.found_existing) { - std.posix.fanotify_mark(w.fan_fd, .{ - .ADD = true, - .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { - fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); - }; - const dir_handle = try Watch.getDirHandle(gpa, path); - try w.handle_table.putNoClobber(gpa, dir_handle, .{}); + // `dir_handle` may already be present in the table in + // the case that we have multiple Cache.Path instances + // that compare inequal but ultimately point to the same + // directory on the file system. + // In such case, we must revert adding this directory, but keep + // the additions to the step set. + const dh_gop = try w.handle_table.getOrPut(gpa, dir_handle); + if (dh_gop.found_existing) { + _ = w.dir_table.pop(); + } else { + assert(dh_gop.index == gop.index); + dh_gop.value_ptr.* = .{}; + std.posix.fanotify_mark(w.fan_fd, .{ + .ADD = true, + .ONLYDIR = true, + }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { + fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); + }; + } + break :rs dh_gop.value_ptr; } break :rs &w.handle_table.values()[gop.index]; }; diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 556ed89e8d5a..36f7396c8eb6 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -54,7 +54,6 @@ libc_file: ?[]const u8 = null, /// Path to the directory containing build.zig. build_root: Cache.Directory, cache_root: Cache.Directory, -zig_lib_dir: ?LazyPath, pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null, args: ?[]const []const u8 = null, debug_log_scopes: []const []const u8 = &.{}, @@ -117,6 +116,7 @@ pub const Graph = struct { zig_exe: [:0]const u8, env_map: EnvMap, global_cache_root: Cache.Directory, + zig_lib_directory: Cache.Directory, needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{}, /// Information about the native target. Computed before build() is invoked. 
host: ResolvedTarget, @@ -293,7 +293,6 @@ pub fn create( }), .description = "Remove build artifacts from prefix path", }, - .zig_lib_dir = null, .install_path = undefined, .args = null, .host = graph.host, @@ -379,7 +378,6 @@ fn createChildOnly( .libc_file = parent.libc_file, .build_root = build_root, .cache_root = parent.cache_root, - .zig_lib_dir = parent.zig_lib_dir, .debug_log_scopes = parent.debug_log_scopes, .debug_compile_errors = parent.debug_compile_errors, .debug_pkg_config = parent.debug_pkg_config, @@ -687,7 +685,7 @@ pub fn addExecutable(b: *Build, options: ExecutableOptions) *Step.Compile { .max_rss = options.max_rss, .use_llvm = options.use_llvm, .use_lld = options.use_lld, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, .win32_manifest = options.win32_manifest, }); } @@ -735,7 +733,7 @@ pub fn addObject(b: *Build, options: ObjectOptions) *Step.Compile { .max_rss = options.max_rss, .use_llvm = options.use_llvm, .use_lld = options.use_lld, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, }); } @@ -791,7 +789,7 @@ pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *Step.Compile .max_rss = options.max_rss, .use_llvm = options.use_llvm, .use_lld = options.use_lld, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, .win32_manifest = options.win32_manifest, }); } @@ -842,7 +840,7 @@ pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *Step.Compile .max_rss = options.max_rss, .use_llvm = options.use_llvm, .use_lld = options.use_lld, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, }); } @@ -905,7 +903,7 @@ pub fn addTest(b: *Build, options: TestOptions) *Step.Compile { .test_runner = options.test_runner, .use_llvm = options.use_llvm, .use_lld = options.use_lld, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, }); } @@ -929,7 +927,7 @@ pub fn addAssembly(b: *Build, options: AssemblyOptions) *Step.Compile { .optimize = options.optimize, }, .max_rss = options.max_rss, - .zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir, + .zig_lib_dir = options.zig_lib_dir, }); obj_step.addAssemblyFile(options.source_file); return obj_step; diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index e78353fa455b..8ba2f9f12899 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1007,6 +1007,22 @@ pub const Manifest = struct { } self.files.deinit(self.cache.gpa); } + + pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayListUnmanaged(u8)) Allocator.Error!void { + assert(@typeInfo(std.zig.Server.Message.PathPrefix).Enum.fields.len == man.cache.prefixes_len); + const gpa = man.cache.gpa; + const files = man.files.keys(); + if (files.len > 0) { + for (files) |file| { + try buf.ensureUnusedCapacity(gpa, file.prefixed_path.sub_path.len + 2); + buf.appendAssumeCapacity(file.prefixed_path.prefix + 1); + buf.appendSliceAssumeCapacity(file.prefixed_path.sub_path); + buf.appendAssumeCapacity(0); + } + // The null byte is a separator, not a terminator. + buf.items.len -= 1; + } + } }; /// On operating systems that support symlinks, does a readlink. 
On other operating systems, diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 13cd47981bb2..3c6cd660fff1 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -435,6 +435,44 @@ pub fn evalZigProcess( s.result_cached = ebp_hdr.flags.cache_hit; result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]); }, + .file_system_inputs => { + s.clearWatchInputs(); + var it = std.mem.splitScalar(u8, body, 0); + while (it.next()) |prefixed_path| { + const prefix_index: std.zig.Server.Message.PathPrefix = @enumFromInt(prefixed_path[0] - 1); + const sub_path = try arena.dupe(u8, prefixed_path[1..]); + const sub_path_dirname = std.fs.path.dirname(sub_path) orelse ""; + switch (prefix_index) { + .cwd => { + const path: Build.Cache.Path = .{ + .root_dir = Build.Cache.Directory.cwd(), + .sub_path = sub_path_dirname, + }; + try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path)); + }, + .zig_lib => zl: { + if (s.cast(Step.Compile)) |compile| { + if (compile.zig_lib_dir) |lp| { + try addWatchInput(s, lp); + break :zl; + } + } + const path: Build.Cache.Path = .{ + .root_dir = s.owner.graph.zig_lib_directory, + .sub_path = sub_path_dirname, + }; + try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path)); + }, + .local_cache => { + const path: Build.Cache.Path = .{ + .root_dir = b.cache_root, + .sub_path = sub_path_dirname, + }; + try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path)); + }, + } + } + }, else => {}, // ignore other messages } diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index 7f8de00b4aeb..f1d3bc7b61d3 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -20,10 +20,24 @@ pub const Message = struct { test_metadata, /// Body is a TestResults test_results, + /// Body is a series of strings, delimited by null bytes. + /// Each string is a prefixed file path. + /// The first byte indicates the file prefix path (see the prefixes field + /// of Cache). This byte is sent over the wire incremented so that null + /// bytes are not confused with string terminators. + /// The remaining bytes are the file path relative to that prefix. + /// The prefixes are hard-coded in Compilation.create (cwd, zig lib dir, local cache dir). + file_system_inputs, _, }; + pub const PathPrefix = enum(u8) { + cwd, + zig_lib, + local_cache, + }; + /// Trailing: /// * extra: [extra_len]u32, /// * string_bytes: [string_bytes_len]u8, @@ -58,7 +72,7 @@ pub const Message = struct { }; /// Trailing: - /// * the file system path the emitted binary can be found + /// * file system path where the emitted binary can be found pub const EmitBinPath = extern struct { flags: Flags, diff --git a/src/Compilation.zig b/src/Compilation.zig index cc5fd1a9eba2..49d4b041ae8b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -235,6 +235,8 @@ astgen_wait_group: WaitGroup = .{}, llvm_opt_bisect_limit: c_int, +file_system_inputs: ?*std.ArrayListUnmanaged(u8), + pub const Emit = struct { /// Where the output will go. directory: Directory, @@ -1157,6 +1159,9 @@ pub const CreateOptions = struct { error_limit: ?Zcu.ErrorInt = null, global_cc_argv: []const []const u8 = &.{}, + /// Tracks all files that can cause the Compilation to be invalidated and need a rebuild.
+ file_system_inputs: ?*std.ArrayListUnmanaged(u8) = null, + pub const Entry = link.File.OpenOptions.Entry; }; @@ -1332,6 +1337,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .gpa = gpa, .manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}), }; + // These correspond to std.zig.Server.Message.PathPrefix. cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); cache.addPrefix(options.zig_lib_directory); cache.addPrefix(options.local_cache_directory); @@ -1508,6 +1514,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .force_undefined_symbols = options.force_undefined_symbols, .link_eh_frame_hdr = link_eh_frame_hdr, .global_cc_argv = options.global_cc_argv, + .file_system_inputs = options.file_system_inputs, }; // Prevent some footguns by making the "any" fields of config reflect @@ -2044,6 +2051,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { ); }; if (is_hit) { + if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf); + comp.last_update_was_cache_hit = true; log.debug("CacheMode.whole cache hit for {s}", .{comp.root_name}); const digest = man.final(); @@ -2170,6 +2179,11 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try comp.performAllTheWork(main_progress_node); + switch (comp.cache_use) { + .whole => if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf), + .incremental => {}, + } + if (comp.module) |zcu| { const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main }; diff --git a/src/main.zig b/src/main.zig index e00442f399e5..2fb49b74bcfa 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3227,6 +3227,9 @@ fn buildOutputType( process.raiseFileDescriptorLimit(); + var file_system_inputs: std.ArrayListUnmanaged(u8) = .{}; + defer file_system_inputs.deinit(gpa); + const comp = Compilation.create(gpa, arena, .{ .zig_lib_directory = zig_lib_directory, .local_cache_directory = local_cache_directory, @@ -3350,6 +3353,7 @@ fn buildOutputType( // than to any particular module. This feature can greatly reduce CLI // noise when --search-prefix and --mod are combined. 
.global_cc_argv = try cc_argv.toOwnedSlice(arena), + .file_system_inputs = &file_system_inputs, }) catch |err| switch (err) { error.LibCUnavailable => { const triple_name = try target.zigTriple(arena); @@ -3433,7 +3437,7 @@ fn buildOutputType( defer root_prog_node.end(); if (arg_mode == .translate_c) { - return cmdTranslateC(comp, arena, null, root_prog_node); + return cmdTranslateC(comp, arena, null, null, root_prog_node); } updateModule(comp, color, root_prog_node) catch |err| switch (err) { @@ -4059,6 +4063,7 @@ fn serve( var child_pid: ?std.process.Child.Id = null; const main_progress_node = std.Progress.start(.{}); + const file_system_inputs = comp.file_system_inputs.?; while (true) { const hdr = try server.receiveMessage(); @@ -4067,14 +4072,16 @@ fn serve( .exit => return cleanExit(), .update => { tracy.frameMark(); + file_system_inputs.clearRetainingCapacity(); if (arg_mode == .translate_c) { var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); const arena = arena_instance.allocator(); var output: Compilation.CImportResult = undefined; - try cmdTranslateC(comp, arena, &output, main_progress_node); + try cmdTranslateC(comp, arena, &output, file_system_inputs, main_progress_node); defer output.deinit(gpa); + try server.serveStringMessage(.file_system_inputs, file_system_inputs.items); if (output.errors.errorMessageCount() != 0) { try server.serveErrorBundle(output.errors); } else { @@ -4116,6 +4123,7 @@ fn serve( }, .hot_update => { tracy.frameMark(); + file_system_inputs.clearRetainingCapacity(); if (child_pid) |pid| { try comp.hotCodeSwap(main_progress_node, pid); try serveUpdateResults(&server, comp); @@ -4147,6 +4155,12 @@ fn serve( fn serveUpdateResults(s: *Server, comp: *Compilation) !void { const gpa = comp.gpa; + + if (comp.file_system_inputs) |file_system_inputs| { + assert(file_system_inputs.items.len > 0); + try s.serveStringMessage(.file_system_inputs, file_system_inputs.items); + } + var error_bundle = try comp.getAllErrorsAlloc(); defer error_bundle.deinit(gpa); if (error_bundle.errorMessageCount() > 0) { @@ -4434,6 +4448,7 @@ fn cmdTranslateC( comp: *Compilation, arena: Allocator, fancy_output: ?*Compilation.CImportResult, + file_system_inputs: ?*std.ArrayListUnmanaged(u8), prog_node: std.Progress.Node, ) !void { if (build_options.only_core_functionality) @panic("@translate-c is not available in a zig2.c build"); @@ -4454,7 +4469,10 @@ fn cmdTranslateC( }; if (fancy_output) |p| p.cache_hit = true; - const digest = if (try man.hit()) man.final() else digest: { + const digest = if (try man.hit()) digest: { + if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf); + break :digest man.final(); + } else digest: { if (fancy_output) |p| p.cache_hit = false; var argv = std.ArrayList([]const u8).init(arena); switch (comp.config.c_frontend) { @@ -4566,6 +4584,8 @@ fn cmdTranslateC( @errorName(err), }); + if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf); + break :digest digest; }; @@ -4678,6 +4698,9 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const self_exe_path = try introspect.findZigExePath(arena); try child_argv.append(self_exe_path); + const argv_index_zig_lib_dir = child_argv.items.len; + _ = try child_argv.addOne(); + const argv_index_build_file = child_argv.items.len; _ = try child_argv.addOne(); @@ -4727,7 +4750,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i 
+= 1; override_lib_dir = args[i]; - try child_argv.appendSlice(&.{ arg, args[i] }); continue; } else if (mem.eql(u8, arg, "--build-runner")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); @@ -4865,6 +4887,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { defer zig_lib_directory.handle.close(); const cwd_path = try process.getCwdAlloc(arena); + child_argv.items[argv_index_zig_lib_dir] = zig_lib_directory.path orelse cwd_path; + const build_root = try findBuildRoot(arena, .{ .cwd_path = cwd_path, .build_file = build_file, From 5a34e6c3e608e1f526bababd3a2a146f6216d045 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 11 Jul 2024 18:28:05 -0700 Subject: [PATCH 139/152] frontend: add file system inputs for incremental cache mode These are also used for whole cache mode in the case that any compile errors are emitted. --- lib/std/Build/Cache.zig | 1 + src/Compilation.zig | 67 +++++++++++++++++++++++++++++++++++++---- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 2 +- 4 files changed, 64 insertions(+), 8 deletions(-) diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 8ba2f9f12899..fe2a6b0a830d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1010,6 +1010,7 @@ pub const Manifest = struct { pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayListUnmanaged(u8)) Allocator.Error!void { assert(@typeInfo(std.zig.Server.Message.PathPrefix).Enum.fields.len == man.cache.prefixes_len); + buf.clearRetainingCapacity(); const gpa = man.cache.gpa; const files = man.files.keys(); if (files.len > 0) { diff --git a/src/Compilation.zig b/src/Compilation.zig index 49d4b041ae8b..a0cf2378dad5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2051,6 +2051,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { ); }; if (is_hit) { + // In this case the cache hit contains the full set of file system inputs. Nice! if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf); comp.last_update_was_cache_hit = true; @@ -2112,12 +2113,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { .incremental => {}, } + // From this point we add a preliminary set of file system inputs that + // affects both incremental and whole cache mode. For incremental cache + // mode, the long-lived compiler state will track additional file system + // inputs discovered after this point. For whole cache mode, we rely on + // these inputs to make it past AstGen, and once there, we can rely on + // learning file system inputs from the Cache object. + // For compiling C objects, we rely on the cache hash system to avoid duplicating work. // Add a Job for each C object. try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count()); for (comp.c_object_table.keys()) |key| { comp.c_object_work_queue.writeItemAssumeCapacity(key); } + if (comp.file_system_inputs) |fsi| { + for (comp.c_object_table.keys()) |c_object| { + try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path); + } + } // For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work. // Add a Job for each Win32 resource file. 
@@ -2126,6 +2139,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { for (comp.win32_resource_table.keys()) |key| { comp.win32_resource_work_queue.writeItemAssumeCapacity(key); } + if (comp.file_system_inputs) |fsi| { + for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) { + .rc => |f| try comp.appendFileSystemInput(fsi, f.owner.root, f.src_path), + .manifest => continue, + }; + } } if (comp.module) |zcu| { @@ -2160,12 +2179,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue; comp.astgen_work_queue.writeItemAssumeCapacity(file_index); } + if (comp.file_system_inputs) |fsi| { + for (zcu.import_table.values()) |file| { + try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path); + } + } // Put a work item in for checking if any files used with `@embedFile` changed. try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count()); for (zcu.embed_table.values()) |embed_file| { comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file); } + if (comp.file_system_inputs) |fsi| { + const ip = &zcu.intern_pool; + for (zcu.embed_table.values()) |embed_file| { + const sub_file_path = embed_file.sub_file_path.toSlice(ip); + try comp.appendFileSystemInput(fsi, embed_file.owner.root, sub_file_path); + } + } try comp.work_queue.writeItem(.{ .analyze_mod = std_mod }); if (comp.config.is_test) { @@ -2179,11 +2210,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try comp.performAllTheWork(main_progress_node); - switch (comp.cache_use) { - .whole => if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf), - .incremental => {}, - } - if (comp.module) |zcu| { const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main }; @@ -2224,6 +2250,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { switch (comp.cache_use) { .whole => |whole| { + if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf); + const digest = man.final(); // Rename the temporary directory into place. @@ -2311,6 +2339,30 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } +fn appendFileSystemInput( + comp: *Compilation, + file_system_inputs: *std.ArrayListUnmanaged(u8), + root: Cache.Path, + sub_file_path: []const u8, +) Allocator.Error!void { + const gpa = comp.gpa; + const prefixes = comp.cache_parent.prefixes(); + try file_system_inputs.ensureUnusedCapacity(gpa, root.sub_path.len + sub_file_path.len + 3); + if (file_system_inputs.items.len > 0) file_system_inputs.appendAssumeCapacity(0); + for (prefixes, 1..) |prefix_directory, i| { + if (prefix_directory.eql(root.root_dir)) { + file_system_inputs.appendAssumeCapacity(@intCast(i)); + if (root.sub_path.len > 0) { + file_system_inputs.appendSliceAssumeCapacity(root.sub_path); + file_system_inputs.appendAssumeCapacity(std.fs.path.sep); + } + file_system_inputs.appendSliceAssumeCapacity(sub_file_path); + return; + } + } + std.debug.panic("missing prefix directory: {}, {s}", .{ root, sub_file_path }); +} + fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { if (comp.bin_file) |lf| { // This is needed before reading the error flags. 
@@ -4218,6 +4270,9 @@ fn workerAstGenFile( .token = item.data.token, } }) catch continue; } + if (res.is_new) if (comp.file_system_inputs) |fsi| { + comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue; + }; const imported_path_digest = pt.zcu.filePathDigest(res.file_index); const imported_root_decl = pt.zcu.fileRootDecl(res.file_index); break :blk .{ res, imported_path_digest, imported_root_decl }; @@ -4588,7 +4643,7 @@ fn reportRetryableEmbedFileError( const gpa = mod.gpa; const src_loc = embed_file.src_loc; const ip = &mod.intern_pool; - const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{ embed_file.owner.root, embed_file.sub_file_path.toSlice(ip), @errorName(err), diff --git a/src/Zcu.zig b/src/Zcu.zig index a9d80b4fdf04..fd1e2f9d612f 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -728,7 +728,7 @@ pub const File = struct { source_loaded: bool, tree_loaded: bool, zir_loaded: bool, - /// Relative to the owning package's root_src_dir. + /// Relative to the owning package's root source directory. /// Memory is stored in gpa, owned by File. sub_file_path: []const u8, /// Whether this is populated depends on `source_loaded`. diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 4a1f257ddfe2..f6a47f626b13 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2666,7 +2666,7 @@ pub fn reportRetryableAstGenError( }, }; - const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{ file.mod.root, file.sub_file_path, @errorName(err), }); errdefer err_msg.destroy(gpa); From 67e3e4989d5dc654ce5fef87d5a974caa05b9733 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Jul 2024 00:18:29 -0700 Subject: [PATCH 140/152] Compilation: fix rebase conflict --- src/Compilation.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index a0cf2378dad5..94ce0ee26777 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2180,7 +2180,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { comp.astgen_work_queue.writeItemAssumeCapacity(file_index); } if (comp.file_system_inputs) |fsi| { - for (zcu.import_table.values()) |file| { + for (zcu.import_table.values()) |file_index| { + const file = zcu.fileByIndex(file_index); try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path); } } From 3ad81c40c01649551b4ad3d2c450d8b5f7934362 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 11 Jul 2024 21:36:34 -0400 Subject: [PATCH 141/152] Zcu: allow atomic operations on packed structs Same validation rules as the backing integer would have. 
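For example (a condensed form of the behavior test added below), a packed struct whose backing integer passes the usual atomic size validation can now be used directly:

    const Flags = packed struct { x: u7, y: u24, z: bool }; // backed by u32
    var f: Flags = .{ .x = 1, .y = 2, .z = true };
    @atomicStore(Flags, &f, .{ .x = 3, .y = 4, .z = false }, .seq_cst);
    _ = @atomicRmw(Flags, &f, .Xchg, .{ .x = 1, .y = 2, .z = true }, .seq_cst);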
--- src/Sema.zig | 4 +- src/Zcu.zig | 54 +++++++++---------- test/behavior/atomics.zig | 20 +++++++ .../atomics_with_invalid_type.zig | 18 +++++++ .../compile_errors/cmpxchg_with_float.zig | 10 ---- 5 files changed, 64 insertions(+), 42 deletions(-) create mode 100644 test/cases/compile_errors/atomics_with_invalid_type.zig delete mode 100644 test/cases/compile_errors/cmpxchg_with_float.zig diff --git a/src/Sema.zig b/src/Sema.zig index 1062ece2be5a..896d18d21f26 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -23950,7 +23950,7 @@ fn checkAtomicPtrOperand( error.BadType => return sema.fail( block, elem_ty_src, - "expected bool, integer, float, enum, or pointer type; found '{}'", + "expected bool, integer, float, enum, packed struct, or pointer type; found '{}'", .{elem_ty.fmt(pt)}, ), }; @@ -24279,7 +24279,7 @@ fn zirCmpxchg( return sema.fail( block, elem_ty_src, - "expected bool, integer, enum, or pointer type; found '{}'", + "expected bool, integer, enum, packed struct, or pointer type; found '{}'", .{elem_ty.fmt(pt)}, ); } diff --git a/src/Zcu.zig b/src/Zcu.zig index a9d80b4fdf04..5179225fc1fa 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3305,37 +3305,31 @@ pub fn atomicPtrAlignment( .spirv => @panic("TODO what should this value be?"), }; - const int_ty = switch (ty.zigTypeTag(mod)) { - .Int => ty, - .Enum => ty.intTagType(mod), - .Float => { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return .none; - }, - .Bool => return .none, - else => { - if (ty.isPtrAtRuntime(mod)) return .none; - return error.BadType; - }, - }; - - const bit_count = int_ty.intInfo(mod).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; + if (ty.toIntern() == .bool_type) return .none; + if (ty.isRuntimeFloat()) { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return .none; + } + if (ty.isAbiInt(mod)) { + const bit_count = ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + return .none; } - - return .none; + if (ty.isPtrAtRuntime(mod)) return .none; + return error.BadType; } pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 830c8a951dab..87ccc72174b6 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -413,6 +413,14 @@ test "atomics with different types" { try testAtomicsWithType(u0, 0, 0); try testAtomicsWithType(i0, 0, 0); + + try testAtomicsWithType(enum(u32) { x = 1234, y = 5678 }, .x, .y); + + try testAtomicsWithPackedStruct( + packed struct { x: u7, y: u24, z: bool }, + .{ .x = 1, .y = 2, .z = true }, + .{ .x = 3, .y = 4, .z = false }, + ); } fn testAtomicsWithType(comptime T: type, a: T, b: T) !void { @@ -426,6 +434,18 @@ fn testAtomicsWithType(comptime T: type, a: T, b: T) !void { try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst).? 
== a); } +fn testAtomicsWithPackedStruct(comptime T: type, a: T, b: T) !void { + const BackingInt = @typeInfo(T).Struct.backing_integer.?; + var x: T = b; + @atomicStore(T, &x, a, .seq_cst); + try expect(@as(BackingInt, @bitCast(x)) == @as(BackingInt, @bitCast(a))); + try expect(@as(BackingInt, @bitCast(@atomicLoad(T, &x, .seq_cst))) == @as(BackingInt, @bitCast(a))); + try expect(@as(BackingInt, @bitCast(@atomicRmw(T, &x, .Xchg, b, .seq_cst))) == @as(BackingInt, @bitCast(a))); + try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst) == null); + if (@sizeOf(T) != 0) + try expect(@as(BackingInt, @bitCast(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst).?)) == @as(BackingInt, @bitCast(a))); +} + test "return @atomicStore, using it as a void value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/cases/compile_errors/atomics_with_invalid_type.zig b/test/cases/compile_errors/atomics_with_invalid_type.zig new file mode 100644 index 000000000000..321cda365566 --- /dev/null +++ b/test/cases/compile_errors/atomics_with_invalid_type.zig @@ -0,0 +1,18 @@ +export fn float() void { + var x: f32 = 0; + _ = @cmpxchgWeak(f32, &x, 1, 2, .seq_cst, .seq_cst); +} + +const NormalStruct = struct { x: u32 }; +export fn normalStruct() void { + var x: NormalStruct = 0; + _ = @cmpxchgWeak(NormalStruct, &x, .{ .x = 1 }, .{ .x = 2 }, .seq_cst, .seq_cst); +} + +// error +// backend=stage2 +// target=native +// +// :3:22: error: expected bool, integer, enum, packed struct, or pointer type; found 'f32' +// :8:27: error: expected type 'tmp.NormalStruct', found 'comptime_int' +// :6:22: note: struct declared here diff --git a/test/cases/compile_errors/cmpxchg_with_float.zig b/test/cases/compile_errors/cmpxchg_with_float.zig deleted file mode 100644 index 1c2b7b6393fd..000000000000 --- a/test/cases/compile_errors/cmpxchg_with_float.zig +++ /dev/null @@ -1,10 +0,0 @@ -export fn entry() void { - var x: f32 = 0; - _ = @cmpxchgWeak(f32, &x, 1, 2, .seq_cst, .seq_cst); -} - -// error -// backend=stage2 -// target=native -// -// :3:22: error: expected bool, integer, enum, or pointer type; found 'f32' From 2b99b0428500ef880bc52e9f814f970e5fa29a7d Mon Sep 17 00:00:00 2001 From: Tau Date: Thu, 11 Jul 2024 13:57:37 +0200 Subject: [PATCH 142/152] Fix right shift on negative BigInts Closes #17662. --- lib/std/math/big/int.zig | 29 +++++++++++++++++++++++------ lib/std/math/big/int_test.zig | 17 +++++++++++++++++ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 1438d9c9901b..a7881a890515 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1173,7 +1173,9 @@ pub const Mutable = struct { /// Asserts there is enough memory to fit the result. The upper bound Limb count is /// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`. 
pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void { - if (a.limbs.len <= shift / limb_bits) { + const full_limbs_shifted_out = shift / limb_bits; + const remaining_bits_shifted_out = shift % limb_bits; + if (a.limbs.len <= full_limbs_shifted_out) { // Shifting negative numbers converges to -1 instead of 0 if (a.positive) { r.len = 1; @@ -1186,14 +1188,29 @@ pub const Mutable = struct { } return; } + const nonzero_negative_shiftout = if (a.positive) false else nonzero: { + for (a.limbs[0..full_limbs_shifted_out]) |x| { + if (x != 0) + break :nonzero true; + } + if (remaining_bits_shifted_out == 0) + break :nonzero false; + const not_covered: Log2Limb = @intCast(limb_bits - remaining_bits_shifted_out); + break :nonzero a.limbs[full_limbs_shifted_out] << not_covered != 0; + }; llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift); - r.normalize(a.limbs.len - (shift / limb_bits)); - r.positive = a.positive; - // Shifting negative numbers converges to -1 instead of 0 - if (!r.positive and r.len == 1 and r.limbs[0] == 0) { - r.limbs[0] = 1; + + r.len = a.limbs.len - full_limbs_shifted_out; + if (nonzero_negative_shiftout) { + if (full_limbs_shifted_out > 0) { + r.limbs[a.limbs.len - full_limbs_shifted_out] = 0; + r.len += 1; + } + r.addScalar(r.toConst(), -1); } + r.normalize(r.len); + r.positive = a.positive; } /// r = ~a under 2s complement wrapping semantics. diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index ecc177000290..f06917f0f2e5 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -2066,6 +2066,23 @@ test "shift-right negative" { defer arg3.deinit(); try a.shiftRight(&arg3, 1232); try testing.expect((try a.to(i32)) == -1); // -10 >> 1232 == -1 + + var arg4 = try Managed.initSet(testing.allocator, -5); + defer arg4.deinit(); + try a.shiftRight(&arg4, 2); + try testing.expect(try a.to(i32) == -2); // -5 >> 2 == -2 + + var arg5 = try Managed.initSet(testing.allocator, -0xffff0000eeee1111dddd2222cccc3333); + defer arg5.deinit(); + try a.shiftRight(&arg5, 67); + try testing.expect(try a.to(i64) == -0x1fffe0001dddc223); + + var arg6 = try Managed.initSet(testing.allocator, -0x1ffffffffffffffff); + defer arg6.deinit(); + try a.shiftRight(&arg6, 1); + try a.shiftRight(&a, 1); + a.setSign(true); + try testing.expect(try a.to(u64) == 0x8000000000000000); } test "sat shift-left simple unsigned" { From 3bf0d2e5168c1722656a5eaa56e4bf16272df6d5 Mon Sep 17 00:00:00 2001 From: YANG Xudong Date: Fri, 12 Jul 2024 15:47:32 +0800 Subject: [PATCH 143/152] std: Add loongarch support for coff. (#20583) * std: Add loongarch support for coff. See: https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#machine-types * Update toCoffMachine. 
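An illustrative sanity check of the new mapping (not part of this patch; it assumes the existing `toCoffMachine` helper on `std.Target.Cpu.Arch`):

    const std = @import("std");

    test "loongarch COFF machine type" {
        const arch: std.Target.Cpu.Arch = .loongarch64;
        try std.testing.expectEqual(std.coff.MachineType.LOONGARCH64, arch.toCoffMachine());
    }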
--- lib/std/Target.zig | 4 ++-- lib/std/coff.zig | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 5392ac228383..4ab7f8cf2497 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -1276,8 +1276,8 @@ pub const Cpu = struct { .spirv => .Unknown, .spirv32 => .Unknown, .spirv64 => .Unknown, - .loongarch32 => .Unknown, - .loongarch64 => .Unknown, + .loongarch32 => .LOONGARCH32, + .loongarch64 => .LOONGARCH64, }; } diff --git a/lib/std/coff.zig b/lib/std/coff.zig index 3f15352f436c..a41221b2731a 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -1002,6 +1002,10 @@ pub const MachineType = enum(u16) { I386 = 0x14c, /// Intel Itanium processor family IA64 = 0x200, + /// LoongArch32 + LOONGARCH32 = 0x6232, + /// LoongArch64 + LOONGARCH64 = 0x6264, /// Mitsubishi M32R little endian M32R = 0x9041, /// MIPS16 @@ -1047,6 +1051,8 @@ pub const MachineType = enum(u16) { .aarch64 => .ARM64, .riscv64 => .RISCV64, .x86_64 => .X64, + .loongarch32 => .LOONGARCH32, + .loongarch64 => .LOONGARCH64, // there's cases we don't (yet) handle else => unreachable, }; @@ -1062,6 +1068,8 @@ pub const MachineType = enum(u16) { .ARM64 => .aarch64, .RISCV64 => .riscv64, .X64 => .x86_64, + .LOONGARCH32 => .loongarch32, + .LOONGARCH64 => .loongarch64, // there's cases we don't (yet) handle else => null, }; From 0d79aa01768d600a19f7a7493afce417da7e3810 Mon Sep 17 00:00:00 2001 From: xtex Date: Fri, 12 Jul 2024 17:06:06 +0800 Subject: [PATCH 144/152] std.Build.Step.Run: support prefixed artifact args Just like how `addPrefixedFileArg` and `addPrefixedDirectoryArg` work, we can do the same for artifacts. Signed-off-by: Bingwu Zhang --- lib/std/Build/Step/Run.zig | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 69d6b393fd41..a03f794cd073 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -126,7 +126,7 @@ pub const StdIo = union(enum) { }; pub const Arg = union(enum) { - artifact: *Step.Compile, + artifact: PrefixedArtifact, lazy_path: PrefixedLazyPath, directory_source: PrefixedLazyPath, bytes: []u8, @@ -134,6 +134,11 @@ pub const Arg = union(enum) { output_directory: *Output, }; +pub const PrefixedArtifact = struct { + prefix: []const u8, + artifact: *Step.Compile, +}; + pub const PrefixedLazyPath = struct { prefix: []const u8, lazy_path: std.Build.LazyPath, @@ -185,10 +190,20 @@ pub fn enableTestRunnerMode(run: *Run) void { } pub fn addArtifactArg(run: *Run, artifact: *Step.Compile) void { + run.addPrefixedArtifactArg("", artifact); +} + +pub fn addPrefixedArtifactArg(run: *Run, prefix: []const u8, artifact: *Step.Compile) void { const b = run.step.owner; + + const prefixed_artifact: PrefixedArtifact = .{ + .prefix = b.dupe(prefix), + .artifact = artifact, + }; + run.argv.append(b.allocator, .{ .artifact = prefixed_artifact }) catch @panic("OOM"); + const bin_file = artifact.getEmittedBin(); bin_file.addStepDependencies(&run.step); - run.argv.append(b.allocator, Arg{ .artifact = artifact }) catch @panic("OOM"); } /// Provides a file path as a command line argument to the command being run.
@@ -610,14 +625,16 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void { man.hash.addBytes(file.prefix); man.hash.addBytes(file_path); }, - .artifact => |artifact| { + .artifact => |pa| { + const artifact = pa.artifact; + if (artifact.rootModuleTarget().os.tag == .windows) { // On Windows we don't have rpaths so we have to add .dll search paths to PATH run.addPathForDynLibs(artifact); } const file_path = artifact.installed_path orelse artifact.generated_bin.?.path.?; // the path is guaranteed to be set - try argv_list.append(file_path); + try argv_list.append(b.fmt("{s}{s}", .{ pa.prefix, file_path })); _ = try man.addFile(file_path, null); }, @@ -912,7 +929,7 @@ fn runCommand( // work even for the edge case that the binary was produced by a // third party. const exe = switch (run.argv.items[0]) { - .artifact => |exe| exe, + .artifact => |exe| exe.artifact, else => break :interpret, }; switch (exe.kind) { From 4f9a8b68430b9b44cba78664bd0f60d5c5db5fe3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Jul 2024 11:00:52 -0700 Subject: [PATCH 145/152] update build system unit test need to add another field to initialize now --- lib/std/Build/Step/Options.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 9ce23e0802a6..b67acd408697 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -523,6 +523,7 @@ test Options { .query = .{}, .result = try std.zig.system.resolveTargetQuery(.{}), }, + .zig_lib_directory = std.Build.Cache.Directory.cwd(), }; var builder = try std.Build.create( From f77b43dad39a5140f3e39f32e98e9624368c16d6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Jul 2024 14:19:17 -0700 Subject: [PATCH 146/152] zig build: add a --debug-target CLI flag it's not advertised in the usage and only available in debug builds of the compiler. Makes it easier to test changes to the build runner that might affect targets differently. --- src/main.zig | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/src/main.zig b/src/main.zig index 2fb49b74bcfa..0d942ea6c453 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4691,6 +4691,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { var verbose_llvm_cpu_features = false; var fetch_only = false; var system_pkg_dir_path: ?[]const u8 = null; + var debug_target: ?[]const u8 = null; const argv_index_exe = child_argv.items.len; _ = try child_argv.addOne(); @@ -4799,6 +4800,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { } else { warn("Zig was compiled without debug extensions. --debug-compile-errors has no effect.", .{}); } + } else if (mem.eql(u8, arg, "--debug-target")) { + if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); + i += 1; + if (build_options.enable_debug_extensions) { + debug_target = args[i]; + } else { + warn("Zig was compiled without debug extensions. 
--debug-target has no effect.", .{}); + } } else if (mem.eql(u8, arg, "--verbose-link")) { verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-cc")) { @@ -4857,11 +4866,27 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }); defer root_prog_node.end(); - const target_query: std.Target.Query = .{}; - const resolved_target: Package.Module.ResolvedTarget = .{ - .result = std.zig.resolveTargetQueryOrFatal(target_query), - .is_native_os = true, - .is_native_abi = true, + // Normally the build runner is compiled for the host target but here is + // some code to help when debugging edits to the build runner so that you + // can make sure it compiles successfully on other targets. + const resolved_target: Package.Module.ResolvedTarget = t: { + if (build_options.enable_debug_extensions) { + if (debug_target) |triple| { + const target_query = try std.Target.Query.parse(.{ + .arch_os_abi = triple, + }); + break :t .{ + .result = std.zig.resolveTargetQueryOrFatal(target_query), + .is_native_os = false, + .is_native_abi = false, + }; + } + } + break :t .{ + .result = std.zig.resolveTargetQueryOrFatal(.{}), + .is_native_os = true, + .is_native_abi = true, + }; }; const exe_basename = try std.zig.binNameAlloc(arena, .{ From 5efcc2e9e7c84893b9e418ca82d8d2d4366dde7c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Jul 2024 14:20:20 -0700 Subject: [PATCH 147/152] build runner: refactor fs watch logic for OS abstraction Makes the build runner compile successfully for non-linux targets; printing an error if you ask for --watch rather than making build scripts fail to compile. --- lib/compiler/build_runner.zig | 151 +++--------- lib/std/Build/Watch.zig | 432 +++++++++++++++++++++++----------- 2 files changed, 328 insertions(+), 255 deletions(-) diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 384c16438046..703571eb56a6 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -10,6 +10,7 @@ const File = std.fs.File; const Step = std.Build.Step; const Watch = std.Build.Watch; const Allocator = std.mem.Allocator; +const fatal = std.zig.fatal; pub const root = @import("@build"); pub const dependencies = @import("@dependencies"); @@ -371,18 +372,7 @@ pub fn main() !void { else => return err, }; - var w = Watch.init; - if (watch) { - w.fan_fd = try std.posix.fanotify_init(.{ - .CLASS = .NOTIF, - .CLOEXEC = true, - .NONBLOCK = true, - .REPORT_NAME = true, - .REPORT_DIR_FID = true, - .REPORT_FID = true, - .REPORT_TARGET_FID = true, - }, 0); - } + var w = if (watch) try Watch.init() else undefined; try run.thread_pool.init(thread_pool_options); defer run.thread_pool.deinit(); @@ -403,127 +393,53 @@ pub fn main() !void { }; if (!watch) return cleanExit(); - // Add missing marks and note persisted ones. - for (run.step_stack.keys()) |step| { - for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| { - const reaction_set = rs: { - const gop = try w.dir_table.getOrPut(gpa, path); - if (!gop.found_existing) { - const dir_handle = try Watch.getDirHandle(gpa, path); - // `dir_handle` may already be present in the table in - // the case that we have multiple Cache.Path instances - // that compare inequal but ultimately point to the same - // directory on the file system. - // In such case, we must revert adding this directory, but keep - // the additions to the step set. 
- const dh_gop = try w.handle_table.getOrPut(gpa, dir_handle); - if (dh_gop.found_existing) { - _ = w.dir_table.pop(); - } else { - assert(dh_gop.index == gop.index); - dh_gop.value_ptr.* = .{}; - std.posix.fanotify_mark(w.fan_fd, .{ - .ADD = true, - .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { - fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); - }; - } - break :rs dh_gop.value_ptr; - } - break :rs &w.handle_table.values()[gop.index]; - }; - for (files.items) |basename| { - const gop = try reaction_set.getOrPut(gpa, basename); - if (!gop.found_existing) gop.value_ptr.* = .{}; - try gop.value_ptr.put(gpa, step, w.generation); - } - } + switch (builtin.os.tag) { + .linux => {}, + else => fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)}), } - { - // Remove marks for files that are no longer inputs. - var i: usize = 0; - while (i < w.handle_table.entries.len) { - { - const reaction_set = &w.handle_table.values()[i]; - var step_set_i: usize = 0; - while (step_set_i < reaction_set.entries.len) { - const step_set = &reaction_set.values()[step_set_i]; - var dirent_i: usize = 0; - while (dirent_i < step_set.entries.len) { - const generations = step_set.values(); - if (generations[dirent_i] == w.generation) { - dirent_i += 1; - continue; - } - step_set.swapRemoveAt(dirent_i); - } - if (step_set.entries.len > 0) { - step_set_i += 1; - continue; - } - reaction_set.swapRemoveAt(step_set_i); - } - if (reaction_set.entries.len > 0) { - i += 1; - continue; - } - } - - const path = w.dir_table.keys()[i]; - - std.posix.fanotify_mark(w.fan_fd, .{ - .REMOVE = true, - .ONLYDIR = true, - }, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { - error.FileNotFound => {}, // Expected, harmless. - else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }), - }; - - w.dir_table.swapRemoveAt(i); - w.handle_table.swapRemoveAt(i); - } - w.generation +%= 1; - } + try w.update(gpa, run.step_stack.keys()); // Wait until a file system notification arrives. Read all such events // until the buffer is empty. Then wait for a debounce interval, resetting // if any more events come in. After the debounce interval has passed, // trigger a rebuild on all steps with modified inputs, as well as their // recursive dependants. 
- var poll_fds: [1]std.posix.pollfd = .{ - .{ - .fd = w.fan_fd, - .events = std.posix.POLL.IN, - .revents = undefined, - }, - }; var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined; const caption = std.fmt.bufPrint(&caption_buf, "Watching {d} Directories", .{ w.dir_table.entries.len, }) catch &caption_buf; var debouncing_node = main_progress_node.start(caption, 0); - var debouncing = false; - while (true) { - const timeout: i32 = if (debouncing) debounce_interval_ms else -1; - const events_len = try std.posix.poll(&poll_fds, timeout); - if (events_len == 0) { + var debounce_timeout: Watch.Timeout = .none; + while (true) switch (try w.wait(gpa, debounce_timeout)) { + .timeout => { debouncing_node.end(); - Watch.markFailedStepsDirty(gpa, run.step_stack.keys()); + markFailedStepsDirty(gpa, run.step_stack.keys()); continue :rebuild; - } - if (try w.markDirtySteps(gpa)) { - if (!debouncing) { - debouncing = true; - debouncing_node.end(); - debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0); - } - } - } + }, + .dirty => if (debounce_timeout == .none) { + debounce_timeout = .{ .ms = debounce_interval_ms }; + debouncing_node.end(); + debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0); + }, + .clean => {}, + }; } } +fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { + for (all_steps) |step| switch (step.state) { + .dependency_failure, .failure, .skipped => step.recursiveReset(gpa), + else => continue, + }; + // Now that all dirty steps have been found, the remaining steps that + // succeeded from last run shall be marked "cached". + for (all_steps) |step| switch (step.state) { + .success => step.result_cached = true, + else => continue, + }; +} + const Run = struct { max_rss: u64, max_rss_is_default: bool, @@ -1430,11 +1346,6 @@ fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn { process.exit(1); } -fn fatal(comptime f: []const u8, args: anytype) noreturn { - std.debug.print(f ++ "\n", args); - process.exit(1); -} - fn validateSystemLibraryOptions(b: *std.Build) void { var bad = false; for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| { diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index e4bef7ca4ef6..534e9814de24 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -1,41 +1,21 @@ +const builtin = @import("builtin"); const std = @import("../std.zig"); const Watch = @This(); const Step = std.Build.Step; const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const fatal = std.zig.fatal; dir_table: DirTable, -/// Keyed differently but indexes correspond 1:1 with `dir_table`. -handle_table: HandleTable, -fan_fd: std.posix.fd_t, +os: Os, generation: Generation, -pub const fan_mask: std.os.linux.fanotify.MarkMask = .{ - .CLOSE_WRITE = true, - .CREATE = true, - .DELETE = true, - .DELETE_SELF = true, - .EVENT_ON_CHILD = true, - .MOVED_FROM = true, - .MOVED_TO = true, - .MOVE_SELF = true, - .ONDIR = true, -}; - -pub const init: Watch = .{ - .dir_table = .{}, - .handle_table = .{}, - .fan_fd = -1, - .generation = 0, -}; - /// Key is the directory to watch which contains one or more files we are /// interested in noticing changes to. /// /// Value is generation. const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, void, Cache.Path.TableAdapter, false); -const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, ReactionSet, LinuxFileHandle.Adapter, false); /// Special key of "." 
means any changes in this directory trigger the steps. const ReactionSet = std.StringArrayHashMapUnmanaged(StepSet); const StepSet = std.AutoArrayHashMapUnmanaged(*Step, Generation); @@ -45,6 +25,255 @@ const Generation = u8; const Hash = std.hash.Wyhash; const Cache = std.Build.Cache; +const Os = switch (builtin.os.tag) { + .linux => struct { + const posix = std.posix; + + /// Keyed differently but indexes correspond 1:1 with `dir_table`. + handle_table: HandleTable, + poll_fds: [1]posix.pollfd, + + const HandleTable = std.ArrayHashMapUnmanaged(FileHandle, ReactionSet, FileHandle.Adapter, false); + + const fan_mask: std.os.linux.fanotify.MarkMask = .{ + .CLOSE_WRITE = true, + .CREATE = true, + .DELETE = true, + .DELETE_SELF = true, + .EVENT_ON_CHILD = true, + .MOVED_FROM = true, + .MOVED_TO = true, + .MOVE_SELF = true, + .ONDIR = true, + }; + + const FileHandle = struct { + handle: *align(1) std.os.linux.file_handle, + + fn clone(lfh: FileHandle, gpa: Allocator) Allocator.Error!FileHandle { + const bytes = lfh.slice(); + const new_ptr = try gpa.alignedAlloc( + u8, + @alignOf(std.os.linux.file_handle), + @sizeOf(std.os.linux.file_handle) + bytes.len, + ); + const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr); + new_header.* = lfh.handle.*; + const new: FileHandle = .{ .handle = new_header }; + @memcpy(new.slice(), lfh.slice()); + return new; + } + + fn destroy(lfh: FileHandle, gpa: Allocator) void { + const ptr: [*]u8 = @ptrCast(lfh.handle); + const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes]; + return gpa.free(allocated_slice); + } + + fn slice(lfh: FileHandle) []u8 { + const ptr: [*]u8 = &lfh.handle.f_handle; + return ptr[0..lfh.handle.handle_bytes]; + } + + const Adapter = struct { + pub fn hash(self: Adapter, a: FileHandle) u32 { + _ = self; + const unsigned_type: u32 = @bitCast(a.handle.handle_type); + return @truncate(Hash.hash(unsigned_type, a.slice())); + } + pub fn eql(self: Adapter, a: FileHandle, b: FileHandle, b_index: usize) bool { + _ = self; + _ = b_index; + return a.handle.handle_type == b.handle.handle_type and std.mem.eql(u8, a.slice(), b.slice()); + } + }; + }; + + fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !FileHandle { + var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; + var mount_id: i32 = undefined; + var buf: [std.fs.max_path_bytes]u8 = undefined; + const adjusted_path = if (path.sub_path.len == 0) "./" else std.fmt.bufPrint(&buf, "{s}/", .{ + path.sub_path, + }) catch return error.NameTooLong; + const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); + stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); + try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, &mount_id, std.os.linux.AT.HANDLE_FID); + const stack_lfh: FileHandle = .{ .handle = stack_ptr }; + return stack_lfh.clone(gpa); + } + + fn markDirtySteps(w: *Watch, gpa: Allocator) !bool { + const fan_fd = w.os.getFanFd(); + const fanotify = std.os.linux.fanotify; + const M = fanotify.event_metadata; + var events_buf: [256 + 4096]u8 = undefined; + var any_dirty = false; + while (true) { + var len = posix.read(fan_fd, &events_buf) catch |err| switch (err) { + error.WouldBlock => return any_dirty, + else => |e| return e, + }; + var meta: [*]align(1) M = @ptrCast(&events_buf); + while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ + len -= 
meta[0].event_len; + meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); + }) { + assert(meta[0].vers == M.VERSION); + if (meta[0].mask.Q_OVERFLOW) { + any_dirty = true; + std.log.warn("file system watch queue overflowed; falling back to fstat", .{}); + markAllFilesDirty(w, gpa); + return true; + } + const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); + switch (fid.hdr.info_type) { + .DFID_NAME => { + const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); + const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); + const file_name = std.mem.span(file_name_z); + const lfh: FileHandle = .{ .handle = file_handle }; + if (w.os.handle_table.getPtr(lfh)) |reaction_set| { + if (reaction_set.getPtr(".")) |glob_set| + any_dirty = markStepSetDirty(gpa, glob_set, any_dirty); + if (reaction_set.getPtr(file_name)) |step_set| + any_dirty = markStepSetDirty(gpa, step_set, any_dirty); + } + }, + else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), + } + } + } + } + + fn getFanFd(os: *const @This()) posix.fd_t { + return os.poll_fds[0].fd; + } + + fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void { + const fan_fd = w.os.getFanFd(); + // Add missing marks and note persisted ones. + for (steps) |step| { + for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| { + const reaction_set = rs: { + const gop = try w.dir_table.getOrPut(gpa, path); + if (!gop.found_existing) { + const dir_handle = try Os.getDirHandle(gpa, path); + // `dir_handle` may already be present in the table in + // the case that we have multiple Cache.Path instances + // that compare inequal but ultimately point to the same + // directory on the file system. + // In such case, we must revert adding this directory, but keep + // the additions to the step set. + const dh_gop = try w.os.handle_table.getOrPut(gpa, dir_handle); + if (dh_gop.found_existing) { + _ = w.dir_table.pop(); + } else { + assert(dh_gop.index == gop.index); + dh_gop.value_ptr.* = .{}; + posix.fanotify_mark(fan_fd, .{ + .ADD = true, + .ONLYDIR = true, + }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { + fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); + }; + } + break :rs dh_gop.value_ptr; + } + break :rs &w.os.handle_table.values()[gop.index]; + }; + for (files.items) |basename| { + const gop = try reaction_set.getOrPut(gpa, basename); + if (!gop.found_existing) gop.value_ptr.* = .{}; + try gop.value_ptr.put(gpa, step, w.generation); + } + } + } + + { + // Remove marks for files that are no longer inputs. 
+ var i: usize = 0; + while (i < w.os.handle_table.entries.len) { + { + const reaction_set = &w.os.handle_table.values()[i]; + var step_set_i: usize = 0; + while (step_set_i < reaction_set.entries.len) { + const step_set = &reaction_set.values()[step_set_i]; + var dirent_i: usize = 0; + while (dirent_i < step_set.entries.len) { + const generations = step_set.values(); + if (generations[dirent_i] == w.generation) { + dirent_i += 1; + continue; + } + step_set.swapRemoveAt(dirent_i); + } + if (step_set.entries.len > 0) { + step_set_i += 1; + continue; + } + reaction_set.swapRemoveAt(step_set_i); + } + if (reaction_set.entries.len > 0) { + i += 1; + continue; + } + } + + const path = w.dir_table.keys()[i]; + + posix.fanotify_mark(fan_fd, .{ + .REMOVE = true, + .ONLYDIR = true, + }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { + error.FileNotFound => {}, // Expected, harmless. + else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }), + }; + + w.dir_table.swapRemoveAt(i); + w.os.handle_table.swapRemoveAt(i); + } + w.generation +%= 1; + } + } + }, + else => void, +}; + +pub fn init() !Watch { + switch (builtin.os.tag) { + .linux => { + const fan_fd = try std.posix.fanotify_init(.{ + .CLASS = .NOTIF, + .CLOEXEC = true, + .NONBLOCK = true, + .REPORT_NAME = true, + .REPORT_DIR_FID = true, + .REPORT_FID = true, + .REPORT_TARGET_FID = true, + }, 0); + return .{ + .dir_table = .{}, + .os = switch (builtin.os.tag) { + .linux => .{ + .handle_table = .{}, + .poll_fds = .{ + .{ + .fd = fan_fd, + .events = std.posix.POLL.IN, + .revents = undefined, + }, + }, + }, + else => {}, + }, + .generation = 0, + }; + }, + else => @panic("unimplemented"), + } +} + pub const Match = struct { /// Relative to the watched directory, the file path that triggers this /// match. @@ -68,119 +297,8 @@ pub const Match = struct { }; }; -pub const LinuxFileHandle = struct { - handle: *align(1) std.os.linux.file_handle, - - pub fn clone(lfh: LinuxFileHandle, gpa: Allocator) Allocator.Error!LinuxFileHandle { - const bytes = lfh.slice(); - const new_ptr = try gpa.alignedAlloc( - u8, - @alignOf(std.os.linux.file_handle), - @sizeOf(std.os.linux.file_handle) + bytes.len, - ); - const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr); - new_header.* = lfh.handle.*; - const new: LinuxFileHandle = .{ .handle = new_header }; - @memcpy(new.slice(), lfh.slice()); - return new; - } - - pub fn destroy(lfh: LinuxFileHandle, gpa: Allocator) void { - const ptr: [*]u8 = @ptrCast(lfh.handle); - const allocated_slice = ptr[0 .. 
@sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes]; - return gpa.free(allocated_slice); - } - - pub fn slice(lfh: LinuxFileHandle) []u8 { - const ptr: [*]u8 = &lfh.handle.f_handle; - return ptr[0..lfh.handle.handle_bytes]; - } - - pub const Adapter = struct { - pub fn hash(self: Adapter, a: LinuxFileHandle) u32 { - _ = self; - const unsigned_type: u32 = @bitCast(a.handle.handle_type); - return @truncate(Hash.hash(unsigned_type, a.slice())); - } - pub fn eql(self: Adapter, a: LinuxFileHandle, b: LinuxFileHandle, b_index: usize) bool { - _ = self; - _ = b_index; - return a.handle.handle_type == b.handle.handle_type and std.mem.eql(u8, a.slice(), b.slice()); - } - }; -}; - -pub fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !LinuxFileHandle { - var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined; - var mount_id: i32 = undefined; - var buf: [std.fs.max_path_bytes]u8 = undefined; - const adjusted_path = if (path.sub_path.len == 0) "./" else std.fmt.bufPrint(&buf, "{s}/", .{ - path.sub_path, - }) catch return error.NameTooLong; - const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); - stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try std.posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, &mount_id, std.os.linux.AT.HANDLE_FID); - const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr }; - return stack_lfh.clone(gpa); -} - -pub fn markDirtySteps(w: *Watch, gpa: Allocator) !bool { - const fanotify = std.os.linux.fanotify; - const M = fanotify.event_metadata; - var events_buf: [256 + 4096]u8 = undefined; - var any_dirty = false; - while (true) { - var len = std.posix.read(w.fan_fd, &events_buf) catch |err| switch (err) { - error.WouldBlock => return any_dirty, - else => |e| return e, - }; - var meta: [*]align(1) M = @ptrCast(&events_buf); - while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({ - len -= meta[0].event_len; - meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len); - }) { - assert(meta[0].vers == M.VERSION); - if (meta[0].mask.Q_OVERFLOW) { - any_dirty = true; - std.log.warn("file system watch queue overflowed; falling back to fstat", .{}); - markAllFilesDirty(w, gpa); - return true; - } - const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1); - switch (fid.hdr.info_type) { - .DFID_NAME => { - const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle); - const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes); - const file_name = std.mem.span(file_name_z); - const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle }; - if (w.handle_table.getPtr(lfh)) |reaction_set| { - if (reaction_set.getPtr(".")) |glob_set| - any_dirty = markStepSetDirty(gpa, glob_set, any_dirty); - if (reaction_set.getPtr(file_name)) |step_set| - any_dirty = markStepSetDirty(gpa, step_set, any_dirty); - } - }, - else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}), - } - } - } -} - -pub fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { - for (all_steps) |step| switch (step.state) { - .dependency_failure, .failure, .skipped => step.recursiveReset(gpa), - else => continue, - }; - // Now that all dirty steps have been found, the remaining steps that - // succeeded from last run shall be marked "cached". 
-    for (all_steps) |step| switch (step.state) {
-        .success => step.result_cached = true,
-        else => continue,
-    };
-}
-
 fn markAllFilesDirty(w: *Watch, gpa: Allocator) void {
-    for (w.handle_table.values()) |reaction_set| {
+    for (w.os.handle_table.values()) |reaction_set| {
         for (reaction_set.values()) |step_set| {
             for (step_set.keys()) |step| {
                 step.recursiveReset(gpa);
@@ -199,3 +317,47 @@ fn markStepSetDirty(gpa: Allocator, step_set: *StepSet, any_dirty: bool) bool {
     }
     return any_dirty or this_any_dirty;
 }
+
+pub fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
+    switch (builtin.os.tag) {
+        .linux => return Os.update(w, gpa, steps),
+        else => @compileError("unimplemented"),
+    }
+}
+
+pub const Timeout = union(enum) {
+    none,
+    ms: u16,
+
+    pub fn to_i32_ms(t: Timeout) i32 {
+        return switch (t) {
+            .none => -1,
+            .ms => |ms| ms,
+        };
+    }
+};
+
+pub const WaitResult = enum {
+    timeout,
+    /// File system watching triggered on files that were marked as inputs to at least one Step.
+    /// Relevant steps have been marked dirty.
+    dirty,
+    /// File system watching triggered but none of the events were relevant to
+    /// what we are listening to. There is nothing to do.
+    clean,
+};
+
+pub fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
+    switch (builtin.os.tag) {
+        .linux => {
+            const events_len = try std.posix.poll(&w.os.poll_fds, timeout.to_i32_ms());
+            return if (events_len == 0)
+                .timeout
+            else if (try Os.markDirtySteps(w, gpa))
+                .dirty
+            else
+                .clean;
+        },
+        else => @compileError("unimplemented"),
+    }
+}

From 30ef0ed054a42b0c426d821a4fd9ac57e434d224 Mon Sep 17 00:00:00 2001
From: Krzysztof Wolicki
Date: Fri, 12 Jul 2024 16:05:23 +0200
Subject: [PATCH 148/152] Args including and after `--` get passed to build
 runner directly

---
 src/main.zig | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/main.zig b/src/main.zig
index 0d942ea6c453..39f07e982b65 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4852,6 +4852,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
             i += 1;
             child_argv.items[argv_index_seed] = args[i];
             continue;
+        } else if (mem.eql(u8, arg, "--")) {
+            // The rest of the args are passed on to the
+            // build runner's `build.args`.
+            try child_argv.appendSlice(args[i..]);
+            break;
         }
     }
     try child_argv.append(arg);

From 9f8e4ddf25feba313f7a9982796c4caf54fca6f9 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 13 Jul 2024 01:26:32 -0400
Subject: [PATCH 149/152] Builder: fix llvm ir syntax

---
 src/codegen/llvm/Builder.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index 558b0662ad57..a23bc86c57a9 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -9380,7 +9380,7 @@ pub fn printUnbuffered(
         \\
     , .{function_attributes.fmt(self)});
     try writer.print(
-        \\{s}{}{}{}{}{}{"} {} {}(
+        \\{s}{}{}{}{}{}{"} {%} {}(
    , .{
        if (function.instructions.len > 0) "define" else "declare",
        global.linkage,

From d8c1c9ea6e5ba70e9d7e7ce9b6c6b85f08cd4bca Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 12 Jul 2024 23:34:53 -0700
Subject: [PATCH 150/152] frontend: report correct paths for C objects

oops, the src_path field of CObject is not relative to the module
owner's root directory.
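For illustration (hypothetical paths): if the owning module is rooted at
`src/` and a C source file was added as `vendor/foo.c` relative to the
compiler's cwd, recording the input against the module root would have
produced the nonexistent path `src/vendor/foo.c`. Hence the sketch below,
mirroring the calls in the hunks that follow:

    // Wrong: re-roots the cwd-relative src_path under the module directory.
    try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path);

    // Right: record the path exactly as the C compiler was invoked with it.
    try comp.appendFileSystemInput(fsi, Cache.Path.cwd(), c_object.src.src_path);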
--- src/Compilation.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 94ce0ee26777..d262d6742d39 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -942,7 +942,7 @@ const CacheUse = union(CacheMode) { implib_sub_path: ?[]u8, docs_sub_path: ?[]u8, lf_open_opts: link.File.OpenOptions, - tmp_artifact_directory: ?Cache.Directory, + tmp_artifact_directory: ?Directory, /// Prevents other processes from clobbering files in the output directory. lock: ?Cache.Lock, @@ -2128,7 +2128,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } if (comp.file_system_inputs) |fsi| { for (comp.c_object_table.keys()) |c_object| { - try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path); + try comp.appendFileSystemInput(fsi, Cache.Path.cwd(), c_object.src.src_path); } } @@ -2141,7 +2141,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } if (comp.file_system_inputs) |fsi| { for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) { - .rc => |f| try comp.appendFileSystemInput(fsi, f.owner.root, f.src_path), + .rc => |f| try comp.appendFileSystemInput(fsi, Cache.Path.cwd(), f.src_path), .manifest => continue, }; } From 11534aa34d6e9c66081eb2918bfaacbb21db0e56 Mon Sep 17 00:00:00 2001 From: kcbanner Date: Sat, 13 Jul 2024 15:31:56 -0400 Subject: [PATCH 151/152] zcu: fixup incorrect pass-by-value of Zcu --- src/Sema.zig | 5 ----- src/Zcu.zig | 6 +++--- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 896d18d21f26..eb49bc037e34 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -508,11 +508,6 @@ pub const Block = struct { } }; - /// For debugging purposes. - pub fn dump(block: *Block, mod: Module) void { - Zir.dumpBlock(mod, block); - } - pub fn makeSubBlock(parent: *Block) Block { return .{ .parent = parent, diff --git a/src/Zcu.zig b/src/Zcu.zig index 7351592ec132..6a9812d73649 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3127,14 +3127,14 @@ pub fn errNote( /// Deprecated. There is no global target for a Zig Compilation Unit. Instead, /// look up the target based on the Module that contains the source code being /// analyzed. -pub fn getTarget(zcu: Module) Target { +pub fn getTarget(zcu: *const Zcu) Target { return zcu.root_mod.resolved_target.result; } /// Deprecated. There is no global optimization mode for a Zig Compilation /// Unit. Instead, look up the optimization mode based on the Module that /// contains the source code being analyzed. 
-pub fn optimizeMode(zcu: Module) std.builtin.OptimizeMode { +pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode { return zcu.root_mod.optimize_mode; } @@ -3203,7 +3203,7 @@ pub const Feature = enum { separate_thread, }; -pub fn backendSupportsFeature(zcu: Module, comptime feature: Feature) bool { +pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool { const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm); return target_util.backendSupportsFeature(backend, feature); } From 959d227d1370c9cf9198ed111e3a8bca15a7e47b Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Fri, 12 Jul 2024 00:38:10 -0700 Subject: [PATCH 152/152] ArgIteratorWindows: Reduce allocated memory by parsing the WTF-16 string directly Before this commit, the WTF-16 command line string would be converted to WTF-8 in `init`, and then a second buffer of the WTF-8 size + 1 would be allocated to store the parsed arguments. The converted WTF-8 command line would then be parsed and the relevant bytes would be copied into the argument buffer before being returned. After this commit, only the WTF-8 size of the WTF-16 string is calculated (without conversion) which is then used to allocate the buffer for the parsed arguments. Parsing is then done on the WTF-16 slice directly, with the arguments being converted to WTF-8 on-the-fly. This has a few (minor) benefits: - Cuts the amount of memory allocated by ArgIteratorWindows in half (or better) - Makes the total amount of memory allocated by ArgIteratorWindows predictable, since, before, the upfront `wtf16LeToWtf8Alloc` call could end up allocating more-memory-than-necessary temporarily due to its internal use of an ArrayList. Now, the amount of memory allocated is always exactly `calcWtf8Len(cmd_line) + 1`. --- lib/std/process.zig | 57 +++++++++++++++++++++++++++------------------ lib/std/unicode.zig | 33 ++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 23 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index fce77175abac..d40753f19d1b 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -663,11 +663,11 @@ pub const ArgIteratorWasi = struct { /// - https://daviddeley.com/autohotkey/parameters/parameters.htm#WINCRULES pub const ArgIteratorWindows = struct { allocator: Allocator, - /// Owned by the iterator. - /// Encoded as WTF-8. - cmd_line: []const u8, + /// Encoded as WTF-16 LE. + cmd_line: [:0]const u16, index: usize = 0, - /// Owned by the iterator. Long enough to hold the entire `cmd_line` plus a null terminator. + /// Owned by the iterator. Long enough to hold contiguous NUL-terminated slices + /// of each argument encoded as WTF-8. buffer: []u8, start: usize = 0, end: usize = 0, @@ -676,13 +676,18 @@ pub const ArgIteratorWindows = struct { /// `cmd_line_w` *must* be a WTF16-LE-encoded string. /// - /// The iterator makes a copy of `cmd_line_w` converted WTF-8 and keeps it; it does *not* take - /// ownership of `cmd_line_w`. + /// The iterator stores and uses `cmd_line_w`, so its memory must be valid for + /// at least as long as the returned ArgIteratorWindows. 
     pub fn init(allocator: Allocator, cmd_line_w: [*:0]const u16) InitError!ArgIteratorWindows {
-        const cmd_line = try unicode.wtf16LeToWtf8Alloc(allocator, mem.sliceTo(cmd_line_w, 0));
-        errdefer allocator.free(cmd_line);
-
-        const buffer = try allocator.alloc(u8, cmd_line.len + 1);
+        const cmd_line = mem.sliceTo(cmd_line_w, 0);
+        const wtf8_len = unicode.calcWtf8Len(cmd_line);
+
+        // This buffer must be large enough to contain contiguous NUL-terminated slices
+        // of each argument. For arguments past the first one, space for the NUL-terminator
+        // is guaranteed due to the necessary whitespace between arguments. However, we need
+        // one extra byte to guarantee enough room for the NUL terminator if the command line
+        // ends up being exactly 1 argument long with no quotes, etc.
+        const buffer = try allocator.alloc(u8, wtf8_len + 1);
         errdefer allocator.free(buffer);
 
         return .{
@@ -714,11 +719,11 @@ pub const ArgIteratorWindows = struct {
             for (0..count) |_| emitCharacter(self, '\\');
         }
 
-        fn emitCharacter(self: *ArgIteratorWindows, char: u8) void {
-            self.buffer[self.end] = char;
-            self.end += 1;
+        fn emitCharacter(self: *ArgIteratorWindows, code_unit: u16) void {
+            const wtf8_len = std.unicode.wtf8Encode(code_unit, self.buffer[self.end..]) catch unreachable;
+            self.end += wtf8_len;
 
-            // Because we are emitting WTF-8 byte-by-byte, we need to
+            // Because we are emitting WTF-8, we need to
             // check to see if we've emitted two consecutive surrogate
             // codepoints that form a valid surrogate pair in order
             // to ensure that we're always emitting well-formed WTF-8
@@ -732,9 +737,7 @@ pub const ArgIteratorWindows = struct {
            // This is relevant when dealing with a WTF-16 encoded
            // command line like this:
            // "<0xD801>"<0xDC37>
-            // which would get converted to WTF-8 in `cmd_line` as:
-            // "<0xED><0xA0><0x81>"<0xED><0xB0><0xB7>
-            // and then after parsing it'd naively get emitted as:
+            // which would get parsed and converted to WTF-8 as:
            // <0xED><0xA0><0x81><0xED><0xB0><0xB7>
            // but instead, we need to recognize the surrogate pair
            // and emit the codepoint it encodes, which in this
@@ -780,7 +783,7 @@ pub const ArgIteratorWindows = struct {
 
         fn emitBackslashes(_: *ArgIteratorWindows, _: usize) void {}
 
-        fn emitCharacter(_: *ArgIteratorWindows, _: u8) void {}
+        fn emitCharacter(_: *ArgIteratorWindows, _: u16) void {}
 
         fn yieldArg(_: *ArgIteratorWindows) bool {
             return true;
@@ -798,7 +801,10 @@ pub const ArgIteratorWindows = struct {
 
         var inside_quotes = false;
         while (true) : (self.index += 1) {
-            const char = if (self.index != self.cmd_line.len) self.cmd_line[self.index] else 0;
+            const char = if (self.index != self.cmd_line.len)
+                mem.littleToNative(u16, self.cmd_line[self.index])
+            else
+                0;
             switch (char) {
                 0 => {
                     return strategy.yieldArg(self);
@@ -823,7 +829,10 @@ pub const ArgIteratorWindows = struct {
 
         // Skip spaces and tabs. The iterator completes if we reach the end of the string here. 
while (true) : (self.index += 1) { - const char = if (self.index != self.cmd_line.len) self.cmd_line[self.index] else 0; + const char = if (self.index != self.cmd_line.len) + mem.littleToNative(u16, self.cmd_line[self.index]) + else + 0; switch (char) { 0 => return strategy.eof, ' ', '\t' => continue, @@ -844,7 +853,10 @@ pub const ArgIteratorWindows = struct { var backslash_count: usize = 0; var inside_quotes = false; while (true) : (self.index += 1) { - const char = if (self.index != self.cmd_line.len) self.cmd_line[self.index] else 0; + const char = if (self.index != self.cmd_line.len) + mem.littleToNative(u16, self.cmd_line[self.index]) + else + 0; switch (char) { 0 => { strategy.emitBackslashes(self, backslash_count); @@ -867,7 +879,7 @@ pub const ArgIteratorWindows = struct { } else { if (inside_quotes and self.index + 1 != self.cmd_line.len and - self.cmd_line[self.index + 1] == '"') + mem.littleToNative(u16, self.cmd_line[self.index + 1]) == '"') { strategy.emitCharacter(self, '"'); self.index += 1; @@ -892,7 +904,6 @@ pub const ArgIteratorWindows = struct { /// argument slices. pub fn deinit(self: *ArgIteratorWindows) void { self.allocator.free(self.buffer); - self.allocator.free(self.cmd_line); } }; diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig index c0edf89a1efa..7fbb0d04f0f2 100644 --- a/lib/std/unicode.zig +++ b/lib/std/unicode.zig @@ -2107,3 +2107,36 @@ test "well-formed WTF-16 roundtrips" { mem.nativeToLittle(u16, 0xDC00), // low surrogate }); } + +/// Returns the length, in bytes, that would be necessary to encode the +/// given WTF-16 LE slice as WTF-8. +pub fn calcWtf8Len(wtf16le: []const u16) usize { + var it = Wtf16LeIterator.init(wtf16le); + var num_wtf8_bytes: usize = 0; + while (it.nextCodepoint()) |codepoint| { + // Note: If utf8CodepointSequenceLength is ever changed to error on surrogate + // codepoints, then it would no longer be eligible to be used in this context. + num_wtf8_bytes += utf8CodepointSequenceLength(codepoint) catch |err| switch (err) { + error.CodepointTooLarge => unreachable, + }; + } + return num_wtf8_bytes; +} + +fn testCalcWtf8Len() !void { + const L = utf8ToUtf16LeStringLiteral; + try testing.expectEqual(@as(usize, 1), calcWtf8Len(L("a"))); + try testing.expectEqual(@as(usize, 10), calcWtf8Len(L("abcdefghij"))); + // unpaired surrogate + try testing.expectEqual(@as(usize, 3), calcWtf8Len(&[_]u16{ + mem.nativeToLittle(u16, 0xD800), + })); + try testing.expectEqual(@as(usize, 15), calcWtf8Len(L("こんにちは"))); + // First codepoints that are encoded as 1, 2, 3, and 4 bytes + try testing.expectEqual(@as(usize, 1 + 2 + 3 + 4), calcWtf8Len(L("\u{0}\u{80}\u{800}\u{10000}"))); +} + +test "calculate wtf8 string length of given wtf16 string" { + try testCalcWtf8Len(); + try comptime testCalcWtf8Len(); +}
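
A brief usage sketch tying `calcWtf8Len` to the exact-allocation strategy
described in the commit message above (byte counts are for the specific
literal shown; `utf8ToUtf16LeStringLiteral` is the existing std.unicode
helper):

    const std = @import("std");

    test "size an argument buffer without an intermediate WTF-8 copy" {
        const cmd_line_w = std.unicode.utf8ToUtf16LeStringLiteral("zig.exe héllo 世界");
        // 8 bytes for "zig.exe ", 6 for "héllo", 1 for " ", 6 for "世界".
        const wtf8_len = std.unicode.calcWtf8Len(cmd_line_w);
        try std.testing.expectEqual(@as(usize, 21), wtf8_len);
        // ArgIteratorWindows can thus allocate exactly `wtf8_len + 1` bytes
        // up front instead of converting the whole command line first.
    }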