From 3f8980971fb3f4893d32612d6d85a3101c0c0019 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Aug 2024 23:16:06 +0100 Subject: [PATCH] frontend: incremental progress. This fixes the regressions introduced by #20964. It also cleans up the "outdated file root" mechanism by virtue of deleting it. We now detect outdated file roots just after updating ZIR refs, and re-scan their namespaces. @jacobly0, this deletes some asserts in `InternPool` which didn't look appropriate to me, in order to deal with #20697. --- src/Compilation.zig | 17 +- src/InternPool.zig | 2 - src/Sema.zig | 3 +- src/Zcu.zig | 111 +++++--------- src/Zcu/PerThread.zig | 349 ++++++++++++++++++++++-------------------- 5 files changed, 228 insertions(+), 254 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 01a5772e231d..d596aff6f5a7 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3599,10 +3599,9 @@ fn performAllTheWorkInner( // Pre-load these things from our single-threaded context since they // will be needed by the worker threads. 
const path_digest = zcu.filePathDigest(file_index); - const old_root_type = zcu.fileRootType(file_index); const file = zcu.fileByIndex(file_index); comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{ - comp, file, file_index, path_digest, old_root_type, zir_prog_node, &astgen_wait_group, .root, + comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root, }); } } @@ -3640,6 +3639,7 @@ fn performAllTheWorkInner( } try reportMultiModuleErrors(pt); try zcu.flushRetryableFailures(); + zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); } @@ -4291,7 +4291,6 @@ fn workerAstGenFile( file: *Zcu.File, file_index: Zcu.File.Index, path_digest: Cache.BinDigest, - old_root_type: InternPool.Index, prog_node: std.Progress.Node, wg: *WaitGroup, src: Zcu.AstGenSrc, @@ -4300,7 +4299,7 @@ fn workerAstGenFile( defer child_prog_node.end(); const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - pt.astGenFile(file, path_digest, old_root_type) catch |err| switch (err) { + pt.astGenFile(file, path_digest) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4331,7 +4330,7 @@ fn workerAstGenFile( // `@import("builtin")` is handled specially. 
if (mem.eql(u8, import_path, "builtin")) continue; - const import_result, const imported_path_digest, const imported_root_type = blk: { + const import_result, const imported_path_digest = blk: { comp.mutex.lock(); defer comp.mutex.unlock(); @@ -4346,8 +4345,7 @@ fn workerAstGenFile( comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue; }; const imported_path_digest = pt.zcu.filePathDigest(res.file_index); - const imported_root_type = pt.zcu.fileRootType(res.file_index); - break :blk .{ res, imported_path_digest, imported_root_type }; + break :blk .{ res, imported_path_digest }; }; if (import_result.is_new) { log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ @@ -4358,7 +4356,7 @@ fn workerAstGenFile( .import_tok = item.data.token, } }; comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ - comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_type, prog_node, wg, sub_src, + comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src, }); } } @@ -6451,7 +6449,8 @@ fn buildOutputFromZig( try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node); - assert(out.* == null); + // Under incremental compilation, `out` may already be populated from a prior update. 
+ assert(out.* == null or comp.incremental); out.* = try sub_compilation.toCrtFile(); } diff --git a/src/InternPool.zig b/src/InternPool.zig index 08168900685d..e812cb037ae3 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -961,7 +961,6 @@ const Local = struct { pub fn view(mutable: Mutable) View { const capacity = mutable.list.header().capacity; - assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = mutable.list.bytes, .len = mutable.mutate.len, @@ -999,7 +998,6 @@ const Local = struct { pub fn view(list: ListSelf) View { const capacity = list.header().capacity; - assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = list.bytes, .len = capacity, diff --git a/src/Sema.zig b/src/Sema.zig index 20daa78e9a3f..0d699e88df7b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6006,8 +6006,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); - const old_root_type = zcu.fileRootType(result.file_index); - pt.astGenFile(result.file, path_digest, old_root_type) catch |err| + pt.astGenFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); // TODO: register some kind of dependency on the file. diff --git a/src/Zcu.zig b/src/Zcu.zig index bcb331b59755..8a696d99c9e5 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -162,12 +162,6 @@ outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, -/// This contains a set of struct types whose corresponding `Cau` may not be in -/// `outdated`, but are the root types of files which have updated source and -/// thus must be re-analyzed. 
If such a type is only in this set, the struct type -/// index may be preserved (only the namespace might change). If its owned `Cau` -/// is also outdated, the struct type index must be recreated. -outdated_file_root: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, /// This contains a list of AnalUnit whose analysis or codegen failed, but the /// failure was something like running out of disk space, and trying again may /// succeed. On the next update, we will flush this list, marking all members of @@ -2148,7 +2142,6 @@ pub fn deinit(zcu: *Zcu) void { zcu.potentially_outdated.deinit(gpa); zcu.outdated.deinit(gpa); zcu.outdated_ready.deinit(gpa); - zcu.outdated_file_root.deinit(gpa); zcu.retryable_failures.deinit(gpa); zcu.test_functions.deinit(gpa); @@ -2355,8 +2348,6 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.incremental) return null; - if (true) @panic("TODO: findOutdatedToAnalyze"); - if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { log.debug("findOutdatedToAnalyze: no outdated depender", .{}); return null; @@ -2381,87 +2372,57 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { return zcu.outdated_ready.keys()[0]; } - // Next, we will see if there is any outdated file root which was not in - // `outdated`. This set will be small (number of files changed in this - // update), so it's alright for us to just iterate here. - for (zcu.outdated_file_root.keys()) |file_decl| { - const decl_depender = AnalUnit.wrap(.{ .decl = file_decl }); - if (zcu.outdated.contains(decl_depender)) { - // Since we didn't hit this in the first loop, this Decl must have - // pending dependencies, so is ineligible. - continue; - } - if (zcu.potentially_outdated.contains(decl_depender)) { - // This Decl's struct may or may not need to be recreated depending - // on whether it is outdated. 
If we analyzed it now, we would have - // to assume it was outdated and recreate it! - continue; - } - log.debug("findOutdatedToAnalyze: outdated file root decl '{d}'", .{file_decl}); - return decl_depender; - } - - // There is no single AnalUnit which is ready for re-analysis. Instead, we - // must assume that some Decl with PO dependencies is outdated - e.g. in the - // above example we arbitrarily pick one of A or B. We should select a Decl, - // since a Decl is definitely responsible for the loop in the dependency - // graph (since you can't depend on a runtime function analysis!). + // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some + // Cau with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of + // A or B. We should select a Cau, since a Cau is definitely responsible for the loop in the + // dependency graph (since IES dependencies can't have loops). We should also, of course, not + // select a Cau owned by a `comptime` declaration, since you can't depend on those! - // The choice of this Decl could have a big impact on how much total - // analysis we perform, since if analysis concludes its tyval is unchanged, - // then other PO AnalUnit may be resolved as up-to-date. To hopefully avoid - // doing too much work, let's find a Decl which the most things depend on - - // the idea is that this will resolve a lot of loops (but this is only a - // heuristic). + // The choice of this Cau could have a big impact on how much total analysis we perform, since + // if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit + // may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a Decl + // which the most things depend on - the idea is that this will resolve a lot of loops (but this + // is only a heuristic). 
log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{ zcu.outdated.count(), zcu.potentially_outdated.count(), }); - const Decl = {}; - - var chosen_decl_idx: ?Decl.Index = null; - var chosen_decl_dependers: u32 = undefined; - - for (zcu.outdated.keys()) |depender| { - const decl_index = switch (depender.unwrap()) { - .decl => |d| d, - .func => continue, - }; - - var n: u32 = 0; - var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); - while (it.next()) |_| n += 1; + const ip = &zcu.intern_pool; - if (chosen_decl_idx == null or n > chosen_decl_dependers) { - chosen_decl_idx = decl_index; - chosen_decl_dependers = n; - } - } + var chosen_cau: ?InternPool.Cau.Index = null; + var chosen_cau_dependers: u32 = undefined; - for (zcu.potentially_outdated.keys()) |depender| { - const decl_index = switch (depender.unwrap()) { - .decl => |d| d, - .func => continue, - }; + inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| { + for (outdated_units) |unit| { + const cau = switch (unit.unwrap()) { + .cau => |cau| cau, + .func => continue, // a `func` definitely can't be causing the loop so it is a bad choice + }; + const cau_owner = ip.getCau(cau).owner; - var n: u32 = 0; - var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); - while (it.next()) |_| n += 1; + var n: u32 = 0; + var it = ip.dependencyIterator(switch (cau_owner.unwrap()) { + .none => continue, // there can be no dependencies on this `Cau` so it is a terrible choice + .type => |ty| .{ .interned = ty }, + .nav => |nav| .{ .nav_val = nav }, + }); + while (it.next()) |_| n += 1; - if (chosen_decl_idx == null or n > chosen_decl_dependers) { - chosen_decl_idx = decl_index; - chosen_decl_dependers = n; + if (chosen_cau == null or n > chosen_cau_dependers) { + chosen_cau = cau; + chosen_cau_dependers = n; + } } } - log.debug("findOutdatedToAnalyze: heuristic returned Decl {d} ({d} dependers)", .{ - 
chosen_decl_idx.?, - chosen_decl_dependers, + log.debug("findOutdatedToAnalyze: heuristic returned Cau {d} ({d} dependers)", .{ + @intFromEnum(chosen_cau.?), + chosen_cau_dependers, }); - return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? }); + return AnalUnit.wrap(.{ .cau = chosen_cau.? }); } /// During an incremental update, before semantic analysis, call this to flush all values from @@ -2583,7 +2544,7 @@ pub fn mapOldZirToNew( break :inst unnamed_tests.items[unnamed_test_idx]; }, _ => inst: { - const name_nts = new_decl.name.toString(old_zir).?; + const name_nts = new_decl.name.toString(new_zir).?; const name = new_zir.nullTerminatedString(name_nts); if (new_decl.name.isNamedTest(new_zir)) { break :inst named_tests.get(name) orelse continue; diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 1074062d55c5..2cc908d1ab64 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -39,7 +39,6 @@ pub fn astGenFile( pt: Zcu.PerThread, file: *Zcu.File, path_digest: Cache.BinDigest, - old_root_type: InternPool.Index, ) !void { dev.check(.ast_gen); assert(!file.mod.isBuiltin()); @@ -299,25 +298,15 @@ pub fn astGenFile( file.status = .astgen_failure; return error.AnalysisFail; } - - if (old_root_type != .none) { - // The root of this file must be re-analyzed, since the file has changed. 
- comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated file root type: {}", .{old_root_type}); - try zcu.outdated_file_root.put(gpa, old_root_type, {}); - } } const UpdatedFile = struct { - file_index: Zcu.File.Index, file: *Zcu.File, inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index), }; -fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.ArrayListUnmanaged(UpdatedFile)) void { - for (updated_files.items) |*elem| elem.inst_map.deinit(gpa); +fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile)) void { + for (updated_files.values()) |*elem| elem.inst_map.deinit(gpa); updated_files.deinit(gpa); } @@ -328,143 +317,165 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { const gpa = zcu.gpa; // We need to visit every updated File for every TrackedInst in InternPool. - var updated_files: std.ArrayListUnmanaged(UpdatedFile) = .{}; + var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{}; defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); const old_zir = file.prev_zir orelse continue; const new_zir = file.zir; - try updated_files.append(gpa, .{ - .file_index = file_index, + const gop = try updated_files.getOrPut(gpa, file_index); + assert(!gop.found_existing); + gop.value_ptr.* = .{ .file = file, .inst_map = .{}, - }); - const inst_map = &updated_files.items[updated_files.items.len - 1].inst_map; - try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, inst_map); + }; + if (!new_zir.hasCompileErrors()) { + try Zcu.mapOldZirToNew(gpa, old_zir.*, file.zir, &gop.value_ptr.inst_map); + } } - if (updated_files.items.len == 0) + if (updated_files.count() == 0) return; for (ip.locals, 0..) |*local, tid| { const tracked_insts_list = local.getMutableTrackedInsts(gpa); for (tracked_insts_list.view().items(.@"0"), 0..) 
|*tracked_inst, tracked_inst_unwrapped_index| { - for (updated_files.items) |updated_file| { - const file_index = updated_file.file_index; - if (tracked_inst.file != file_index) continue; - - const file = updated_file.file; - const old_zir = file.prev_zir.?.*; - const new_zir = file.zir; - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - const inst_map = &updated_file.inst_map; - - const old_inst = tracked_inst.inst; - const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ - .tid = @enumFromInt(tid), - .index = @intCast(tracked_inst_unwrapped_index), - }).wrap(ip); - tracked_inst.inst = inst_map.get(old_inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); - continue; - }; + const file_index = tracked_inst.file; + const updated_file = updated_files.get(file_index) orelse continue; - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - tracked_inst.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); + const file = updated_file.file; + + const old_inst = tracked_inst.inst; + const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ + .tid = @enumFromInt(tid), + .index = @intCast(tracked_inst_unwrapped_index), + }).wrap(ip); + if (file.zir.hasCompileErrors()) { + // If we mark this as outdated now, users of this inst will just get a transitive analysis failure. + // Ultimately, they would end up throwing out potentially useful analysis results. + // So, do nothing. We already have the file failure -- that's sufficient for now! 
+ continue; + } + tracked_inst.inst = updated_file.inst_map.get(old_inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. + log.debug("tracking failed for %{d}", .{old_inst}); + tracked_inst.inst = @enumFromInt(std.math.maxInt(u32)); + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + continue; + }; + + const old_zir = file.prev_zir.?.*; + const new_zir = file.zir; + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); + + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + tracked_inst.inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + } - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); + }, + else => false, + }; + if (!has_namespace) continue; + + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + try old_names.put(zcu.gpa, name_ip, {}); } - var any_change = false; - { - var it = new_zir.declIterator(tracked_inst.inst); - while (it.next()) |decl_inst| { - const decl_name = new_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(new_zir)) continue, - } - const name_zir = 
decl_name.toString(new_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - new_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = tracked_inst_index, - .name = name_ip, - } }); + } + var any_change = false; + { + var it = new_zir.declIterator(tracked_inst.inst); + while (it.next()) |decl_inst| { + const decl_name = new_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(new_zir)) continue, + } - } - // The only elements remaining in `old_names` now are any names which were removed. - for (old_names.keys()) |name_ip| { + const name_zir = decl_name.toString(new_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + new_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added + any_change = true; + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = tracked_inst_index, + .name = name_ip, + } }); + } + } + // The only elements remaining in `old_names` now are any names which were removed.
+ for (old_names.keys()) |name_ip| { + any_change = true; + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = tracked_inst_index, + .name = name_ip, + } }); + } - if (any_change) { - try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); - } + if (any_change) { + try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); } } } - for (updated_files.items) |updated_file| { + // TODO @jacobly0 REHASH tracked_insts HERE PLS + + for (updated_files.keys(), updated_files.values()) |file_index, updated_file| { const file = updated_file.file; - const prev_zir = file.prev_zir.?; - file.prev_zir = null; - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); + if (file.zir.hasCompileErrors()) { + // Keep `prev_zir` around: it's the last non-error ZIR. + // Don't update the namespace, as we have no new data to update *to*. + } else { + const prev_zir = file.prev_zir.?; + file.prev_zir = null; + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + + // For every file which has changed, re-scan the namespace of the file's root struct type. + // These types are special-cased because they don't have an enclosing declaration which will + // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this + // now because this work is fast (no actual Sema work is happening, we're just updating the + // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace` + // will track some instructions. + try pt.updateFileNamespace(file_index); + } } } @@ -473,6 +484,8 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const file_root_type = pt.zcu.fileRootType(file_index); if (file_root_type != .none) { + // The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the + // start of this update. We just have to check whether the type itself is okay! 
const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; return pt.ensureCauAnalyzed(file_root_type_cau); } else { @@ -516,12 +529,7 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu _ = zcu.outdated_ready.swapRemove(anal_unit); } - // TODO: this only works if namespace lookups in Sema trigger `ensureCauAnalyzed`, because - // `outdated_file_root` information is not "viral", so we need that a namespace lookup first - // handles the case where the file root is not an outdated *type* but does have an outdated - // *namespace*. A more logically simple alternative may be for a file's root struct to register - // a dependency on the file's entire source code (hash). Alternatively, we could make sure that - // these are always handled first in an update. Actually, that's probably the best option. + // TODO: document this elsewhere mlugg! // For my own benefit, here's how a namespace update for a normal (non-file-root) type works: // `const S = struct { ... };` // We are adding or removing a declaration within this `struct`. @@ -535,16 +543,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // * we basically do `scanDecls`, updating the namespace as needed // * TODO: optimize this to make sure we only do it once a generation i guess? // * so everyone lived happily ever after - const file_root_outdated = switch (cau.owner.unwrap()) { - .type => |ty| zcu.outdated_file_root.swapRemove(ty), - .nav, .none => false, - }; if (zcu.fileByIndex(inst_info.file).status != .success_zir) { return error.AnalysisFail; } - if (!cau_outdated and !file_root_outdated) { + if (!cau_outdated) { // We can trust the current information about this `Cau`. 
if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { return error.AnalysisFail; @@ -571,10 +575,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const sema_result: SemaCauResult = res: { if (inst_info.inst == .main_struct_inst) { - const changed = try pt.semaFileUpdate(inst_info.file, cau_outdated); + // Note that this is definitely a *recreation* due to outdated, because + // this instruction indicates that `cau.owner` is a `type`, which only + // reaches here if `cau_outdated`. + try pt.recreateFileRoot(inst_info.file); break :res .{ - .invalidate_decl_val = changed, - .invalidate_decl_ref = changed, + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }; } @@ -915,12 +922,9 @@ fn createFileRootStruct( return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); } -/// Re-analyze the root type of a file on an incremental update. -/// If `type_outdated`, the struct type itself is considered outdated and is -/// reconstructed at a new InternPool index. Otherwise, the namespace is just -/// re-analyzed. Returns whether the decl's tyval was invalidated. -/// Returns `error.AnalysisFail` if the file has an error. -fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool { +/// Recreate the root type of a file after it becomes outdated. A new struct type +/// is constructed at a new InternPool index, reusing the namespace for efficiency. 
+fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const file = zcu.fileByIndex(file_index); @@ -929,48 +933,58 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: assert(file_root_type != .none); - log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ + log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{ file.mod.fully_qualified_name, file.sub_file_path, - type_outdated, }); if (file.status != .success_zir) { return error.AnalysisFail; } - if (type_outdated) { - // Invalidate the existing type, reusing its namespace. - const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; - ip.removeDependenciesForDepender( - zcu.gpa, - InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), - ); - ip.remove(pt.tid, file_root_type); - _ = try pt.createFileRootStruct(file_index, namespace_index); - return true; - } - - // Only the struct's namespace is outdated. - // Preserve the type - just scan the namespace again. + // Invalidate the existing type, reusing its namespace. + const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; + ip.removeDependenciesForDepender( + zcu.gpa, + InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), + ); + ip.remove(pt.tid, file_root_type); + _ = try pt.createFileRootStruct(file_index, namespace_index); +} - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; - const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); +/// Re-scan the namespace of a file's root struct type on an incremental update. +/// The file must have successfully populated ZIR. +/// If the file's root struct type is not populated (the file is unreferenced), nothing is done. +/// This is called by `updateZirRefs` for all updated files before the main work loop. +/// This function does not perform any semantic analysis. 
+fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.Error!void { + const zcu = pt.zcu; - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; - extra_index += 1; - break :blk decls_len; - } else 0; - const decls = file.zir.bodySlice(extra_index, decls_len); + const file = zcu.fileByIndex(file_index); + assert(file.status == .success_zir); + const file_root_type = zcu.fileRootType(file_index); + if (file_root_type == .none) return; - if (!type_outdated) { - try pt.scanNamespace(namespace_index, decls); - } + log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{ + file.mod.fully_qualified_name, + file.sub_file_path, + }); - return false; + const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); + const decls = decls: { + const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + break :decls file.zir.bodySlice(extra_index, decls_len); + }; + try pt.scanNamespace(namespace_index, decls); } /// Regardless of the file status, will create a `Decl` if none exists so that we can track @@ -1936,6 +1950,9 @@ const ScanDeclIter = struct { const cau, const nav = if (existing_cau) |cau_index| cau_nav: { const nav_index = ip.getCau(cau_index).owner.unwrap().nav; const nav = ip.getNav(nav_index); + if (nav.name != name) { + std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) }); + } 
assert(nav.name == name); assert(nav.fqn == fqn); break :cau_nav .{ cau_index, nav_index };