From 5e0a1a3c3170a0f675a5e9f559a814242dd83c08 Mon Sep 17 00:00:00 2001 From: Evan Wallace Date: Sat, 13 Mar 2021 22:15:17 -0800 Subject: [PATCH 1/5] add "entry names", overhaul chunk path algorithm --- internal/bundler/bundler.go | 13 +- internal/bundler/linker.go | 1036 ++++++++++------- .../bundler/snapshots/snapshots_splitting.txt | 112 +- internal/config/config.go | 10 + internal/fs/fs_mock.go | 3 + internal/fs/fs_mock_test.go | 7 + internal/helpers/joiner.go | 10 + internal/sourcemap/sourcemap.go | 121 ++ lib/common.ts | 2 + lib/types.ts | 1 + pkg/api/api.go | 1 + pkg/api/api_impl.go | 1 + pkg/cli/cli_impl.go | 3 + scripts/js-api-tests.js | 87 +- scripts/verify-source-map.js | 6 +- 15 files changed, 925 insertions(+), 488 deletions(-) diff --git a/internal/bundler/bundler.go b/internal/bundler/bundler.go index 9a9c124358d..704e141a313 100644 --- a/internal/bundler/bundler.go +++ b/internal/bundler/bundler.go @@ -52,10 +52,9 @@ type file struct { // fully assembled later. jsonMetadataChunk string - // The path of this entry point relative to the lowest common ancestor - // directory containing all entry points. Note: this must have OS-independent - // path separators (i.e. '/' not '\'). - entryPointRelPath string + // If "isEntryPoint" is true, this is the index of the corresponding entry + // point chunk. + entryPointChunkIndex uint32 // If this file ends up being used in the bundle, these are additional files // that must be written to the output directory. It's used by the "file" @@ -1516,6 +1515,12 @@ func applyOptionDefaults(options *config.Options) { } // Configure default path templates + if len(options.EntryPathTemplate) == 0 { + options.EntryPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.DirPlaceholder}, + {Data: "/", Placeholder: config.NamePlaceholder}, + } + } if len(options.ChunkPathTemplate) == 0 { options.ChunkPathTemplate = []config.PathTemplate{ {Data: "./", Placeholder: config.NamePlaceholder}, diff --git a/internal/bundler/linker.go b/internal/bundler/linker.go index c19a7ef5a70..629ed49eed8 100644 --- a/internal/bundler/linker.go +++ b/internal/bundler/linker.go @@ -6,10 +6,13 @@ import ( "encoding/base64" "encoding/binary" "fmt" + "hash" + "math/rand" "path" "sort" "strings" "sync" + "time" "github.com/evanw/esbuild/internal/ast" "github.com/evanw/esbuild/internal/compat" @@ -68,10 +71,6 @@ type linkerContext struct { files []file hasErrors bool - // This is the relative path for automatically-generated code splitting chunks - // relative to the output directory - generatedChunkRelDir string - // This helps avoid an infinite loop when matching imports to exports cycleDetector []importTracker @@ -96,6 +95,13 @@ type linkerContext struct { // Calling this will block until the computation is done. The resulting value // is shared between threads and must be treated as immutable. dataForSourceMaps func() []dataForSourceMap + + // The unique key prefix is a random string that is unique to every linking + // operation. It is used as a prefix for the unique keys assigned to every + // chunk. These unique keys are used to identify each chunk before the final + // output paths have been computed. + uniqueKeyPrefix string + uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form } // This contains linker-specific metadata corresponding to a "file" struct @@ -316,15 +322,9 @@ type partRange struct { } type chunkInfo struct { - // The path of this chunk's directory relative to the output directory. 
Note: - // this must have OS-independent path separators (i.e. '/' not '\'). - relDir string - - // The name of this chunk. This is initially empty for non-entry point chunks - // because the file name contains a hash of the file contents, which haven't - // been generated yet. Don't access this directly. Instead call "relPath()" - // which first checks that the base name is not empty. - baseNameOrEmpty string + // This is a random string and is used to represent the output path of this + // chunk before the final output path has been computed. + uniqueKey string filesWithPartsInChunk map[uint32]bool filesInChunkInOrder []uint32 @@ -341,6 +341,44 @@ type chunkInfo struct { // This is the representation-specific information chunkRepr chunkRepr + + // This is the final path of this chunk relative to the output directory, but + // without the substitution of the final hash (since it hasn't been computed). + finalTemplate []config.PathTemplate + + // This is the final path of this chunk relative to the output directory. It + // is the substitution of the final hash into "finalTemplate". + finalRelPath string + + // When this chunk is initially generated in isolation, the output pieces + // will contain slices of the output with the unique keys of other chunks + // omitted. The output hash will contain the hash of those pieces. At this + // point, this variable is the current value of the output hash. + isolatedChunkHash []byte + + // Later on in the linking process, the hashes of the referenced other chunks + // will be mixed into the hash. This is separated into two phases like this + // to handle cycles in the chunk import graph. + outputPieces []outputPiece + outputHash hash.Hash + + // Other fields relating to the output file for this chunk + jsonMetadataChunkCallback func(finalOutputSize int) []byte + outputSourceMap sourcemap.SourceMapPieces + isExecutable bool +} + +// This is a chunk of source code followed by a reference to another chunk. For +// example, the file "@import 'CHUNK0001'; body { color: black; }" would be +// represented by two pieces, one with the data "@import '" and another with the +// data "'; body { color: black; }". The first would have the chunk index 1 and +// the second would have an invalid chunk index. +type outputPiece struct { + data []byte + + // Note: This may be invalid. For example, the chunk may not contain any + // imports, in which case there is one piece with data and no chunk index. + chunkIndex ast.Index32 } type generateContinue struct { @@ -349,7 +387,7 @@ type generateContinue struct { } type chunkRepr interface { - generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile + generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) } type chunkReprJS struct { @@ -363,15 +401,6 @@ type chunkReprJS struct { type chunkReprCSS struct { } -// Returns the path of this chunk relative to the output directory. Note: -// this must have OS-independent path separators (i.e. '/' not '\'). -func (chunk *chunkInfo) relPath() string { - if chunk.baseNameOrEmpty == "" { - panic("Internal error") - } - return path.Join(chunk.relDir, chunk.baseNameOrEmpty) -} - func newLinkerContext( options *config.Options, log logger.Log, @@ -393,15 +422,6 @@ func newLinkerContext( symbols: js_ast.NewSymbolMap(len(files)), reachableFiles: reachableFiles, dataForSourceMaps: dataForSourceMaps, - - // Note: This contains placeholders instead of what the placeholders are - // substituted with. 
That should be fine though because this should only - // ever be used for figuring out how many "../" to add to a relative path - // from a chunk whose final path hasn't been calculated yet to a chunk - // whose final path has already been calculated. That and placeholders are - // never substituted with something containing a "/" so substitution should - // never change the "../" count. - generatedChunkRelDir: fs.Dir(config.TemplateToString(options.ChunkPathTemplate)), } // Clone various things since we may mutate them later @@ -613,7 +633,24 @@ func (c *linkerContext) addPartToFile(sourceIndex uint32, part js_ast.Part, part return partIndex } +func (c *linkerContext) generateUniqueKeyPrefix() bool { + var data [12]byte + rand.Seed(time.Now().UnixNano()) + if _, err := rand.Read(data[:]); err != nil { + c.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error())) + return false + } + + // This is 16 bytes and shouldn't generate escape characters when put into strings + c.uniqueKeyPrefix = base64.URLEncoding.EncodeToString(data[:]) + c.uniqueKeyPrefixBytes = []byte(c.uniqueKeyPrefix) + return true +} + func (c *linkerContext) link() []OutputFile { + if !c.generateUniqueKeyPrefix() { + return nil + } c.scanImportsAndExports() // Stop now if there were errors @@ -641,137 +678,192 @@ func (c *linkerContext) link() []OutputFile { } func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []OutputFile { - // Determine the order of files within the chunk ahead of time. This may - // generate additional CSS chunks from JS chunks that import CSS files. - { - originalChunks := chunks - for i, chunk := range originalChunks { - js, jsParts, css := c.chunkFileOrder(&chunk) - - switch chunk.chunkRepr.(type) { - case *chunkReprJS: - chunks[i].filesInChunkInOrder = js - chunks[i].partsInChunkInOrder = jsParts - - // If JS files include CSS files, make a sibling chunk for the CSS - if len(css) > 0 { - baseNameOrEmpty := chunk.baseNameOrEmpty - if baseNameOrEmpty != "" { - if js := c.options.OutputExtensionJS; strings.HasSuffix(baseNameOrEmpty, js) { - baseNameOrEmpty = baseNameOrEmpty[:len(baseNameOrEmpty)-len(js)] - } - baseNameOrEmpty += c.options.OutputExtensionCSS - } - chunks = append(chunks, chunkInfo{ - filesInChunkInOrder: css, - entryBits: chunk.entryBits, - isEntryPoint: chunk.isEntryPoint, - sourceIndex: chunk.sourceIndex, - entryPointBit: chunk.entryPointBit, - relDir: chunk.relDir, - baseNameOrEmpty: baseNameOrEmpty, - filesWithPartsInChunk: make(map[uint32]bool), - chunkRepr: &chunkReprCSS{}, - }) - } - - case *chunkReprCSS: - chunks[i].filesInChunkInOrder = css - } - } - } - - // We want to process chunks with as much parallelism as possible. However, - // content hashing means chunks that import other chunks must be completed - // after the imported chunks are completed because the import paths contain - // the content hash. It's only safe to process a chunk when the dependency - // count reaches zero. 
- type ordering struct { - dependencies sync.WaitGroup - dependents []uint32 - } - chunkOrdering := make([]ordering, len(chunks)) - for chunkIndex, chunk := range chunks { - chunkOrdering[chunkIndex].dependencies.Add(len(chunk.crossChunkImports)) - for _, otherChunkIndex := range chunk.crossChunkImports { - dependents := &chunkOrdering[otherChunkIndex].dependents - *dependents = append(*dependents, uint32(chunkIndex)) - } - } - - // Check for loops in the dependency graph since they cause a deadlock - var check func(int, []int) - check = func(chunkIndex int, path []int) { - for _, otherChunkIndex := range path { - if chunkIndex == otherChunkIndex { - panic("Internal error: Chunk import graph contains a cycle") - } - } - path = append(path, chunkIndex) - for _, otherChunkIndex := range chunks[chunkIndex].crossChunkImports { - check(int(otherChunkIndex), path) - } - } - for i := range chunks { - check(i, nil) - } - - results := make([][]OutputFile, len(chunks)) - resultsWaitGroup := sync.WaitGroup{} - resultsWaitGroup.Add(len(chunks)) - // Generate each chunk on a separate goroutine + generateWaitGroup := sync.WaitGroup{} + generateWaitGroup.Add(len(chunks)) for i := range chunks { go func(i int) { chunk := &chunks[i] - order := &chunkOrdering[i] // Start generating the chunk without dependencies, but stop when // dependencies are needed. This returns a callback that is called // later to resume generating the chunk once dependencies are known. - resume := chunk.chunkRepr.generate(c, chunk) - - // Wait for all dependencies to be resolved first - order.dependencies.Wait() + resume := chunk.chunkRepr.generate(c, chunks, chunk) // Fill in the cross-chunk import records now that the paths are known crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports)) crossChunkAbsPaths := make([]string, len(chunk.crossChunkImports)) for i, otherChunkIndex := range chunk.crossChunkImports { - relPath := chunks[otherChunkIndex].relPath() - crossChunkAbsPaths[i] = c.fs.Join(c.options.AbsOutputDir, relPath) + crossChunkAbsPaths[i] = chunks[otherChunkIndex].uniqueKey crossChunkImportRecords[i] = ast.ImportRecord{ Kind: ast.ImportStmt, - Path: logger.Path{Text: c.pathBetweenChunks(chunk.relDir, relPath)}, + Path: logger.Path{Text: chunks[otherChunkIndex].uniqueKey}, } } // Generate the chunk - results[i] = resume(generateContinue{ + resume(generateContinue{ crossChunkAbsPaths: crossChunkAbsPaths, crossChunkImportRecords: crossChunkImportRecords, }) - // Wake up any dependents now that we're done - for _, chunkIndex := range order.dependents { - chunkOrdering[chunkIndex].dependencies.Done() - } - resultsWaitGroup.Done() + generateWaitGroup.Done() }(i) } + generateWaitGroup.Wait() + + // Compute the final hashes of each chunk. This can technically be done in + // parallel but it probably doesn't matter so much because we're not hashing + // that much data. 
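+	// For example (hypothetical chunk graph): if "entry.js" imports "chunk-A"
+	// and "chunk-A" imports "chunk-B", the final hash for "entry.js" mixes in
+	// the isolated hashes of all three chunks, so editing "chunk-B" changes
+	// the computed hash of every chunk that transitively imports it (and thus
+	// any "[hash]" placeholder in their output paths).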
+ visited := make([]uint32, len(chunks)) + var finalHash [sha1.Size]byte + for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + + // Compute the final hash using the isolated hashes of the dependencies + appendIsolatedHashesForImportedChunks(chunk.outputHash, chunks, uint32(chunkIndex), visited, ^uint32(chunkIndex)) + chunk.outputHash.Sum(finalHash[:0]) + + // Render the last remaining placeholder in the template + hash := hashForFileName(finalHash) + chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{ + Hash: &hash, + })) + } + + // Generate the final output files by joining file pieces together + var resultsWaitGroup sync.WaitGroup + results := make([][]OutputFile, len(chunks)) + resultsWaitGroup.Add(len(chunks)) + for chunkIndex, chunk := range chunks { + go func(chunkIndex int, chunk chunkInfo) { + var outputFiles []OutputFile + + // Each file may optionally contain additional files to be copied to the + // output directory. This is used by the "file" loader. + for _, sourceIndex := range chunk.filesInChunkInOrder { + outputFiles = append(outputFiles, c.files[sourceIndex].additionalFiles...) + } + + // Path substitution for the chunk itself + finalRelDir := c.fs.Dir(chunk.finalRelPath) + outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunks, chunk.outputPieces, func(finalRelPathForImport string) string { + return c.pathBetweenChunks(finalRelDir, finalRelPathForImport) + }) + + // Generate the optional source map for this chunk + if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.Suffix != nil { + outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts) + finalRelPathForSourceMap := chunk.finalRelPath + ".map" + + // Potentially write a trailing source map comment + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment: + importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap) + if strings.HasPrefix(importPath, "./") { + importPath = importPath[2:] + } + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=") + outputContentsJoiner.AddString(importPath) + outputContentsJoiner.AddString("\n") + + case config.SourceMapInline, config.SourceMapInlineAndExternal: + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=data:application/json;base64,") + outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap)) + outputContentsJoiner.AddString("\n") + } + + // Potentially write the external source map file + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment: + outputFiles = append(outputFiles, OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap), + Contents: outputSourceMap, + jsonMetadataChunk: fmt.Sprintf( + "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(outputSourceMap)), + }) + } + } + + // Finalize the output contents + outputContents := outputContentsJoiner.Done() - // Join the results in chunk order for determinism + // Path substitution for the JSON metadata + var jsonMetadataChunk string + if c.options.NeedsMetafile { + jsonMetadataChunkPieces := c.breakOutputIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)), uint32(len(chunks))) + jsonMetadataChunkBytes, _ := c.substituteFinalPaths(chunks, jsonMetadataChunkPieces, 
func(finalRelPathForImport string) string { + return c.res.PrettyPath(logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"}) + }) + jsonMetadataChunk = string(jsonMetadataChunkBytes.Done()) + } + + // Generate the output file for this chunk + outputFiles = append(outputFiles, OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath), + Contents: outputContents, + jsonMetadataChunk: jsonMetadataChunk, + IsExecutable: chunk.isExecutable, + }) + + results[chunkIndex] = outputFiles + resultsWaitGroup.Done() + }(chunkIndex, chunk) + } resultsWaitGroup.Wait() - var outputFiles []OutputFile - for _, group := range results { - outputFiles = append(outputFiles, group...) + + // Merge the output files from the different goroutines together in order + outputFilesLen := 0 + for _, result := range results { + outputFilesLen += len(result) + } + outputFiles := make([]OutputFile, 0, outputFilesLen) + for _, result := range results { + outputFiles = append(outputFiles, result...) } return outputFiles } +// Given a set of output pieces (i.e. a buffer already divided into the spans +// between import paths), substitute the final import paths in and then join +// everything into a single byte buffer. +func (c *linkerContext) substituteFinalPaths( + chunks []chunkInfo, + pieces []outputPiece, + modifyPath func(string) string, +) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) { + var shift sourcemap.SourceMapShift + shifts = make([]sourcemap.SourceMapShift, 0, len(pieces)) + shifts = append(shifts, shift) + + for _, piece := range pieces { + var dataOffset sourcemap.LineColumnOffset + j.AddBytes(piece.data) + dataOffset.AdvanceBytes(piece.data) + shift.Before.Add(dataOffset) + shift.After.Add(dataOffset) + + if piece.chunkIndex.IsValid() { + chunk := chunks[piece.chunkIndex.GetIndex()] + importPath := modifyPath(chunk.finalRelPath) + j.AddString(importPath) + shift.Before.AdvanceString(chunk.uniqueKey) + shift.After.AdvanceString(importPath) + shifts = append(shifts, shift) + } + } + + return +} + func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string { // Return an absolute path if a public path has been configured if c.options.PublicPath != "" { + if strings.HasPrefix(toRelPath, "./") { + toRelPath = toRelPath[2:] + } return c.options.PublicPath + toRelPath } @@ -795,8 +887,97 @@ func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) s return relPath } +// Returns the path of this file relative to "outbase", which is then ready to +// be joined with the absolute output directory path. The directory and name +// components are returned separately for convenience. +// +// This makes sure to have the directory end in a slash so that it can be +// substituted into a path template without necessarily having a "/" after it. +// Extra slashes should get cleaned up automatically when we join it with the +// output directory. +func (c *linkerContext) pathRelativeToOutbase(sourceIndex uint32, stdExt string) (relDir string, baseName string, baseExt string) { + file := &c.files[sourceIndex] + relDir = "./" + baseExt = stdExt + + // If the output path was configured explicitly, use it verbatim + if c.options.AbsOutputFile != "" { + baseName = c.fs.Base(c.options.AbsOutputFile) + + // Strip off the extension + ext := c.fs.Ext(baseName) + baseName = baseName[:len(baseName)-len(ext)] + + // Use the extension from the explicit output file path. 
However, don't do + // that if this is a CSS chunk but the entry point file is not CSS. In that + // case use the standard extension. This happens when importing CSS into JS. + if _, ok := file.repr.(*reprCSS); ok || stdExt != c.options.OutputExtensionCSS { + baseExt = ext + } + return + } + + // Come up with a path for virtual paths (i.e. non-file-system paths) + if file.source.KeyPath.Namespace != "file" { + baseName = baseFileNameForVirtualModulePath(file.source.KeyPath.Text) + + // Swap the file extension for the standard one + baseName = baseName[:len(baseName)-len(path.Ext(baseName))] + return + } + + // Try to get a relative path to the base directory + relPath, ok := c.fs.Rel(c.options.AbsOutputBase, file.source.KeyPath.Text) + if !ok { + // This can fail in some situations such as on different drives on + // Windows. In that case we just use the file name. + baseName = c.fs.Base(file.source.KeyPath.Text) + + // Swap the file extension for the standard one + baseName = baseName[:len(baseName)-len(c.fs.Ext(baseName))] + return + } + + // Now we finally have a relative path + relDir = c.fs.Dir(relPath) + "/" + baseName = c.fs.Base(relPath) + + // Swap the file extension for the standard one + baseName = baseName[:len(baseName)-len(c.fs.Ext(baseName))] + + // Use platform-independent slashes + relDir = strings.ReplaceAll(relDir, "\\", "/") + + // Replace leading "../" so we don't try to write outside of the output + // directory. This normally can't happen because "AbsOutputBase" is + // automatically computed to contain all entry point files, but it can + // happen if someone sets it manually via the "outbase" API option. + // + // Note that we can't just strip any leading "../" because that could + // cause two separate entry point paths to collide. For example, there + // could be both "src/index.js" and "../src/index.js" as entry points. + dotDotCount := 0 + for strings.HasPrefix(relDir[dotDotCount*3:], "../") { + dotDotCount++ + } + if dotDotCount > 0 { + // The use of "_.._" here is somewhat arbitrary but it is unlikely to + // collide with a folder named by a human and it works on Windows + // (Windows doesn't like names that end with a "."). And not starting + // with a "." means that it will not be hidden on Unix. + relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:] + } + return +} + func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { - if len(chunks) < 2 { + jsChunks := 0 + for _, chunk := range chunks { + if _, ok := chunk.chunkRepr.(*chunkReprJS); ok { + jsChunks++ + } + } + if jsChunks < 2 { // No need to compute cross-chunk dependencies if there can't be any return } @@ -833,7 +1014,8 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { for _, importRecordIndex := range part.ImportRecordIndices { record := &repr.ast.ImportRecords[importRecordIndex] if record.SourceIndex.IsValid() && c.isExternalDynamicImport(record) { - record.Path.Text = c.pathBetweenChunks(chunk.relDir, c.files[record.SourceIndex.GetIndex()].entryPointRelPath) + otherChunkIndex := c.files[record.SourceIndex.GetIndex()].entryPointChunkIndex + record.Path.Text = chunks[otherChunkIndex].uniqueKey record.SourceIndex = ast.Index32{} } } @@ -925,7 +1107,7 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // these chunks are evaluated for their side effects too. 
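 		// (Note that only JS chunks are considered below: CSS chunks that share
 		// this entry point's bit are emitted as sibling output files rather than
 		// being imported from the JS entry point chunk.)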
if chunk.isEntryPoint { for otherChunkIndex, otherChunk := range chunks { - if chunkIndex != otherChunkIndex && otherChunk.entryBits.hasBit(chunk.entryPointBit) { + if _, ok := otherChunk.chunkRepr.(*chunkReprJS); ok && chunkIndex != otherChunkIndex && otherChunk.entryBits.hasBit(chunk.entryPointBit) { imports := chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] = imports } @@ -2798,8 +2980,6 @@ func (c *linkerContext) computeChunks() []chunkInfo { // Compute entry point names for i, entryPoint := range c.entryPoints { - var relDir string - var baseName string var chunkRepr chunkRepr file := &c.files[entryPoint] @@ -2810,54 +2990,6 @@ func (c *linkerContext) computeChunks() []chunkInfo { chunkRepr = &chunkReprCSS{} } - if c.options.AbsOutputFile != "" { - baseName = c.fs.Base(c.options.AbsOutputFile) - } else { - source := file.source - if source.KeyPath.Namespace != "file" { - baseName = baseFileNameForVirtualModulePath(source.KeyPath.Text) - } else if relPath, ok := c.fs.Rel(c.options.AbsOutputBase, source.KeyPath.Text); ok { - relDir = c.fs.Dir(relPath) - baseName = c.fs.Base(relPath) - relDir = strings.ReplaceAll(relDir, "\\", "/") - - // Replace leading "../" so we don't try to write outside of the output - // directory. This normally can't happen because "AbsOutputBase" is - // automatically computed to contain all entry point files, but it can - // happen if someone sets it manually via the "outbase" API option. - // - // Note that we can't just strip any leading "../" because that could - // cause two separate entry point paths to collide. For example, there - // could be both "src/index.js" and "../src/index.js" as entry points. - dotDotCount := 0 - for strings.HasPrefix(relDir[dotDotCount*3:], "../") { - dotDotCount++ - } - if dotDotCount > 0 { - // The use of "_.._" here is somewhat arbitrary but it is unlikely to - // collide with a folder named by a human and it works on Windows - // (Windows doesn't like names that end with a "."). And not starting - // with a "." means that it will not be hidden on Unix. 
- relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:] - } - } else { - baseName = c.fs.Base(source.KeyPath.Text) - } - - // Swap the extension for the standard one - ext := c.fs.Ext(baseName) - baseName = baseName[:len(baseName)-len(ext)] - switch chunkRepr.(type) { - case *chunkReprJS: - baseName += c.options.OutputExtensionJS - case *chunkReprCSS: - baseName += c.options.OutputExtensionCSS - } - } - - // Always use cross-platform path separators to avoid problems with Windows - file.entryPointRelPath = path.Join(relDir, baseName) - // Create a chunk for the entry point here to ensure that the chunk is // always generated even if the resulting file is empty entryBits := newBitSet(uint(len(c.entryPoints))) @@ -2867,8 +2999,6 @@ func (c *linkerContext) computeChunks() []chunkInfo { isEntryPoint: true, sourceIndex: entryPoint, entryPointBit: uint(i), - relDir: relDir, - baseNameOrEmpty: baseName, filesWithPartsInChunk: make(map[uint32]bool), chunkRepr: chunkRepr, } @@ -2889,7 +3019,6 @@ func (c *linkerContext) computeChunks() []chunkInfo { if !ok { chunk.entryBits = partMeta.entryBits chunk.filesWithPartsInChunk = make(map[uint32]bool) - chunk.relDir = c.generatedChunkRelDir chunk.chunkRepr = &chunkReprJS{} chunks[key] = chunk } @@ -2906,7 +3035,6 @@ func (c *linkerContext) computeChunks() []chunkInfo { if !ok { chunk.entryBits = file.entryBits chunk.filesWithPartsInChunk = make(map[uint32]bool) - chunk.relDir = c.generatedChunkRelDir chunk.chunkRepr = &chunkReprJS{} chunks[key] = chunk } @@ -2923,9 +3051,87 @@ func (c *linkerContext) computeChunks() []chunkInfo { } sort.Strings(sortedKeys) sortedChunks := make([]chunkInfo, len(chunks)) - for i, key := range sortedKeys { - sortedChunks[i] = chunks[key] + for chunkIndex, key := range sortedKeys { + chunk := chunks[key] + sortedChunks[chunkIndex] = chunk + + // Map from the entry point file to this chunk. We will need this later if + // a file contains a dynamic import to this entry point, since we'll need + // to look up the path for this chunk to use with the import. + if chunk.isEntryPoint { + c.files[chunk.sourceIndex].entryPointChunkIndex = uint32(chunkIndex) + } } + + // Determine the order of files within the chunk ahead of time. This may + // generate additional CSS chunks from JS chunks that import CSS files. + { + for chunkIndex, chunk := range sortedChunks { + js, jsParts, css := c.chunkFileOrder(&chunk) + + switch chunk.chunkRepr.(type) { + case *chunkReprJS: + sortedChunks[chunkIndex].filesInChunkInOrder = js + sortedChunks[chunkIndex].partsInChunkInOrder = jsParts + + // If JS files include CSS files, make a sibling chunk for the CSS + if len(css) > 0 { + sortedChunks = append(sortedChunks, chunkInfo{ + filesInChunkInOrder: css, + entryBits: chunk.entryBits, + isEntryPoint: chunk.isEntryPoint, + sourceIndex: chunk.sourceIndex, + entryPointBit: chunk.entryPointBit, + filesWithPartsInChunk: make(map[uint32]bool), + chunkRepr: &chunkReprCSS{}, + }) + } + + case *chunkReprCSS: + sortedChunks[chunkIndex].filesInChunkInOrder = css + } + } + } + + // Assign general information to each chunk + for chunkIndex := range sortedChunks { + chunk := &sortedChunks[chunkIndex] + + // Assign a unique key to each chunk. This key encodes the index directly so + // we can easily recover it later without needing to look it up in a map. The + // last 8 numbers of the key are the chunk index. 
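+		// For example (hypothetical values): with the unique key prefix
+		// "pJLWcQEmK9MflBSg", chunk index 7 is assigned the unique key
+		// "pJLWcQEmK9MflBSg00000007", where "00000007" is the zero-padded
+		// run of decimal digits produced by "%08d" below.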
+ chunk.uniqueKey = fmt.Sprintf("%s%08d", c.uniqueKeyPrefix, chunkIndex) + + // Determine the standard file extension + var stdExt string + switch chunk.chunkRepr.(type) { + case *chunkReprJS: + stdExt = c.options.OutputExtensionJS + case *chunkReprCSS: + stdExt = c.options.OutputExtensionCSS + } + + // Compute the template substitutions + var dir, base, ext string + var template []config.PathTemplate + if chunk.isEntryPoint { + dir, base, ext = c.pathRelativeToOutbase(chunk.sourceIndex, stdExt) + template = c.options.EntryPathTemplate + } else { + dir = "./" + base = "chunk" + ext = stdExt + template = c.options.ChunkPathTemplate + } + + // Determine the output path template + template = append(append(make([]config.PathTemplate, 0, len(template)), template...), config.PathTemplate{Data: ext}) + chunk.finalTemplate = config.SubstituteTemplate(template, config.PathPlaceholders{ + Dir: &dir, + Name: &base, + }) + } + return sortedChunks } @@ -3721,16 +3927,23 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui return r } -func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile { - var results []OutputFile +func (repr *chunkReprJS) generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) { compileResults := make([]compileResultJS, 0, len(chunk.partsInChunkInOrder)) runtimeMembers := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope.Members commonJSRef := js_ast.FollowSymbols(c.symbols, runtimeMembers["__commonJS"].Ref) toModuleRef := js_ast.FollowSymbols(c.symbols, runtimeMembers["__toModule"].Ref) r := c.renameSymbolsInChunk(chunk, chunk.filesInChunkInOrder) - chunkAbsDir := c.fs.Join(c.options.AbsOutputDir, chunk.relDir) dataForSourceMaps := c.dataForSourceMaps() + // Note: This contains placeholders instead of what the placeholders are + // substituted with. That should be fine though because this should only + // ever be used for figuring out how many "../" to add to a relative path + // from a chunk whose final path hasn't been calculated yet to a chunk + // whose final path has already been calculated. That and placeholders are + // never substituted with something containing a "/" so substitution should + // never change the "../" count. + chunkAbsDir := c.fs.Dir(c.fs.Join(c.options.AbsOutputDir, config.TemplateToString(chunk.finalTemplate))) + // Generate JavaScript for each file in parallel waitGroup := sync.WaitGroup{} for _, partRange := range chunk.partsInChunkInOrder { @@ -3756,14 +3969,8 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener ) } - // Each file may optionally contain additional files to be copied to the - // output directory. This is used by the "file" loader. - for _, sourceIndex := range chunk.filesInChunkInOrder { - results = append(results, c.files[sourceIndex].additionalFiles...) 
- } - // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) []OutputFile { + return func(continueData generateContinue) { // Also generate the cross-chunk binding code var crossChunkPrefix []byte var crossChunkSuffix []byte @@ -3857,27 +4064,27 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener } // Start the metadata - sbMeta := strings.Builder{} + jMeta := helpers.Joiner{} if c.options.NeedsMetafile { // Print imports isFirstMeta := true - sbMeta.WriteString("{\n \"imports\": [") + jMeta.AddString("{\n \"imports\": [") for i, importAbsPath := range continueData.crossChunkAbsPaths { if isFirstMeta { isFirstMeta = false } else { - sbMeta.WriteString(",") + jMeta.AddString(",") } - sbMeta.WriteString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) } if !isFirstMeta { - sbMeta.WriteString("\n ") + jMeta.AddString("\n ") } // Print exports - sbMeta.WriteString("],\n \"exports\": [") + jMeta.AddString("],\n \"exports\": [") var aliases []string if c.options.OutputFormat.KeepES6ImportExportSyntax() { if chunk.isEntryPoint { @@ -3903,19 +4110,19 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener if isFirstMeta { isFirstMeta = false } else { - sbMeta.WriteString(",") + jMeta.AddString(",") } - sbMeta.WriteString(fmt.Sprintf("\n %s", + jMeta.AddString(fmt.Sprintf("\n %s", js_printer.QuoteForJSON(alias, c.options.ASCIIOnly))) } if !isFirstMeta { - sbMeta.WriteString("\n ") + jMeta.AddString("\n ") } if chunk.isEntryPoint { entryPoint := c.files[chunk.sourceIndex].source.PrettyPath - sbMeta.WriteString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(entryPoint, c.options.ASCIIOnly))) + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(entryPoint, c.options.ASCIIOnly))) } else { - sbMeta.WriteString("],\n \"inputs\": {") + jMeta.AddString("],\n \"inputs\": {") } } @@ -4030,9 +4237,7 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener } // Make sure the file ends with a newline - if j.Length() > 0 && j.LastByte() != '\n' { - j.AddString("\n") - } + j.EnsureNewlineAtEnd() // Add all unique license comments to the end of the file. 
These are // deduplicated because some projects have thousands of files with the same @@ -4050,117 +4255,36 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener } if c.options.SourceMap != config.SourceMapNone { - sourceMap := c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) - var writeDataURL bool - var writeFile bool - switch c.options.SourceMap { - case config.SourceMapInline: - writeDataURL = true - case config.SourceMapLinkedWithComment, config.SourceMapExternalWithoutComment: - writeFile = true - case config.SourceMapInlineAndExternal: - writeDataURL = true - writeFile = true - } - - // Write the generated source map as an inline comment - if writeDataURL { - j.AddString("//# sourceMappingURL=data:application/json;base64,") - j.AddString(base64.StdEncoding.EncodeToString(sourceMap)) - j.AddString("\n") - } - - // Write the generated source map as an external file - if writeFile { - // Optionally add metadata about the file - var jsonMetadataChunk string - if c.options.NeedsMetafile { - jsonMetadataChunk = fmt.Sprintf( - "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(sourceMap)) - } - - // Figure out the base name for the source map which may include the content hash - sourceMapBaseName := chunk.baseNameOrEmpty - sourceMapRelDir := chunk.relDir - if sourceMapBaseName == "" { - var hash string - name := "chunk" - if config.HasPlaceholder(c.options.ChunkPathTemplate, config.HashPlaceholder) { - hash = c.chunkHashForFileName(chunk, sourceMap) - } - - relPath := config.TemplateToString(config.SubstituteTemplate(c.options.ChunkPathTemplate, config.PathPlaceholders{ - Name: &name, - Hash: &hash, - })) + c.options.OutputExtensionJS - - sourceMapBaseName = path.Base(relPath) - sourceMapRelDir = relPath[:len(relPath)-len(sourceMapBaseName)] - } - sourceMapBaseName += ".map" - - // Add a comment linking the source to its map - if c.options.SourceMap == config.SourceMapLinkedWithComment { - j.AddString("//# sourceMappingURL=") - j.AddString(sourceMapBaseName) - j.AddString("\n") - } - - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, sourceMapRelDir, sourceMapBaseName), - Contents: sourceMap, - jsonMetadataChunk: jsonMetadataChunk, - }) - } + chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) } // The JavaScript contents are done now that the source map comment is in jsContents := j.Done() - // Figure out the base name for this chunk now that the content hash is known - if chunk.baseNameOrEmpty == "" { - var hash string - name := "chunk" - if config.HasPlaceholder(c.options.ChunkPathTemplate, config.HashPlaceholder) { - hash = c.chunkHashForFileName(chunk, jsContents) - } - - relPath := config.TemplateToString(config.SubstituteTemplate(c.options.ChunkPathTemplate, config.PathPlaceholders{ - Name: &name, - Hash: &hash, - })) + c.options.OutputExtensionJS - - chunk.baseNameOrEmpty = path.Base(relPath) - chunk.relDir = relPath[:len(relPath)-len(chunk.baseNameOrEmpty)] - } - - // End the metadata - var jsonMetadataChunk string + // End the metadata lazily. The final output size is not known until the + // final import paths are substituted into the output pieces generated below. 
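+		// (The callback stored below captures "jMeta"; it is invoked later from
+		// "generateChunksInParallel" with the byte count of the finished file,
+		// after the unique keys have been replaced by the final import paths.)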
if c.options.NeedsMetafile { - isFirstMeta := true - for _, path := range metaOrder { - if isFirstMeta { - isFirstMeta = false - } else { - sbMeta.WriteString(",") + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { + isFirstMeta := true + for _, path := range metaOrder { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") + } + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", + js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path])) } - sbMeta.WriteString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", - js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path])) - } - if !isFirstMeta { - sbMeta.WriteString("\n ") + if !isFirstMeta { + jMeta.AddString("\n ") + } + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() } - sbMeta.WriteString(fmt.Sprintf("},\n \"bytes\": %d\n }", len(jsContents))) - jsonMetadataChunk = sbMeta.String() } - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.relPath()), - Contents: jsContents, - jsonMetadataChunk: jsonMetadataChunk, - IsExecutable: isExecutable, - }) - return results + c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(jsContents, uint32(len(chunks)))) + chunk.isExecutable = isExecutable } } @@ -4213,7 +4337,7 @@ type externalImportCSS struct { conditions []css_ast.Token } -func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile { +func (repr *chunkReprCSS) generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) { var results []OutputFile compileResults := make([]compileResultCSS, 0, len(chunk.filesInChunkInOrder)) @@ -4262,7 +4386,7 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene } // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) []OutputFile { + return func(continueData generateContinue) { waitGroup.Wait() j := helpers.Joiner{} newlineBeforeComment := false @@ -4308,22 +4432,22 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene } // Start the metadata - sbMeta := strings.Builder{} + jMeta := helpers.Joiner{} if c.options.NeedsMetafile { isFirstMeta := true - sbMeta.WriteString("{\n \"imports\": [") + jMeta.AddString("{\n \"imports\": [") for i, importAbsPath := range continueData.crossChunkAbsPaths { if isFirstMeta { isFirstMeta = false } else { - sbMeta.WriteString(",") + jMeta.AddString(",") } - sbMeta.WriteString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) } if !isFirstMeta { - sbMeta.WriteString("\n ") + jMeta.AddString("\n ") } if chunk.isEntryPoint { file := &c.files[chunk.sourceIndex] @@ -4332,13 +4456,13 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene // importing CSS into JavaScript. We want this to be a 1:1 relationship // and there is already an output file for the JavaScript entry point. 
if _, ok := file.repr.(*reprCSS); ok { - sbMeta.WriteString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(file.source.PrettyPath, c.options.ASCIIOnly))) } else { - sbMeta.WriteString("],\n \"inputs\": {") + jMeta.AddString("],\n \"inputs\": {") } } else { - sbMeta.WriteString("],\n \"inputs\": {") + jMeta.AddString("],\n \"inputs\": {") } } isFirstMeta := true @@ -4361,18 +4485,16 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene if isFirstMeta { isFirstMeta = false } else { - sbMeta.WriteString(",") + jMeta.AddString(",") } - sbMeta.WriteString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", js_printer.QuoteForJSON(c.files[compileResult.sourceIndex].source.PrettyPath, c.options.ASCIIOnly), len(compileResult.printedCSS))) } } // Make sure the file ends with a newline - if j.Length() > 0 && j.LastByte() != '\n' { - j.AddString("\n") - } + j.EnsureNewlineAtEnd() if len(c.options.CSSFooter) > 0 { j.AddString(c.options.CSSFooter) @@ -4382,40 +4504,180 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene // The CSS contents are done now that the source map comment is in cssContents := j.Done() - // Figure out the base name for this chunk now that the content hash is known - if chunk.baseNameOrEmpty == "" { - var hash string - name := "chunk" - if config.HasPlaceholder(c.options.ChunkPathTemplate, config.HashPlaceholder) { - hash = c.chunkHashForFileName(chunk, cssContents) + // End the metadata lazily. The final output size is not known until the + // final import paths are substituted into the output pieces generated below. + if c.options.NeedsMetafile { + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { + if !isFirstMeta { + jMeta.AddString("\n ") + } + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() } + } - relPath := config.TemplateToString(config.SubstituteTemplate(c.options.ChunkPathTemplate, config.PathPlaceholders{ - Name: &name, - Hash: &hash, - })) + c.options.OutputExtensionCSS + c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(cssContents, uint32(len(chunks)))) + } +} - chunk.baseNameOrEmpty = path.Base(relPath) - chunk.relDir = relPath[:len(relPath)-len(chunk.baseNameOrEmpty)] - } +func appendIsolatedHashesForImportedChunks( + hash hash.Hash, + chunks []chunkInfo, + chunkIndex uint32, + visited []uint32, + visitedKey uint32, +) { + // Only visit each chunk at most once. This is important because there may be + // cycles in the chunk import graph. If there's a cycle, we want to include + // the hash of every chunk involved in the cycle (along with all of their + // dependencies). This depth-first traversal will naturally do that. 
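+	// For example (hypothetical cycle): if chunk A imports chunk B and chunk B
+	// imports chunk A, the traversal rooted at A visits A, recurses into B,
+	// skips the back-edge to A via the "visited" check, and still mixes both
+	// isolated hashes into the result. The "visitedKey" is unique per root
+	// chunk, so the same "visited" array can be reused without being cleared.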
+ if visited[chunkIndex] == visitedKey { + return + } + visited[chunkIndex] = visitedKey + chunk := &chunks[chunkIndex] - // End the metadata - var jsonMetadataChunk string - if c.options.NeedsMetafile { - if !isFirstMeta { - sbMeta.WriteString("\n ") + // Visit the other chunks that this chunk imports before visiting this chunk + for _, otherChunkIndex := range chunk.crossChunkImports { + appendIsolatedHashesForImportedChunks(hash, chunks, otherChunkIndex, visited, visitedKey) + } + + // Mix in the hash for this chunk + hash.Write(chunk.isolatedChunkHash) +} + +func (c *linkerContext) breakOutputIntoPieces(output []byte, chunkCount uint32) []outputPiece { + var pieces []outputPiece + prefix := c.uniqueKeyPrefixBytes + for { + // Scan for the next chunk path + boundary := bytes.Index(output, prefix) + + // Try to parse the chunk index + var chunkIndex uint32 + if boundary != -1 { + if start := boundary + len(prefix); start+8 > len(output) { + boundary = -1 + } else { + for j := 0; j < 8; j++ { + c := output[start+j] + if c < '0' || c > '9' { + boundary = -1 + break + } + chunkIndex = chunkIndex*10 + uint32(c) - '0' + } + } + if chunkIndex >= chunkCount { + boundary = -1 } - sbMeta.WriteString(fmt.Sprintf("},\n \"bytes\": %d\n }", len(cssContents))) - jsonMetadataChunk = sbMeta.String() } - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.relPath()), - Contents: cssContents, - jsonMetadataChunk: jsonMetadataChunk, + // If we're at the end, generate one final piece + if boundary == -1 { + pieces = append(pieces, outputPiece{ + data: output, + }) + break + } + + // Otherwise, generate an interior piece and continue + pieces = append(pieces, outputPiece{ + data: output[:boundary], + chunkIndex: ast.MakeIndex32(chunkIndex), }) - return results + output = output[boundary+len(prefix)+8:] } + return pieces +} + +func (c *linkerContext) generateIsolatedChunkHash(chunk *chunkInfo, pieces []outputPiece) { + hash := sha1.New() + + // Mix the file names and part ranges of all of the files in this chunk into + // the hash. Objects that appear identical but that live in separate files or + // that live in separate parts in the same file must not be merged. This only + // needs to be done for JavaScript files, not CSS files. + for _, partRange := range chunk.partsInChunkInOrder { + var filePath string + file := &c.files[partRange.sourceIndex] + + if file.source.KeyPath.Namespace == "file" { + // Use the pretty path as the file name since it should be platform- + // independent (relative paths and the "/" path separator) + filePath = file.source.PrettyPath + } else { + // If this isn't in the "file" namespace, just use the full path text + // verbatim. This could be a source of cross-platform differences if + // plugins are storing platform-specific information in here, but then + // that problem isn't caused by esbuild itself. + filePath = file.source.KeyPath.Text + } + + // Include the path namespace in the hash + hashWriteLengthPrefixed(hash, []byte(file.source.KeyPath.Namespace)) + + // Then include the file path + hashWriteLengthPrefixed(hash, []byte(filePath)) + + // Also write the part range. These numbers are deterministic and allocated + // per-file so this should be a well-behaved base for a hash. 
		hashWriteUint32(hash, partRange.partIndexBegin)
+		hashWriteUint32(hash, partRange.partIndexEnd)
+	}
+
+	// Hash the output path template as part of the content hash because we want
+	// any import to be considered different if the import's output path has changed.
+	for _, part := range chunk.finalTemplate {
+		hashWriteLengthPrefixed(hash, []byte(part.Data))
+	}
+
+	// Include the generated output content in the hash. This excludes the
+	// randomly-generated import paths (the unique keys) and only includes the
+	// data in the spans between them.
+	for _, piece := range pieces {
+		hashWriteLengthPrefixed(hash, piece.data)
+	}
+
+	// Also include the source map data in the hash. The source map is given the
+	// same name as the chunk for ease of discovery, so we want the hash to
+	// change if the source map data changes even if the chunk data doesn't.
+	// Otherwise the output path for the source map wouldn't change and the
+	// source map wouldn't end up being updated.
+	//
+	// Note that this means the contents of all input files are included in the
+	// hash because of "sourcesContent", so changing a comment in an input file
+	// can now change the hash of the output file. This only happens when you
+	// have source maps enabled (and "sourcesContent", which is on by default).
+	//
+	// The generated positions in the mappings here are in the output content
+	// *before* the final paths have been substituted. This may seem weird.
+	// However, I think this shouldn't cause issues because a) the unique key
+	// values are always the same length so the offsets are deterministic
+	// and b) the final paths will be folded into the final hash later.
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Prefix)
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Mappings)
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Suffix)
+
+	// Store the hash so far. All other chunks that import this chunk will mix
+	// this hash into their "outputHash" to ensure that the import path changes
+	// if this chunk (or any dependencies of this chunk) is changed.
+	chunk.outputPieces = pieces
+	chunk.outputHash = hash
+	chunk.isolatedChunkHash = hash.Sum(nil)
+}
+
+func hashWriteUint32(hash hash.Hash, value uint32) {
+	var lengthBytes [4]byte
+	binary.LittleEndian.PutUint32(lengthBytes[:], value)
+	hash.Write(lengthBytes[:])
+}
+
+// Hash the data in length-prefixed form because boundary locations are
+// important. We don't want "a" + "bc" to hash the same as "ab" + "c". 
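+//
+// For example, writing "a" then "bc" hashes the byte sequence
+// [1 0 0 0 'a', 2 0 0 0 'b' 'c'] while writing "ab" then "c" hashes
+// [2 0 0 0 'a' 'b', 1 0 0 0 'c'], so the two sequences produce different hashes.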
+func hashWriteLengthPrefixed(hash hash.Hash, bytes []byte) { + hashWriteUint32(hash, uint32(len(bytes))) + hash.Write(bytes) } func preventBindingsFromBeingRenamed(binding js_ast.Binding, symbols js_ast.SymbolMap) { @@ -4513,7 +4775,7 @@ func (c *linkerContext) generateSourceMapForChunk( results []compileResultJS, chunkAbsDir string, dataForSourceMaps []dataForSourceMap, -) []byte { +) (pieces sourcemap.SourceMapPieces) { j := helpers.Joiner{} j.AddString("{\n \"version\": 3") @@ -4609,8 +4871,11 @@ func (c *linkerContext) generateSourceMapForChunk( j.AddString("]") } - // Write the mappings j.AddString(",\n \"mappings\": \"") + pieces.Prefix = j.Done() + + // Write the mappings + jMappings := helpers.Joiner{} prevEndState := js_printer.SourceMapState{} prevColumnOffset := 0 for _, result := range results { @@ -4640,7 +4905,7 @@ func (c *linkerContext) generateSourceMapForChunk( } // Append the precomputed source map chunk - js_printer.AppendSourceMapChunk(&j, prevEndState, startState, chunk.Buffer) + js_printer.AppendSourceMapChunk(&jMappings, prevEndState, startState, chunk.Buffer) // Generate the relative offset to start from next time prevEndState = chunk.EndState @@ -4653,62 +4918,9 @@ func (c *linkerContext) generateSourceMapForChunk( prevColumnOffset += startState.GeneratedColumn } } - j.AddString("\"") + pieces.Mappings = jMappings.Done() // Finish the source map - j.AddString(",\n \"names\": []\n}\n") - return j.Done() -} - -func (c *linkerContext) chunkHashForFileName(chunk *chunkInfo, bytes []byte) string { - hash := sha1.New() - - // Hash the data in length-prefixed form because boundary locations are - // important. We don't want "a" + "bc" to hash the same as "ab" + "c". - var lengthBytes [4]byte - - // Mix the file names and part ranges of all of the files in this chunk into - // the hash. Objects that appear identical but that live in separate files or - // that live in separate parts in the same file must not be merged. This only - // needs to be done for JavaScript files, not CSS files. - for _, partRange := range chunk.partsInChunkInOrder { - var filePath string - file := &c.files[partRange.sourceIndex] - - if file.source.KeyPath.Namespace == "file" { - // Use the pretty path as the file name since it should be platform- - // independent (relative paths and the "/" path separator) - filePath = file.source.PrettyPath - } else { - // If this isn't in the "file" namespace, just use the full path text - // verbatim. This could be a source of cross-platform differences if - // plugins are storing platform-specific information in here, but then - // that problem isn't caused by esbuild itself. - filePath = file.source.KeyPath.Text - } - - // Include the path namespace in the hash - binary.LittleEndian.PutUint32(lengthBytes[:], uint32(len(file.source.KeyPath.Namespace))) - hash.Write(lengthBytes[:]) - hash.Write([]byte(file.source.KeyPath.Namespace)) - - // Then include the file path - binary.LittleEndian.PutUint32(lengthBytes[:], uint32(len(filePath))) - hash.Write(lengthBytes[:]) - hash.Write([]byte(filePath)) - - // Also write the part range. These numbers are deterministic and allocated - // per-file so this should be a well-behaved base for a hash. 
- binary.LittleEndian.PutUint32(lengthBytes[:], partRange.partIndexBegin) - hash.Write(lengthBytes[:]) - binary.LittleEndian.PutUint32(lengthBytes[:], partRange.partIndexEnd) - hash.Write(lengthBytes[:]) - } - - // Then mix the contents of the chunk itself into the hash - hash.Write(bytes) - - var hashBytes [sha1.Size]byte - hash.Sum(hashBytes[:0]) - return hashForFileName(hashBytes) + pieces.Suffix = []byte("\",\n \"names\": []\n}\n") + return } diff --git a/internal/bundler/snapshots/snapshots_splitting.txt b/internal/bundler/snapshots/snapshots_splitting.txt index c9cdfd0c1d7..7c533ee54c9 100644 --- a/internal/bundler/snapshots/snapshots_splitting.txt +++ b/internal/bundler/snapshots/snapshots_splitting.txt @@ -9,7 +9,7 @@ TestSplittingAssignToLocal import { foo, setFoo -} from "./chunk.A4MVGQNL.js"; +} from "./chunk.B5TJI52K.js"; // a.js setFoo(123); @@ -18,12 +18,12 @@ console.log(foo); ---------- /out/b.js ---------- import { foo -} from "./chunk.A4MVGQNL.js"; +} from "./chunk.B5TJI52K.js"; // b.js console.log(foo); ----------- /out/chunk.A4MVGQNL.js ---------- +---------- /out/chunk.B5TJI52K.js ---------- // shared.js var foo; function setFoo(value) { @@ -41,7 +41,7 @@ TestSplittingCircularReferenceIssue251 import { p, q -} from "./chunk.DJXI6RPU.js"; +} from "./chunk.JAD3IYVK.js"; export { p, q @@ -51,13 +51,13 @@ export { import { p, q -} from "./chunk.DJXI6RPU.js"; +} from "./chunk.JAD3IYVK.js"; export { p, q }; ----------- /out/chunk.DJXI6RPU.js ---------- +---------- /out/chunk.JAD3IYVK.js ---------- // b.js var q = 6; @@ -74,15 +74,15 @@ TestSplittingCrossChunkAssignmentDependencies ---------- /out/a.js ---------- import { setValue -} from "./chunk.2ZHLO36K.js"; +} from "./chunk.K2Z6FE6X.js"; // a.js setValue(123); ---------- /out/b.js ---------- -import "./chunk.2ZHLO36K.js"; +import "./chunk.K2Z6FE6X.js"; ----------- /out/chunk.2ZHLO36K.js ---------- +---------- /out/chunk.K2Z6FE6X.js ---------- // shared.js var observer; var value; @@ -105,7 +105,7 @@ TestSplittingCrossChunkAssignmentDependenciesRecursive ---------- /out/a.js ---------- import { setX -} from "./chunk.7E7MWOW6.js"; +} from "./chunk.PJRFLMBG.js"; // a.js setX(); @@ -113,8 +113,8 @@ setX(); ---------- /out/b.js ---------- import { setZ -} from "./chunk.AYJGT7V5.js"; -import "./chunk.7E7MWOW6.js"; +} from "./chunk.SVT47M27.js"; +import "./chunk.PJRFLMBG.js"; // b.js setZ(); @@ -123,20 +123,20 @@ setZ(); import { setY2, setZ2 -} from "./chunk.AYJGT7V5.js"; +} from "./chunk.SVT47M27.js"; import { setX2 -} from "./chunk.7E7MWOW6.js"; +} from "./chunk.PJRFLMBG.js"; // c.js setX2(); setY2(); setZ2(); ----------- /out/chunk.AYJGT7V5.js ---------- +---------- /out/chunk.SVT47M27.js ---------- import { setX -} from "./chunk.7E7MWOW6.js"; +} from "./chunk.PJRFLMBG.js"; // y.js var _y; @@ -164,7 +164,7 @@ export { setZ2 }; ----------- /out/chunk.7E7MWOW6.js ---------- +---------- /out/chunk.PJRFLMBG.js ---------- // x.js var _x; function setX(v) { @@ -182,21 +182,21 @@ export { ================================================================================ TestSplittingDuplicateChunkCollision ---------- /out/a.js ---------- -import"./chunk.HERPCUVH.js"; +import"./chunk.SK4BZ7G2.js"; ---------- /out/b.js ---------- -import"./chunk.HERPCUVH.js"; +import"./chunk.SK4BZ7G2.js"; ----------- /out/chunk.HERPCUVH.js ---------- +---------- /out/chunk.SK4BZ7G2.js ---------- console.log(123); ---------- /out/c.js ---------- -import"./chunk.SN7IOVM7.js"; +import"./chunk.WLJLUM5K.js"; ---------- /out/d.js ---------- 
-import"./chunk.SN7IOVM7.js"; +import"./chunk.WLJLUM5K.js"; ----------- /out/chunk.SN7IOVM7.js ---------- +---------- /out/chunk.WLJLUM5K.js ---------- console.log(123); ================================================================================ @@ -204,7 +204,7 @@ TestSplittingDynamicAndNotDynamicCommonJSIntoES6 ---------- /out/entry.js ---------- import { require_foo -} from "./chunk.EXZ7XKWK.js"; +} from "./chunk.VGIGB4OH.js"; // entry.js var import_foo = __toModule(require_foo()); @@ -213,10 +213,10 @@ import("./foo.js").then(({default: {bar: b}}) => console.log(import_foo.bar, b)) ---------- /out/foo.js ---------- import { require_foo -} from "./chunk.EXZ7XKWK.js"; +} from "./chunk.VGIGB4OH.js"; export default require_foo(); ----------- /out/chunk.EXZ7XKWK.js ---------- +---------- /out/chunk.VGIGB4OH.js ---------- // foo.js var require_foo = __commonJS((exports) => { exports.bar = 123; @@ -231,7 +231,7 @@ TestSplittingDynamicAndNotDynamicES6IntoES6 ---------- /out/entry.js ---------- import { bar -} from "./chunk.J5VSPG5Y.js"; +} from "./chunk.JQGEX5UA.js"; // entry.js import("./foo.js").then(({bar: b}) => console.log(bar, b)); @@ -239,12 +239,12 @@ import("./foo.js").then(({bar: b}) => console.log(bar, b)); ---------- /out/foo.js ---------- import { bar -} from "./chunk.J5VSPG5Y.js"; +} from "./chunk.JQGEX5UA.js"; export { bar }; ----------- /out/chunk.J5VSPG5Y.js ---------- +---------- /out/chunk.JQGEX5UA.js ---------- // foo.js var bar = 123; @@ -310,7 +310,7 @@ TestSplittingHybridCJSAndESMIssue617 ---------- /out/a.js ---------- import { require_a -} from "./chunk.JETUXFVU.js"; +} from "./chunk.XCCWV4CM.js"; export default require_a(); ---------- /out/b.js ---------- @@ -318,7 +318,7 @@ import { __defProp, __markAsModule, require_a -} from "./chunk.JETUXFVU.js"; +} from "./chunk.XCCWV4CM.js"; // b.js var import_a = __toModule(require_a()); @@ -327,7 +327,7 @@ export { export_foo as foo }; ----------- /out/chunk.JETUXFVU.js ---------- +---------- /out/chunk.XCCWV4CM.js ---------- // a.js var require_a = __commonJS((exports) => { __markAsModule(exports); @@ -349,13 +349,13 @@ TestSplittingHybridESMAndCJSIssue617 ---------- /out/a.js ---------- import { require_a -} from "./chunk.MVW3M44A.js"; +} from "./chunk.DUBTDYGE.js"; export default require_a(); ---------- /out/b.js ---------- import { require_a -} from "./chunk.MVW3M44A.js"; +} from "./chunk.DUBTDYGE.js"; // b.js var bar = require_a(); @@ -363,7 +363,7 @@ export { bar }; ----------- /out/chunk.MVW3M44A.js ---------- +---------- /out/chunk.DUBTDYGE.js ---------- // a.js var require_a = __commonJS((exports) => { __markAsModule(exports); @@ -382,7 +382,7 @@ TestSplittingMinifyIdentifiersCrashIssue437 ---------- /out/a.js ---------- import { a as o -} from "./chunk.7DXPAJYM.js"; +} from "./chunk.666QKTW2.js"; // a.js console.log(o); @@ -390,12 +390,12 @@ console.log(o); ---------- /out/b.js ---------- import { a as o -} from "./chunk.7DXPAJYM.js"; +} from "./chunk.666QKTW2.js"; // b.js console.log(o); ----------- /out/chunk.7DXPAJYM.js ---------- +---------- /out/chunk.666QKTW2.js ---------- // shared.js function n(o) { } @@ -409,7 +409,7 @@ export { ================================================================================ TestSplittingMissingLazyExport ---------- /out/a.js ---------- -import "./chunk.3I42H3S6.js"; +import "./chunk.O6GZDAIM.js"; // empty.js var empty_exports = {}; @@ -423,7 +423,7 @@ function foo() { console.log(foo()); ---------- /out/b.js ---------- -import "./chunk.3I42H3S6.js"; +import 
"./chunk.O6GZDAIM.js"; // common.js function bar() { @@ -433,14 +433,14 @@ function bar() { // b.js console.log(bar()); ----------- /out/chunk.3I42H3S6.js ---------- +---------- /out/chunk.O6GZDAIM.js ---------- ================================================================================ TestSplittingNestedDirectories ---------- /Users/user/project/out/pageA/page.js ---------- import { shared_default -} from "../chunk.IWXS6DUR.js"; +} from "../chunk.T3IRO6DB.js"; // Users/user/project/src/pages/pageA/page.js console.log(shared_default); @@ -448,12 +448,12 @@ console.log(shared_default); ---------- /Users/user/project/out/pageB/page.js ---------- import { shared_default -} from "../chunk.IWXS6DUR.js"; +} from "../chunk.T3IRO6DB.js"; // Users/user/project/src/pages/pageB/page.js console.log(-shared_default); ----------- /Users/user/project/out/chunk.IWXS6DUR.js ---------- +---------- /Users/user/project/out/chunk.T3IRO6DB.js ---------- // Users/user/project/src/pages/shared.js var shared_default = 123; @@ -466,7 +466,7 @@ TestSplittingReExportIssue273 ---------- /out/a.js ---------- import { a -} from "./chunk.EQDLD3IR.js"; +} from "./chunk.MYHFHJ2W.js"; export { a }; @@ -474,12 +474,12 @@ export { ---------- /out/b.js ---------- import { a -} from "./chunk.EQDLD3IR.js"; +} from "./chunk.MYHFHJ2W.js"; export { a }; ----------- /out/chunk.EQDLD3IR.js ---------- +---------- /out/chunk.MYHFHJ2W.js ---------- // a.js var a = 1; @@ -492,7 +492,7 @@ TestSplittingSharedCommonJSIntoES6 ---------- /out/a.js ---------- import { require_shared -} from "./chunk.PDDZ4EBL.js"; +} from "./chunk.J4PXVRFR.js"; // a.js var {foo} = require_shared(); @@ -501,13 +501,13 @@ console.log(foo); ---------- /out/b.js ---------- import { require_shared -} from "./chunk.PDDZ4EBL.js"; +} from "./chunk.J4PXVRFR.js"; // b.js var {foo} = require_shared(); console.log(foo); ----------- /out/chunk.PDDZ4EBL.js ---------- +---------- /out/chunk.J4PXVRFR.js ---------- // shared.js var require_shared = __commonJS((exports) => { exports.foo = 123; @@ -522,7 +522,7 @@ TestSplittingSharedES6IntoES6 ---------- /out/a.js ---------- import { foo -} from "./chunk.BONVO3YG.js"; +} from "./chunk.OQIDAIZA.js"; // a.js console.log(foo); @@ -530,12 +530,12 @@ console.log(foo); ---------- /out/b.js ---------- import { foo -} from "./chunk.BONVO3YG.js"; +} from "./chunk.OQIDAIZA.js"; // b.js console.log(foo); ----------- /out/chunk.BONVO3YG.js ---------- +---------- /out/chunk.OQIDAIZA.js ---------- // shared.js var foo = 123; @@ -546,7 +546,7 @@ export { ================================================================================ TestSplittingSideEffectsWithoutDependencies ---------- /out/a.js ---------- -import "./chunk.BA4IXZSP.js"; +import "./chunk.3LEG74S7.js"; // shared.js var a = 1; @@ -555,7 +555,7 @@ var a = 1; console.log(a); ---------- /out/b.js ---------- -import "./chunk.BA4IXZSP.js"; +import "./chunk.3LEG74S7.js"; // shared.js var b = 2; @@ -563,7 +563,7 @@ var b = 2; // b.js console.log(b); ----------- /out/chunk.BA4IXZSP.js ---------- +---------- /out/chunk.3LEG74S7.js ---------- // shared.js console.log("side effect"); diff --git a/internal/config/config.go b/internal/config/config.go index 81c03045657..7dbb39bdf4c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -228,6 +228,7 @@ type Options struct { CSSBanner string CSSFooter string + EntryPathTemplate []PathTemplate ChunkPathTemplate []PathTemplate AssetPathTemplate []PathTemplate @@ -246,6 +247,10 @@ type PathPlaceholder uint8 const ( 
NoPlaceholder PathPlaceholder = iota + // The relative path from the original parent directory to the configured + // "outbase" directory, or to the lowest common ancestor directory + DirPlaceholder + // The original name of the file, or the manual chunk name, or the name of // the type of output file ("entry" or "chunk" or "asset") NamePlaceholder @@ -261,12 +266,15 @@ type PathTemplate struct { } type PathPlaceholders struct { + Dir *string Name *string Hash *string } func (placeholders PathPlaceholders) Get(placeholder PathPlaceholder) *string { switch placeholder { + case DirPlaceholder: + return placeholders.Dir case NamePlaceholder: return placeholders.Name case HashPlaceholder: @@ -284,6 +292,8 @@ func TemplateToString(template []PathTemplate) string { for _, part := range template { sb.WriteString(part.Data) switch part.Placeholder { + case DirPlaceholder: + sb.WriteString("[dir]") case NamePlaceholder: sb.WriteString("[name]") case HashPlaceholder: diff --git a/internal/fs/fs_mock.go b/internal/fs/fs_mock.go index f2ba6a78ac2..159471d8d0b 100644 --- a/internal/fs/fs_mock.go +++ b/internal/fs/fs_mock.go @@ -103,6 +103,9 @@ func splitOnSlash(path string) (string, string) { } func (*mockFS) Rel(base string, target string) (string, bool) { + base = path.Clean(base) + target = path.Clean(target) + // Base cases if base == "" || base == "." { return target, true diff --git a/internal/fs/fs_mock_test.go b/internal/fs/fs_mock_test.go index 290e9fce335..13ce94b1437 100644 --- a/internal/fs/fs_mock_test.go +++ b/internal/fs/fs_mock_test.go @@ -98,4 +98,11 @@ func TestMockFSRel(t *testing.T) { expect("/a/b/c/d", "/a/b/x", "../../x") expect("/a/b/c", "/a/b/x/y", "../x/y") expect("/a/b/c/d", "/a/b/x/y", "../../x/y") + + expect("a/b", "a/c", "../c") + expect("./a/b", "./a/c", "../c") + expect(".", "./a/b", "a/b") + expect(".", ".//a/b", "a/b") + expect(".", "././a/b", "a/b") + expect(".", "././/a/b", "a/b") } diff --git a/internal/helpers/joiner.go b/internal/helpers/joiner.go index 4dadae0079b..f1c9d6ed73d 100644 --- a/internal/helpers/joiner.go +++ b/internal/helpers/joiner.go @@ -45,7 +45,17 @@ func (j *Joiner) Length() uint32 { return j.length } +func (j *Joiner) EnsureNewlineAtEnd() { + if j.length > 0 && j.lastByte != '\n' { + j.AddString("\n") + } +} + func (j *Joiner) Done() []byte { + if len(j.strings) == 0 && len(j.bytes) == 1 && j.bytes[0].offset == 0 { + // No need to allocate if there was only a single byte array written + return j.bytes[0].data + } buffer := make([]byte, j.length) for _, item := range j.strings { copy(buffer[item.offset:], item.data) diff --git a/internal/sourcemap/sourcemap.go b/internal/sourcemap/sourcemap.go index 962be9a3b0a..9b4738c9f5e 100644 --- a/internal/sourcemap/sourcemap.go +++ b/internal/sourcemap/sourcemap.go @@ -3,6 +3,8 @@ package sourcemap import ( "bytes" "unicode/utf8" + + "github.com/evanw/esbuild/internal/helpers" ) type Mapping struct { @@ -184,6 +186,19 @@ type LineColumnOffset struct { Columns int } +func (a LineColumnOffset) ComesBefore(b LineColumnOffset) bool { + return a.Lines < b.Lines || (a.Lines == b.Lines && a.Columns < b.Columns) +} + +func (a *LineColumnOffset) Add(b LineColumnOffset) { + if b.Lines == 0 { + a.Columns += b.Columns + } else { + a.Lines += b.Lines + a.Columns = b.Columns + } +} + func (offset *LineColumnOffset) AdvanceBytes(bytes []byte) { columns := offset.Columns for len(bytes) > 0 { @@ -237,3 +252,109 @@ func (offset *LineColumnOffset) AdvanceString(text string) { } offset.Columns = columns } + +type 
SourceMapPieces struct { + Prefix []byte + Mappings []byte + Suffix []byte +} + +type SourceMapShift struct { + Before LineColumnOffset + After LineColumnOffset +} + +func (pieces SourceMapPieces) Finalize(shifts []SourceMapShift) []byte { + // An optimized path for when there are no shifts + if len(shifts) == 1 { + bytes := pieces.Prefix + minCap := len(bytes) + len(pieces.Mappings) + len(pieces.Suffix) + if cap(bytes) < minCap { + bytes = append(make([]byte, 0, minCap), bytes...) + } + bytes = append(bytes, pieces.Mappings...) + bytes = append(bytes, pieces.Suffix...) + return bytes + } + + startOfRun := 0 + current := 0 + generated := LineColumnOffset{} + prevShiftColumnDelta := 0 + j := helpers.Joiner{} + + // Start the source map + j.AddBytes(pieces.Prefix) + + // This assumes that a) all mappings are valid and b) all mappings are ordered + // by increasing generated position. This should be the case for all mappings + // generated by esbuild, which should be the only mappings we process here. + for current < len(pieces.Mappings) { + // Handle a line break + if pieces.Mappings[current] == ';' { + generated.Lines++ + generated.Columns = 0 + prevShiftColumnDelta = 0 + current++ + continue + } + + potentialEndOfRun := current + + // Read the generated column + generatedColumnDelta, next := DecodeVLQ(pieces.Mappings, current) + generated.Columns += generatedColumnDelta + current = next + + potentialStartOfRun := current + + // Skip over the original position information + _, current = DecodeVLQ(pieces.Mappings, current) // The original source + _, current = DecodeVLQ(pieces.Mappings, current) // The original line + _, current = DecodeVLQ(pieces.Mappings, current) // The original column + + // Skip a trailing comma + if current < len(pieces.Mappings) && pieces.Mappings[current] == ',' { + current++ + } + + // Detect crossing shift boundaries + didCrossBoundary := false + for len(shifts) > 1 && shifts[1].Before.ComesBefore(generated) { + shifts = shifts[1:] + didCrossBoundary = true + } + if !didCrossBoundary { + continue + } + + // This shift isn't relevant if the next mapping after this shift is on a + // following line. In that case, don't split and keep scanning instead. + shift := shifts[0] + if shift.After.Lines != generated.Lines { + continue + } + + // Add all previous mappings in a single run for efficiency. Since source + // mappings are relative, no data needs to be modified inside this run. + j.AddBytes(pieces.Mappings[startOfRun:potentialEndOfRun]) + + // Then modify the first mapping across the shift boundary with the updated + // generated column value. It's simplest to only support column shifts. This + // is reasonable because import paths should not contain newlines. 
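+		// Mappings within a line are delta-encoded, so only the first mapping
+		// past the boundary needs rewriting: add this shift's column delta and
+		// subtract the previous shift's delta, which is already reflected in
+		// the columns emitted earlier on this line.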
+ if shift.Before.Lines != shift.After.Lines { + panic("Unexpected line change when shifting source maps") + } + shiftColumnDelta := shift.After.Columns - shift.Before.Columns + j.AddBytes(EncodeVLQ(generatedColumnDelta + shiftColumnDelta - prevShiftColumnDelta)) + prevShiftColumnDelta = shiftColumnDelta + + // Finally, start the next run after the end of this generated column offset + startOfRun = potentialStartOfRun + } + + // Finish the source map + j.AddBytes(pieces.Mappings[startOfRun:]) + j.AddBytes(pieces.Suffix) + return j.Done() +} diff --git a/lib/common.ts b/lib/common.ts index 71f32a7131e..5785e47fb18 100644 --- a/lib/common.ts +++ b/lib/common.ts @@ -181,6 +181,7 @@ function flagsForBuildOptions( let loader = getFlag(options, keys, 'loader', mustBeObject); let outExtension = getFlag(options, keys, 'outExtension', mustBeObject); let publicPath = getFlag(options, keys, 'publicPath', mustBeString); + let entryNames = getFlag(options, keys, 'entryNames', mustBeString); let chunkNames = getFlag(options, keys, 'chunkNames', mustBeString); let assetNames = getFlag(options, keys, 'assetNames', mustBeString); let inject = getFlag(options, keys, 'inject', mustBeArray); @@ -225,6 +226,7 @@ function flagsForBuildOptions( flags.push(`--resolve-extensions=${values.join(',')}`); } if (publicPath) flags.push(`--public-path=${publicPath}`); + if (entryNames) flags.push(`--entry-names=${entryNames}`); if (chunkNames) flags.push(`--chunk-names=${chunkNames}`); if (assetNames) flags.push(`--asset-names=${assetNames}`); if (mainFields) { diff --git a/lib/types.ts b/lib/types.ts index 94ddc4e4ece..d7a5e316124 100644 --- a/lib/types.ts +++ b/lib/types.ts @@ -49,6 +49,7 @@ export interface BuildOptions extends CommonOptions { tsconfig?: string; outExtension?: { [ext: string]: string }; publicPath?: string; + entryNames?: string; chunkNames?: string; assetNames?: string; inject?: string[]; diff --git a/pkg/api/api.go b/pkg/api/api.go index 99820b87c5f..2bb7f93cc3c 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -265,6 +265,7 @@ type BuildOptions struct { Footer map[string]string NodePaths []string // The "NODE_PATH" variable from Node.js + EntryNames string ChunkNames string AssetNames string diff --git a/pkg/api/api_impl.go b/pkg/api/api_impl.go index 3478e5e20e8..3a9b28d07eb 100644 --- a/pkg/api/api_impl.go +++ b/pkg/api/api_impl.go @@ -746,6 +746,7 @@ func rebuildImpl( AbsOutputDir: validatePath(log, realFS, buildOpts.Outdir, "outdir path"), AbsOutputBase: validatePath(log, realFS, buildOpts.Outbase, "outbase path"), NeedsMetafile: buildOpts.Metafile, + EntryPathTemplate: validatePathTemplate(buildOpts.EntryNames), ChunkPathTemplate: validatePathTemplate(buildOpts.ChunkNames), AssetPathTemplate: validatePathTemplate(buildOpts.AssetNames), OutputExtensionJS: outJS, diff --git a/pkg/cli/cli_impl.go b/pkg/cli/cli_impl.go index 9d62a9c0b70..8097180366f 100644 --- a/pkg/cli/cli_impl.go +++ b/pkg/cli/cli_impl.go @@ -228,6 +228,9 @@ func parseOptionsImpl( case strings.HasPrefix(arg, "--tsconfig-raw=") && transformOpts != nil: transformOpts.TsconfigRaw = arg[len("--tsconfig-raw="):] + case strings.HasPrefix(arg, "--entry-names=") && buildOpts != nil: + buildOpts.EntryNames = arg[len("--entry-names="):] + case strings.HasPrefix(arg, "--chunk-names=") && buildOpts != nil: buildOpts.ChunkNames = arg[len("--chunk-names="):] diff --git a/scripts/js-api-tests.js b/scripts/js-api-tests.js index 048e8c7092b..65c0ebd1ccc 100644 --- a/scripts/js-api-tests.js +++ b/scripts/js-api-tests.js @@ -528,17 +528,17 @@ 
let buildTests = { assert.deepStrictEqual(value.outputFiles.length, 3) assert.deepStrictEqual(value.outputFiles[0].path, path.join(outdir, 'a', 'in1.js')) assert.deepStrictEqual(value.outputFiles[1].path, path.join(outdir, 'b', 'in2.js')) - assert.deepStrictEqual(value.outputFiles[2].path, path.join(outdir, 'chunk.PIM4Z7P3.js')) + assert.deepStrictEqual(value.outputFiles[2].path, path.join(outdir, 'chunk.DO6KKKV6.js')) assert.deepStrictEqual(value.outputFiles[0].text, `import { foo -} from "https://www.example.com/assets/chunk.PIM4Z7P3.js"; +} from "https://www.example.com/assets/chunk.DO6KKKV6.js"; export { foo as input1 }; `) assert.deepStrictEqual(value.outputFiles[1].text, `import { foo -} from "https://www.example.com/assets/chunk.PIM4Z7P3.js"; +} from "https://www.example.com/assets/chunk.DO6KKKV6.js"; export { foo as input2 }; @@ -775,7 +775,7 @@ body { const inEntry1 = makeInPath(entry1); const inEntry2 = makeInPath(entry2); const inImported = makeInPath(imported); - const chunk = 'chunk.KVWCN2LG.js'; + const chunk = 'chunk.22VG3QOS.js'; const outEntry1 = makeOutPath(path.basename(entry1)); const outEntry2 = makeOutPath(path.basename(entry2)); const outChunk = makeOutPath(chunk); @@ -839,7 +839,7 @@ body { const inEntry1 = makeInPath(entry1); const inEntry2 = makeInPath(entry2); const inImported = makeInPath(imported); - const chunk = 'chunk.HP5LLIY3.js'; + const chunk = 'chunk.644ZBJVW.js'; const outEntry1 = makeOutPath(path.basename(entry1)); const outEntry2 = makeOutPath(path.basename(entry2)); const outChunk = makeOutPath(chunk); @@ -905,7 +905,7 @@ body { const inImport1 = makeInPath(import1); const inImport2 = makeInPath(import2); const inShared = makeInPath(shared); - const chunk = 'chunk.FKSIHU5K.js'; + const chunk = 'chunk.B5NUMPKN.js'; const outEntry = makeOutPath(path.relative(testDir, entry)); const outImport1 = makeOutPath(path.relative(testDir, import1)); const outImport2 = makeOutPath(path.relative(testDir, import2)); @@ -943,7 +943,7 @@ body { assert.deepStrictEqual(json.outputs[outImport2].exports, []) assert.deepStrictEqual(json.outputs[outChunk].exports, []) - assert.deepStrictEqual(json.outputs[outEntry].inputs, { [inEntry]: { bytesInOutput: 70 } }) + assert.deepStrictEqual(json.outputs[outEntry].inputs, { [inEntry]: { bytesInOutput: 72 } }) assert.deepStrictEqual(json.outputs[outImport1].inputs, {}) assert.deepStrictEqual(json.outputs[outImport2].inputs, {}) assert.deepStrictEqual(json.outputs[outChunk].inputs, { [inShared]: { bytesInOutput: 28 } }) @@ -1250,7 +1250,7 @@ body { assert.strictEqual(value.outputFiles.length, 3) // These should all use forward slashes, even on Windows - const chunk = 'chunk.3OGTQ2G5.js' + const chunk = 'chunk.RDAS5GVQ.js' assert.strictEqual(Buffer.from(value.outputFiles[0].contents).toString(), `import { common_default } from "./${chunk}"; @@ -1307,7 +1307,7 @@ export { assert.strictEqual(value.outputFiles.length, 3) // These should all use forward slashes, even on Windows - const chunk = 'chunk.2GPNYGSC.js' + const chunk = 'chunk.L62YHXKF.js' assert.strictEqual(Buffer.from(value.outputFiles[0].contents).toString(), `import { common_default } from "../${chunk}"; @@ -1365,22 +1365,22 @@ export { assert.strictEqual(value.outputFiles.length, 3) // These should all use forward slashes, even on Windows - const chunk = 'chunks/name=chunk/hash=74LFAXLW.js' - assert.strictEqual(Buffer.from(value.outputFiles[0].contents).toString(), `import { + const chunk = 'chunks/name=chunk/hash=R4HXUXNL.js' + 
assert.strictEqual(value.outputFiles[0].text, `import { common_default } from "../${chunk}"; // scripts/.js-api-tests/splittingWithChunkPath/a/demo.js console.log("a" + common_default); `) - assert.strictEqual(Buffer.from(value.outputFiles[1].contents).toString(), `import { + assert.strictEqual(value.outputFiles[1].text, `import { common_default } from "../${chunk}"; // scripts/.js-api-tests/splittingWithChunkPath/b/demo.js console.log("b" + common_default); `) - assert.strictEqual(Buffer.from(value.outputFiles[2].contents).toString(), `// scripts/.js-api-tests/splittingWithChunkPath/common.js + assert.strictEqual(value.outputFiles[2].text, `// scripts/.js-api-tests/splittingWithChunkPath/common.js var common_default = "common"; export { @@ -1393,6 +1393,67 @@ export { assert.strictEqual(value.outputFiles[2].path, path.join(outdir, chunk)) }, + async splittingWithEntryHashes({ esbuild, testDir }) { + const inputA = path.join(testDir, 'a/demo.js') + const inputB = path.join(testDir, 'b/demo.js') + const inputCommon = path.join(testDir, 'common.js') + await mkdirAsync(path.dirname(inputA)).catch(x => x) + await mkdirAsync(path.dirname(inputB)).catch(x => x) + await writeFileAsync(inputA, ` + import x from "../${path.basename(inputCommon)}" + console.log('a' + x.name) + `) + await writeFileAsync(inputB, ` + import x from "../${path.basename(inputCommon)}" + console.log('b' + x.name) + `) + await writeFileAsync(inputCommon, ` + export default { name: 'common' } + `) + const outdir = path.join(testDir, 'out') + const value = await esbuild.build({ + entryPoints: [inputA, inputB], + bundle: true, + outdir, + format: 'esm', + splitting: true, + write: false, + entryNames: 'entry/name=[name]/hash=[hash]', + chunkNames: 'chunks/name=[name]/hash=[hash]', + }) + assert.strictEqual(value.outputFiles.length, 3) + + // These should all use forward slashes, even on Windows + const chunk = 'chunks/name=chunk/hash=UEUD4MXD.js' + assert.strictEqual(value.outputFiles[0].text, `import { + common_default +} from "../../${chunk}"; + +// scripts/.js-api-tests/splittingWithEntryHashes/a/demo.js +console.log("a" + common_default.name); +`) + assert.strictEqual(value.outputFiles[1].text, `import { + common_default +} from "../../${chunk}"; + +// scripts/.js-api-tests/splittingWithEntryHashes/b/demo.js +console.log("b" + common_default.name); +`) + assert.strictEqual(value.outputFiles[2].text, `// scripts/.js-api-tests/splittingWithEntryHashes/common.js +var common_default = {name: "common"}; + +export { + common_default +}; +`) + + const outputA = 'entry/name=demo/hash=LSS5JVZO.js' + const outputB = 'entry/name=demo/hash=ZW5IY2Q5.js' + assert.strictEqual(value.outputFiles[0].path, path.join(outdir, outputA)) + assert.strictEqual(value.outputFiles[1].path, path.join(outdir, outputB)) + assert.strictEqual(value.outputFiles[2].path, path.join(outdir, chunk)) + }, + async splittingWithChunkPathAndCrossChunkImportsIssue899({ esbuild, testDir }) { const entry1 = path.join(testDir, 'src', 'entry1.js') const entry2 = path.join(testDir, 'src', 'entry2.js') diff --git a/scripts/verify-source-map.js b/scripts/verify-source-map.js index b0b46a6e542..2f7f34316bb 100644 --- a/scripts/verify-source-map.js +++ b/scripts/verify-source-map.js @@ -328,13 +328,13 @@ async function check(kind, testCase, toSearch, { flags, entryPoints, crlf, follo if (isStdin) { outJs = stdout - recordCheck(outJs.includes(`//# sourceMappingURL=data:application/json;base64,`), `.js file contains source map`) + recordCheck(outJs.includes(`//# 
sourceMappingURL=data:application/json;base64,`), `.js file must contain source map`) outJsMap = Buffer.from(outJs.slice(outJs.indexOf('base64,') + 'base64,'.length).trim(), 'base64').toString() } else { outJs = await fs.readFile(path.join(tempDir, 'out.js'), 'utf8') - recordCheck(outJs.includes(`//# sourceMappingURL=out.js.map\n`), `.js file links to .js.map`) + recordCheck(outJs.includes(`//# sourceMappingURL=out.js.map\n`), `.js file must link to .js.map`) outJsMap = await fs.readFile(path.join(tempDir, 'out.js.map'), 'utf8') } @@ -421,7 +421,7 @@ async function check(kind, testCase, toSearch, { flags, entryPoints, crlf, follo await execFileAsync(esbuildPath, [nestedEntry, '--bundle', '--outfile=' + path.join(tempDir, 'out2.js'), '--sourcemap'].concat(followUpFlags), { cwd: testDir }) const out2Js = await fs.readFile(path.join(tempDir, 'out2.js'), 'utf8') - recordCheck(out2Js.includes(`//# sourceMappingURL=out2.js.map\n`), `.js file links to .js.map`) + recordCheck(out2Js.includes(`//# sourceMappingURL=out2.js.map\n`), `.js file must link to .js.map`) const out2JsMap = await fs.readFile(path.join(tempDir, 'out2.js.map'), 'utf8') const out2Map = await new SourceMapConsumer(out2JsMap) From 7d91d69657b90061d52208adbafef30fd36698e3 Mon Sep 17 00:00:00 2001 From: Evan Wallace Date: Thu, 18 Mar 2021 19:58:02 -0700 Subject: [PATCH 2/5] remove the now-unnecessary generate callback --- internal/bundler/linker.go | 789 ++++++++++++++++++------------------- 1 file changed, 385 insertions(+), 404 deletions(-) diff --git a/internal/bundler/linker.go b/internal/bundler/linker.go index 629ed49eed8..6452967d6a1 100644 --- a/internal/bundler/linker.go +++ b/internal/bundler/linker.go @@ -381,14 +381,10 @@ type outputPiece struct { chunkIndex ast.Index32 } -type generateContinue struct { - crossChunkImportRecords []ast.ImportRecord - crossChunkAbsPaths []string -} +type chunkRepr interface{ isChunk() } -type chunkRepr interface { - generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) -} +func (*chunkReprJS) isChunk() {} +func (*chunkReprCSS) isChunk() {} type chunkReprJS struct { // For code splitting @@ -681,34 +677,13 @@ func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []OutputFil // Generate each chunk on a separate goroutine generateWaitGroup := sync.WaitGroup{} generateWaitGroup.Add(len(chunks)) - for i := range chunks { - go func(i int) { - chunk := &chunks[i] - - // Start generating the chunk without dependencies, but stop when - // dependencies are needed. This returns a callback that is called - // later to resume generating the chunk once dependencies are known. 
- resume := chunk.chunkRepr.generate(c, chunks, chunk) - - // Fill in the cross-chunk import records now that the paths are known - crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports)) - crossChunkAbsPaths := make([]string, len(chunk.crossChunkImports)) - for i, otherChunkIndex := range chunk.crossChunkImports { - crossChunkAbsPaths[i] = chunks[otherChunkIndex].uniqueKey - crossChunkImportRecords[i] = ast.ImportRecord{ - Kind: ast.ImportStmt, - Path: logger.Path{Text: chunks[otherChunkIndex].uniqueKey}, - } - } - - // Generate the chunk - resume(generateContinue{ - crossChunkAbsPaths: crossChunkAbsPaths, - crossChunkImportRecords: crossChunkImportRecords, - }) - - generateWaitGroup.Done() - }(i) + for chunkIndex := range chunks { + switch chunks[chunkIndex].chunkRepr.(type) { + case *chunkReprJS: + go c.generateChunkJS(chunks, chunkIndex, &generateWaitGroup) + case *chunkReprCSS: + go c.generateChunkCSS(chunks, chunkIndex, &generateWaitGroup) + } } generateWaitGroup.Wait() @@ -3927,7 +3902,9 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui return r } -func (repr *chunkReprJS) generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) { +func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) { + chunk := &chunks[chunkIndex] + chunkRepr := chunk.chunkRepr.(*chunkReprJS) compileResults := make([]compileResultJS, 0, len(chunk.partsInChunkInOrder)) runtimeMembers := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope.Members commonJSRef := js_ast.FollowSymbols(c.symbols, runtimeMembers["__commonJS"].Ref) @@ -3969,323 +3946,328 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunks []chunkInfo, chunk *c ) } - // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) { - // Also generate the cross-chunk binding code - var crossChunkPrefix []byte - var crossChunkSuffix []byte - { - // Indent the file if everything is wrapped in an IIFE - indent := 0 - if c.options.OutputFormat == config.FormatIIFE { - indent++ - } - printOptions := js_printer.Options{ - Indent: indent, - OutputFormat: c.options.OutputFormat, - RemoveWhitespace: c.options.RemoveWhitespace, - MangleSyntax: c.options.MangleSyntax, + // Also generate the cross-chunk binding code + var crossChunkPrefix []byte + var crossChunkSuffix []byte + { + // Indent the file if everything is wrapped in an IIFE + indent := 0 + if c.options.OutputFormat == config.FormatIIFE { + indent++ + } + printOptions := js_printer.Options{ + Indent: indent, + OutputFormat: c.options.OutputFormat, + RemoveWhitespace: c.options.RemoveWhitespace, + MangleSyntax: c.options.MangleSyntax, + } + crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports)) + for i, otherChunkIndex := range chunk.crossChunkImports { + crossChunkImportRecords[i] = ast.ImportRecord{ + Kind: ast.ImportStmt, + Path: logger.Path{Text: chunks[otherChunkIndex].uniqueKey}, } - crossChunkPrefix = js_printer.Print(js_ast.AST{ - ImportRecords: continueData.crossChunkImportRecords, - Parts: []js_ast.Part{{Stmts: repr.crossChunkPrefixStmts}}, - }, c.symbols, r, printOptions).JS - crossChunkSuffix = js_printer.Print(js_ast.AST{ - Parts: []js_ast.Part{{Stmts: repr.crossChunkSuffixStmts}}, - }, c.symbols, r, printOptions).JS } + crossChunkPrefix = js_printer.Print(js_ast.AST{ + ImportRecords: crossChunkImportRecords, + Parts: []js_ast.Part{{Stmts: 
chunkRepr.crossChunkPrefixStmts}}, + }, c.symbols, r, printOptions).JS + crossChunkSuffix = js_printer.Print(js_ast.AST{ + Parts: []js_ast.Part{{Stmts: chunkRepr.crossChunkSuffixStmts}}, + }, c.symbols, r, printOptions).JS + } - waitGroup.Wait() + waitGroup.Wait() - j := helpers.Joiner{} - prevOffset := sourcemap.LineColumnOffset{} + j := helpers.Joiner{} + prevOffset := sourcemap.LineColumnOffset{} - // Optionally strip whitespace - indent := "" - space := " " - newline := "\n" - if c.options.RemoveWhitespace { - space = "" - newline = "" - } - newlineBeforeComment := false - isExecutable := false + // Optionally strip whitespace + indent := "" + space := " " + newline := "\n" + if c.options.RemoveWhitespace { + space = "" + newline = "" + } + newlineBeforeComment := false + isExecutable := false - if chunk.isEntryPoint { - repr := c.files[chunk.sourceIndex].repr.(*reprJS) + if chunk.isEntryPoint { + repr := c.files[chunk.sourceIndex].repr.(*reprJS) - // Start with the hashbang if there is one - if repr.ast.Hashbang != "" { - hashbang := repr.ast.Hashbang + "\n" - prevOffset.AdvanceString(hashbang) - j.AddString(hashbang) - newlineBeforeComment = true - isExecutable = true - } + // Start with the hashbang if there is one + if repr.ast.Hashbang != "" { + hashbang := repr.ast.Hashbang + "\n" + prevOffset.AdvanceString(hashbang) + j.AddString(hashbang) + newlineBeforeComment = true + isExecutable = true + } - // Add the top-level directive if present - if repr.ast.Directive != "" { - quoted := string(js_printer.QuoteForJSON(repr.ast.Directive, c.options.ASCIIOnly)) + ";" + newline - prevOffset.AdvanceString(quoted) - j.AddString(quoted) - newlineBeforeComment = true - } + // Add the top-level directive if present + if repr.ast.Directive != "" { + quoted := string(js_printer.QuoteForJSON(repr.ast.Directive, c.options.ASCIIOnly)) + ";" + newline + prevOffset.AdvanceString(quoted) + j.AddString(quoted) + newlineBeforeComment = true } + } - if len(c.options.JSBanner) > 0 { - prevOffset.AdvanceString(c.options.JSBanner) - prevOffset.AdvanceString("\n") - j.AddString(c.options.JSBanner) - j.AddString("\n") + if len(c.options.JSBanner) > 0 { + prevOffset.AdvanceString(c.options.JSBanner) + prevOffset.AdvanceString("\n") + j.AddString(c.options.JSBanner) + j.AddString("\n") + } + + // Optionally wrap with an IIFE + if c.options.OutputFormat == config.FormatIIFE { + var text string + indent = " " + if len(c.options.GlobalName) > 0 { + text = c.generateGlobalNamePrefix() } + if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { + text += "(function()" + space + "{" + newline + } else { + text += "(()" + space + "=>" + space + "{" + newline + } + prevOffset.AdvanceString(text) + j.AddString(text) + newlineBeforeComment = false + } - // Optionally wrap with an IIFE - if c.options.OutputFormat == config.FormatIIFE { - var text string - indent = " " - if len(c.options.GlobalName) > 0 { - text = c.generateGlobalNamePrefix() - } - if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { - text += "(function()" + space + "{" + newline + // Put the cross-chunk prefix inside the IIFE + if len(crossChunkPrefix) > 0 { + newlineBeforeComment = true + prevOffset.AdvanceBytes(crossChunkPrefix) + j.AddBytes(crossChunkPrefix) + } + + // Start the metadata + jMeta := helpers.Joiner{} + if c.options.NeedsMetafile { + // Print imports + isFirstMeta := true + jMeta.AddString("{\n \"imports\": [") + for _, otherChunkIndex := range chunk.crossChunkImports { + if isFirstMeta { + isFirstMeta = false } else { - text += "(()" + 
space + "=>" + space + "{" + newline + jMeta.AddString(",") } - prevOffset.AdvanceString(text) - j.AddString(text) - newlineBeforeComment = false + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: chunks[otherChunkIndex].uniqueKey, Namespace: "file"}), c.options.ASCIIOnly), + js_printer.QuoteForJSON(ast.ImportStmt.StringForMetafile(), c.options.ASCIIOnly))) } - - // Put the cross-chunk prefix inside the IIFE - if len(crossChunkPrefix) > 0 { - newlineBeforeComment = true - prevOffset.AdvanceBytes(crossChunkPrefix) - j.AddBytes(crossChunkPrefix) + if !isFirstMeta { + jMeta.AddString("\n ") } - // Start the metadata - jMeta := helpers.Joiner{} - if c.options.NeedsMetafile { - // Print imports - isFirstMeta := true - jMeta.AddString("{\n \"imports\": [") - for i, importAbsPath := range continueData.crossChunkAbsPaths { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", - js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), - js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) - } - if !isFirstMeta { - jMeta.AddString("\n ") - } - - // Print exports - jMeta.AddString("],\n \"exports\": [") - var aliases []string - if c.options.OutputFormat.KeepES6ImportExportSyntax() { - if chunk.isEntryPoint { - if fileRepr := c.files[chunk.sourceIndex].repr.(*reprJS); fileRepr.meta.cjsWrap { - aliases = []string{"default"} - } else { - resolvedExports := fileRepr.meta.resolvedExports - aliases = make([]string, 0, len(resolvedExports)) - for alias := range resolvedExports { - aliases = append(aliases, alias) - } - } + // Print exports + jMeta.AddString("],\n \"exports\": [") + var aliases []string + if c.options.OutputFormat.KeepES6ImportExportSyntax() { + if chunk.isEntryPoint { + if fileRepr := c.files[chunk.sourceIndex].repr.(*reprJS); fileRepr.meta.cjsWrap { + aliases = []string{"default"} } else { - aliases = make([]string, 0, len(repr.exportsToOtherChunks)) - for _, alias := range repr.exportsToOtherChunks { + resolvedExports := fileRepr.meta.resolvedExports + aliases = make([]string, 0, len(resolvedExports)) + for alias := range resolvedExports { aliases = append(aliases, alias) } } - } - isFirstMeta = true - sort.Strings(aliases) // Sort for determinism - for _, alias := range aliases { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") + } else { + aliases = make([]string, 0, len(chunkRepr.exportsToOtherChunks)) + for _, alias := range chunkRepr.exportsToOtherChunks { + aliases = append(aliases, alias) } - jMeta.AddString(fmt.Sprintf("\n %s", - js_printer.QuoteForJSON(alias, c.options.ASCIIOnly))) } - if !isFirstMeta { - jMeta.AddString("\n ") - } - if chunk.isEntryPoint { - entryPoint := c.files[chunk.sourceIndex].source.PrettyPath - jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(entryPoint, c.options.ASCIIOnly))) + } + isFirstMeta = true + sort.Strings(aliases) // Sort for determinism + for _, alias := range aliases { + if isFirstMeta { + isFirstMeta = false } else { - jMeta.AddString("],\n \"inputs\": {") + jMeta.AddString(",") } + jMeta.AddString(fmt.Sprintf("\n %s", + js_printer.QuoteForJSON(alias, c.options.ASCIIOnly))) } - - // Concatenate the generated JavaScript chunks together - var compileResultsForSourceMap 
[]compileResultJS - var entryPointTail *js_printer.PrintResult - var commentList []string - var metaOrder []string - var metaByteCount map[string]int - commentSet := make(map[string]bool) - prevComment := uint32(0) - if c.options.NeedsMetafile { - metaOrder = make([]string, 0, len(compileResults)) - metaByteCount = make(map[string]int, len(compileResults)) + if !isFirstMeta { + jMeta.AddString("\n ") } - for _, compileResult := range compileResults { - isRuntime := compileResult.sourceIndex == runtime.SourceIndex - for text := range compileResult.ExtractedComments { - if !commentSet[text] { - commentSet[text] = true - commentList = append(commentList, text) - } - } + if chunk.isEntryPoint { + entryPoint := c.files[chunk.sourceIndex].source.PrettyPath + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(entryPoint, c.options.ASCIIOnly))) + } else { + jMeta.AddString("],\n \"inputs\": {") + } + } - // If this is the entry point, it may have some extra code to stick at the - // end of the chunk after all modules have evaluated - if compileResult.entryPointTail != nil { - entryPointTail = compileResult.entryPointTail + // Concatenate the generated JavaScript chunks together + var compileResultsForSourceMap []compileResultJS + var entryPointTail *js_printer.PrintResult + var commentList []string + var metaOrder []string + var metaByteCount map[string]int + commentSet := make(map[string]bool) + prevComment := uint32(0) + if c.options.NeedsMetafile { + metaOrder = make([]string, 0, len(compileResults)) + metaByteCount = make(map[string]int, len(compileResults)) + } + for _, compileResult := range compileResults { + isRuntime := compileResult.sourceIndex == runtime.SourceIndex + for text := range compileResult.ExtractedComments { + if !commentSet[text] { + commentSet[text] = true + commentList = append(commentList, text) } + } - // Add a comment with the file path before the file contents - if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace && prevComment != compileResult.sourceIndex && len(compileResult.JS) > 0 { - if newlineBeforeComment { - prevOffset.AdvanceString("\n") - j.AddString("\n") - } - - path := c.files[compileResult.sourceIndex].source.PrettyPath + // If this is the entry point, it may have some extra code to stick at the + // end of the chunk after all modules have evaluated + if compileResult.entryPointTail != nil { + entryPointTail = compileResult.entryPointTail + } - // Make sure newlines in the path can't cause a syntax error. This does - // not minimize allocations because it's expected that this case never - // comes up in practice. 
- path = strings.ReplaceAll(path, "\r", "\\r") - path = strings.ReplaceAll(path, "\n", "\\n") - path = strings.ReplaceAll(path, "\u2028", "\\u2028") - path = strings.ReplaceAll(path, "\u2029", "\\u2029") - - text := fmt.Sprintf("%s// %s\n", indent, path) - prevOffset.AdvanceString(text) - j.AddString(text) - prevComment = compileResult.sourceIndex + // Add a comment with the file path before the file contents + if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace && prevComment != compileResult.sourceIndex && len(compileResult.JS) > 0 { + if newlineBeforeComment { + prevOffset.AdvanceString("\n") + j.AddString("\n") } - // Don't include the runtime in source maps - if isRuntime { - prevOffset.AdvanceString(string(compileResult.JS)) - j.AddBytes(compileResult.JS) - } else { - // Save the offset to the start of the stored JavaScript - compileResult.generatedOffset = prevOffset - j.AddBytes(compileResult.JS) + path := c.files[compileResult.sourceIndex].source.PrettyPath - // Ignore empty source map chunks - if compileResult.SourceMapChunk.ShouldIgnore { - prevOffset.AdvanceBytes(compileResult.JS) - } else { - prevOffset = sourcemap.LineColumnOffset{} + // Make sure newlines in the path can't cause a syntax error. This does + // not minimize allocations because it's expected that this case never + // comes up in practice. + path = strings.ReplaceAll(path, "\r", "\\r") + path = strings.ReplaceAll(path, "\n", "\\n") + path = strings.ReplaceAll(path, "\u2028", "\\u2028") + path = strings.ReplaceAll(path, "\u2029", "\\u2029") - // Include this file in the source map - if c.options.SourceMap != config.SourceMapNone { - compileResultsForSourceMap = append(compileResultsForSourceMap, compileResult) - } - } + text := fmt.Sprintf("%s// %s\n", indent, path) + prevOffset.AdvanceString(text) + j.AddString(text) + prevComment = compileResult.sourceIndex + } - // Include this file in the metadata - if c.options.NeedsMetafile { - // Accumulate file sizes since a given file may be split into multiple parts - path := c.files[compileResult.sourceIndex].source.PrettyPath - if count, ok := metaByteCount[path]; ok { - metaByteCount[path] = count + len(compileResult.JS) - } else { - metaOrder = append(metaOrder, path) - metaByteCount[path] = len(compileResult.JS) - } + // Don't include the runtime in source maps + if isRuntime { + prevOffset.AdvanceString(string(compileResult.JS)) + j.AddBytes(compileResult.JS) + } else { + // Save the offset to the start of the stored JavaScript + compileResult.generatedOffset = prevOffset + j.AddBytes(compileResult.JS) + + // Ignore empty source map chunks + if compileResult.SourceMapChunk.ShouldIgnore { + prevOffset.AdvanceBytes(compileResult.JS) + } else { + prevOffset = sourcemap.LineColumnOffset{} + + // Include this file in the source map + if c.options.SourceMap != config.SourceMapNone { + compileResultsForSourceMap = append(compileResultsForSourceMap, compileResult) } } - // Put a newline before the next file path comment - if len(compileResult.JS) > 0 { - newlineBeforeComment = true + // Include this file in the metadata + if c.options.NeedsMetafile { + // Accumulate file sizes since a given file may be split into multiple parts + path := c.files[compileResult.sourceIndex].source.PrettyPath + if count, ok := metaByteCount[path]; ok { + metaByteCount[path] = count + len(compileResult.JS) + } else { + metaOrder = append(metaOrder, path) + metaByteCount[path] = len(compileResult.JS) + } } } - // Stick the entry point tail at the end of the file. 
Deliberately don't - // include any source mapping information for this because it's automatically - // generated and doesn't correspond to a location in the input file. - if entryPointTail != nil { - j.AddBytes(entryPointTail.JS) + // Put a newline before the next file path comment + if len(compileResult.JS) > 0 { + newlineBeforeComment = true } + } - // Put the cross-chunk suffix inside the IIFE - if len(crossChunkSuffix) > 0 { - if newlineBeforeComment { - j.AddString(newline) - } - j.AddBytes(crossChunkSuffix) - } + // Stick the entry point tail at the end of the file. Deliberately don't + // include any source mapping information for this because it's automatically + // generated and doesn't correspond to a location in the input file. + if entryPointTail != nil { + j.AddBytes(entryPointTail.JS) + } - // Optionally wrap with an IIFE - if c.options.OutputFormat == config.FormatIIFE { - j.AddString("})();" + newline) + // Put the cross-chunk suffix inside the IIFE + if len(crossChunkSuffix) > 0 { + if newlineBeforeComment { + j.AddString(newline) } + j.AddBytes(crossChunkSuffix) + } - // Make sure the file ends with a newline - j.EnsureNewlineAtEnd() + // Optionally wrap with an IIFE + if c.options.OutputFormat == config.FormatIIFE { + j.AddString("})();" + newline) + } - // Add all unique license comments to the end of the file. These are - // deduplicated because some projects have thousands of files with the same - // comment. The comment must be preserved in the output for legal reasons but - // at the same time we want to generate a small bundle when minifying. - sort.Strings(commentList) - for _, text := range commentList { - j.AddString(text) - j.AddString("\n") - } + // Make sure the file ends with a newline + j.EnsureNewlineAtEnd() - if len(c.options.JSFooter) > 0 { - j.AddString(c.options.JSFooter) - j.AddString("\n") - } + // Add all unique license comments to the end of the file. These are + // deduplicated because some projects have thousands of files with the same + // comment. The comment must be preserved in the output for legal reasons but + // at the same time we want to generate a small bundle when minifying. + sort.Strings(commentList) + for _, text := range commentList { + j.AddString(text) + j.AddString("\n") + } - if c.options.SourceMap != config.SourceMapNone { - chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) - } + if len(c.options.JSFooter) > 0 { + j.AddString(c.options.JSFooter) + j.AddString("\n") + } - // The JavaScript contents are done now that the source map comment is in - jsContents := j.Done() + if c.options.SourceMap != config.SourceMapNone { + chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) + } - // End the metadata lazily. The final output size is not known until the - // final import paths are substituted into the output pieces generated below. - if c.options.NeedsMetafile { - chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { - isFirstMeta := true - for _, path := range metaOrder { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", - js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path])) - } - if !isFirstMeta { - jMeta.AddString("\n ") + // The JavaScript contents are done now that the source map comment is in + jsContents := j.Done() + + // End the metadata lazily. 
The final output size is not known until the + // final import paths are substituted into the output pieces generated below. + if c.options.NeedsMetafile { + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { + isFirstMeta := true + for _, path := range metaOrder { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") } - jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) - return jMeta.Done() + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", + js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path])) + } + if !isFirstMeta { + jMeta.AddString("\n ") } + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() } - - c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(jsContents, uint32(len(chunks)))) - chunk.isExecutable = isExecutable } + + c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(jsContents, uint32(len(chunks)))) + chunk.isExecutable = isExecutable + chunkWaitGroup.Done() } func (c *linkerContext) generateGlobalNamePrefix() string { @@ -4337,7 +4319,8 @@ type externalImportCSS struct { conditions []css_ast.Token } -func (repr *chunkReprCSS) generate(c *linkerContext, chunks []chunkInfo, chunk *chunkInfo) func(generateContinue) { +func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) { + chunk := &chunks[chunkIndex] var results []OutputFile compileResults := make([]compileResultCSS, 0, len(chunk.filesInChunkInOrder)) @@ -4385,139 +4368,137 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunks []chunkInfo, chunk * }(sourceIndex, compileResult) } - // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) { - waitGroup.Wait() - j := helpers.Joiner{} - newlineBeforeComment := false - - if len(c.options.CSSBanner) > 0 { - j.AddString(c.options.CSSBanner) - j.AddString("\n") - } + waitGroup.Wait() + j := helpers.Joiner{} + newlineBeforeComment := false - // Generate any prefix rules now - { - ast := css_ast.AST{} + if len(c.options.CSSBanner) > 0 { + j.AddString(c.options.CSSBanner) + j.AddString("\n") + } - // "@charset" is the only thing that comes before "@import" - for _, compileResult := range compileResults { - if compileResult.hasCharset { - ast.Rules = append(ast.Rules, &css_ast.RAtCharset{Encoding: "UTF-8"}) - break - } - } + // Generate any prefix rules now + { + ast := css_ast.AST{} - // Insert all external "@import" rules at the front. In CSS, all "@import" - // rules must come first or the browser will just ignore them. - for _, compileResult := range compileResults { - for _, external := range compileResult.externalImports { - ast.Rules = append(ast.Rules, &css_ast.RAtImport{ - ImportRecordIndex: uint32(len(ast.ImportRecords)), - ImportConditions: external.conditions, - }) - ast.ImportRecords = append(ast.ImportRecords, external.record) - } + // "@charset" is the only thing that comes before "@import" + for _, compileResult := range compileResults { + if compileResult.hasCharset { + ast.Rules = append(ast.Rules, &css_ast.RAtCharset{Encoding: "UTF-8"}) + break } + } - if len(ast.Rules) > 0 { - css := css_printer.Print(ast, css_printer.Options{ - RemoveWhitespace: c.options.RemoveWhitespace, + // Insert all external "@import" rules at the front. In CSS, all "@import" + // rules must come first or the browser will just ignore them. 
+ for _, compileResult := range compileResults { + for _, external := range compileResult.externalImports { + ast.Rules = append(ast.Rules, &css_ast.RAtImport{ + ImportRecordIndex: uint32(len(ast.ImportRecords)), + ImportConditions: external.conditions, }) - if len(css) > 0 { - j.AddString(css) - newlineBeforeComment = true - } + ast.ImportRecords = append(ast.ImportRecords, external.record) } } - // Start the metadata - jMeta := helpers.Joiner{} - if c.options.NeedsMetafile { - isFirstMeta := true - jMeta.AddString("{\n \"imports\": [") - for i, importAbsPath := range continueData.crossChunkAbsPaths { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", - js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), - js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) + if len(ast.Rules) > 0 { + css := css_printer.Print(ast, css_printer.Options{ + RemoveWhitespace: c.options.RemoveWhitespace, + }) + if len(css) > 0 { + j.AddString(css) + newlineBeforeComment = true } - if !isFirstMeta { - jMeta.AddString("\n ") + } + } + + // Start the metadata + jMeta := helpers.Joiner{} + if c.options.NeedsMetafile { + isFirstMeta := true + jMeta.AddString("{\n \"imports\": [") + for _, otherChunkIndex := range chunk.crossChunkImports { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") } - if chunk.isEntryPoint { - file := &c.files[chunk.sourceIndex] - - // Do not generate "entryPoint" for CSS files that are the result of - // importing CSS into JavaScript. We want this to be a 1:1 relationship - // and there is already an output file for the JavaScript entry point. - if _, ok := file.repr.(*reprCSS); ok { - jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", - js_printer.QuoteForJSON(file.source.PrettyPath, c.options.ASCIIOnly))) - } else { - jMeta.AddString("],\n \"inputs\": {") - } + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: chunks[otherChunkIndex].uniqueKey, Namespace: "file"}), c.options.ASCIIOnly), + js_printer.QuoteForJSON(ast.ImportAt.StringForMetafile(), c.options.ASCIIOnly))) + } + if !isFirstMeta { + jMeta.AddString("\n ") + } + if chunk.isEntryPoint { + file := &c.files[chunk.sourceIndex] + + // Do not generate "entryPoint" for CSS files that are the result of + // importing CSS into JavaScript. We want this to be a 1:1 relationship + // and there is already an output file for the JavaScript entry point. 
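+			// (A "*reprCSS" representation here means this chunk is for a true
+			// CSS entry point, while a "*reprJS" representation means the CSS
+			// was pulled in by a JavaScript entry point.)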
+ if _, ok := file.repr.(*reprCSS); ok { + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", + js_printer.QuoteForJSON(file.source.PrettyPath, c.options.ASCIIOnly))) } else { jMeta.AddString("],\n \"inputs\": {") } + } else { + jMeta.AddString("],\n \"inputs\": {") } - isFirstMeta := true + } + isFirstMeta := true - // Concatenate the generated CSS chunks together - for _, compileResult := range compileResults { - if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace { - if newlineBeforeComment { - j.AddString("\n") - } - j.AddString(fmt.Sprintf("/* %s */\n", c.files[compileResult.sourceIndex].source.PrettyPath)) - } - if len(compileResult.printedCSS) > 0 { - newlineBeforeComment = true + // Concatenate the generated CSS chunks together + for _, compileResult := range compileResults { + if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace { + if newlineBeforeComment { + j.AddString("\n") } - j.AddString(compileResult.printedCSS) + j.AddString(fmt.Sprintf("/* %s */\n", c.files[compileResult.sourceIndex].source.PrettyPath)) + } + if len(compileResult.printedCSS) > 0 { + newlineBeforeComment = true + } + j.AddString(compileResult.printedCSS) - // Include this file in the metadata - if c.options.NeedsMetafile { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", - js_printer.QuoteForJSON(c.files[compileResult.sourceIndex].source.PrettyPath, c.options.ASCIIOnly), - len(compileResult.printedCSS))) + // Include this file in the metadata + if c.options.NeedsMetafile { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") } + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", + js_printer.QuoteForJSON(c.files[compileResult.sourceIndex].source.PrettyPath, c.options.ASCIIOnly), + len(compileResult.printedCSS))) } + } - // Make sure the file ends with a newline - j.EnsureNewlineAtEnd() + // Make sure the file ends with a newline + j.EnsureNewlineAtEnd() - if len(c.options.CSSFooter) > 0 { - j.AddString(c.options.CSSFooter) - j.AddString("\n") - } + if len(c.options.CSSFooter) > 0 { + j.AddString(c.options.CSSFooter) + j.AddString("\n") + } - // The CSS contents are done now that the source map comment is in - cssContents := j.Done() + // The CSS contents are done now that the source map comment is in + cssContents := j.Done() - // End the metadata lazily. The final output size is not known until the - // final import paths are substituted into the output pieces generated below. - if c.options.NeedsMetafile { - chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { - if !isFirstMeta { - jMeta.AddString("\n ") - } - jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) - return jMeta.Done() + // End the metadata lazily. The final output size is not known until the + // final import paths are substituted into the output pieces generated below. 
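+	// The callback closes over "jMeta" and runs after the final import paths
+	// have been substituted, when the true output size is finally known.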
+ if c.options.NeedsMetafile { + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { + if !isFirstMeta { + jMeta.AddString("\n ") } + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() } - - c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(cssContents, uint32(len(chunks)))) } + + c.generateIsolatedChunkHash(chunk, c.breakOutputIntoPieces(cssContents, uint32(len(chunks)))) + chunkWaitGroup.Done() } func appendIsolatedHashesForImportedChunks( From f07bcc75ddba69363a0cc1dc3279633f9eb6091c Mon Sep 17 00:00:00 2001 From: Evan Wallace Date: Thu, 18 Mar 2021 20:12:37 -0700 Subject: [PATCH 3/5] update help text and change log --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ cmd/esbuild/main.go | 8 +++++++- internal/bundler/bundler.go | 2 ++ internal/bundler/linker.go | 2 +- 4 files changed, 40 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f510ce659a..5811482e3ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,36 @@ ## Unreleased +* Enable hashes in entry point file paths ([#518](https://github.com/evanw/esbuild/issues/518)) + + This release adds the new `--entry-names=` flag. It's similar to the `--chunk-names=` and `--asset-names=` flags except it sets the output paths for entry point files. The pattern defaults to `[dir]/[name]` which should be equivalent to the previous entry point output path behavior, so this should be a backward-compatible change. + + This change has the following consequences: + + * It is now possible for entry point output paths to contain a hash. For example, this now happens if you pass `--entry-names=[dir]/[name]-[hash]`. This means you can now use esbuild to generate output files such that all output paths have a hash in them, which means it should now be possible to serve the output files with an infinite cache lifetime so they are only downloaded once and then cached by the browser forever. + + * It is now possible to prevent the generation of subdirectories inside the output directory. Previously esbuild replicated the directory structure of the input entry points relative to the `outbase` directory (which defaults to the [lowest common ancestor](https://en.wikipedia.org/wiki/Lowest_common_ancestor) directory across all entry points). This value is substituted into the newly-added `[dir]` placeholder. But you can now omit it by omitting that placeholder, like this: `--entry-names=[name]`. + + * Source map names should now be equal to the corresponding output file name plus an additional `.map` extension. Previously the hashes were content hashes, so the source map had a different hash than the corresponding output file because they had different contents. Now they have the same hash so finding the source map should now be easier (just add `.map`). + + * Due to the way the new hashing algorithm works, all chunks can now be generated fully in parallel instead of some chunks having to wait until their dependency chunks have been generated first. The import paths for dependency chunks are now swapped in after chunk generation in a second pass (detailed below). This could theoretically result in a speedup although I haven't done any benchmarks around this. + + Implementing this feature required overhauling how hashes are calculated to prevent the chicken-and-egg hashing problem due to dynamic imports, which can cause cycles in the import graph of the resulting output files when code splitting is enabled. 
Since generating a hash involved first hashing all of your dependencies, you could end up in a situation where you needed to know the hash to calculate the hash (if a file was a dependency of itself).
+
+  The hashing algorithm now works in three steps (potentially subject to change in the future):
+
+  1. The initial versions of all output files are generated in parallel, with temporary paths used for any imports of other output files. Each temporary path is a randomly-generated string that is unique for each output file. An initial source map is also generated at this step if source maps are enabled.
+
+     The hash for the first step includes: the raw content of the output file excluding the temporary paths, the relative file paths of all input files present in that output file, the relative output path for the resulting output file (with `[hash]` for the hash that hasn't been computed yet), and the contents of the initial source map.
+
+  2. After the initial versions of all output files have been generated, calculate the final hash and final output path for each output file. Calculating the final output path involves substituting the final hash for the `[hash]` placeholder in the entry name template.
+
+     The hash for the second step includes: the hash from the first step for this file and all of its transitive dependencies.
+
+  3. After all output files have a final output path, the import paths in each output file for importing other output files are substituted. Source map offsets also have to be adjusted because the final output path is likely a different length than the temporary path used in the first step. This is also done in parallel for each output file.
+
+  This whole algorithm roughly means the hash of a given output file should change if and only if any input file in that output file, or any output file it depends on, is changed. So the output path and therefore the browser's cache key should not change for a given output file in between builds if none of the relevant input files were changed.
+
 * Fix importing a path containing a `?` character on Windows ([#989](https://github.com/evanw/esbuild/issues/989))

   On Windows, the `?` character is not allowed in path names. This causes esbuild to fail to import paths containing this character. This is usually fine because people don't put `?` in their file names for this reason. However, the import paths for some ancient CSS code contain the `?` character as a hack to work around a bug in Internet Explorer:
diff --git a/cmd/esbuild/main.go b/cmd/esbuild/main.go
index 84d7f5a4252..3f6db27ed90 100644
--- a/cmd/esbuild/main.go
+++ b/cmd/esbuild/main.go
@@ -45,11 +45,16 @@ var helpText = func(colors logger.Colors) string {
       --watch               Watch mode: rebuild on file system changes

 ` + colors.Bold + `Advanced options:` + colors.Default + `
+  --asset-names=...         Path template to use for "file" loader files
+                            (placeholders: "[name]", "[hash]")
   --banner:T=...            Text to be prepended to each output file of type T
                             where T is one of: css | js
   --charset=utf8            Do not escape UTF-8 code points
+  --chunk-names=...         Path template to use for code splitting chunks
+                            (placeholders: "[name]", "[hash]")
   --color=...               Force use of color terminal escapes (true | false)
-  --log-limit=...           Maximum message count or 0 to disable (default 10)
+  --entry-names=...         Path template to use for entry point output paths
+                            (placeholders: "[dir]", "[name]", "[hash]")
   --footer:T=...            Text to be appended to each output file of type T
                             where T is one of: css | js
   --global-name=...
+
 * Fix importing a path containing a `?` character on Windows ([#989](https://github.com/evanw/esbuild/issues/989))
 
   On Windows, the `?` character is not allowed in path names. This causes esbuild to fail to import paths containing this character. This is usually fine because people don't put `?` in their file names for this reason. However, the import paths for some ancient CSS code contain the `?` character as a hack to work around a bug in Internet Explorer:

diff --git a/cmd/esbuild/main.go b/cmd/esbuild/main.go
index 84d7f5a4252..3f6db27ed90 100644
--- a/cmd/esbuild/main.go
+++ b/cmd/esbuild/main.go
@@ -45,11 +45,16 @@ var helpText = func(colors logger.Colors) string {
   --watch               Watch mode: rebuild on file system changes
 
 ` + colors.Bold + `Advanced options:` + colors.Default + `
+  --asset-names=...         Path template to use for "file" loader files
+                            (placeholders: "[name]", "[hash]")
   --banner:T=...            Text to be prepended to each output file of type T
                             where T is one of: css | js
   --charset=utf8            Do not escape UTF-8 code points
+  --chunk-names=...         Path template to use for code splitting chunks
+                            (placeholders: "[name]", "[hash]")
   --color=...               Force use of color terminal escapes (true | false)
-  --log-limit=...           Maximum message count or 0 to disable (default 10)
+  --entry-names=...         Path template to use for entry point output paths
+                            (placeholders: "[dir]", "[name]", "[hash]")
   --footer:T=...            Text to be appended to each output file of type T
                             where T is one of: css | js
   --global-name=...         The name of the global for the IIFE format
@@ -60,6 +65,7 @@ var helpText = func(colors logger.Colors) string {
   --keep-names              Preserve "name" on functions and classes
   --log-level=...           Disable logging (info | warning | error | silent,
                             default info)
+  --log-limit=...           Maximum message count or 0 to disable (default 10)
   --main-fields=...         Override the main file order in package.json
                             (default "browser,module,main" when platform is
                             browser and "main,module" when platform is node)

diff --git a/internal/bundler/bundler.go b/internal/bundler/bundler.go
index 704e141a313..24cfc63f0fb 100644
--- a/internal/bundler/bundler.go
+++ b/internal/bundler/bundler.go
@@ -320,7 +320,9 @@ func parseFile(args parseArgs) {
 			hashBytes := sha1.Sum([]byte(source.Contents))
 			hash = hashForFileName(hashBytes)
 		}
+		dir := "./"
 		relPath := config.TemplateToString(config.SubstituteTemplate(args.options.AssetPathTemplate, config.PathPlaceholders{
+			Dir:  &dir,
 			Name: &base,
 			Hash: &hash,
 		})) + ext

diff --git a/internal/bundler/linker.go b/internal/bundler/linker.go
index 6452967d6a1..b46c8d352a5 100644
--- a/internal/bundler/linker.go
+++ b/internal/bundler/linker.go
@@ -3100,7 +3100,7 @@ func (c *linkerContext) computeChunks() []chunkInfo {
 	}
 
 	// Determine the output path template
-	template = append(append(make([]config.PathTemplate, 0, len(template)), template...), config.PathTemplate{Data: ext})
+	template = append(append(make([]config.PathTemplate, 0, len(template)+1), template...), config.PathTemplate{Data: ext})
 	chunk.finalTemplate = config.SubstituteTemplate(template, config.PathPlaceholders{
 		Dir:  &dir,
 		Name: &base,

From 0cfbe3c4299747b99d775db30e4344c6e387fd80 Mon Sep 17 00:00:00 2001
From: Evan Wallace
Date: Thu, 18 Mar 2021 20:27:15 -0700
Subject: [PATCH 4/5] put default values in help text

---
 cmd/esbuild/main.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/esbuild/main.go b/cmd/esbuild/main.go
index 3f6db27ed90..b398442fc05 100644
--- a/cmd/esbuild/main.go
+++ b/cmd/esbuild/main.go
@@ -46,15 +46,15 @@ var helpText = func(colors logger.Colors) string {
 
 ` + colors.Bold + `Advanced options:` + colors.Default + `
   --asset-names=...         Path template to use for "file" loader files
-                            (placeholders: "[name]", "[hash]")
+                            (default "[name]-[hash]")
   --banner:T=...            Text to be prepended to each output file of type T
                             where T is one of: css | js
   --charset=utf8            Do not escape UTF-8 code points
   --chunk-names=...         Path template to use for code splitting chunks
-                            (placeholders: "[name]", "[hash]")
+                            (default "[name]-[hash]")
   --color=...               Force use of color terminal escapes (true | false)
   --entry-names=...         Path template to use for entry point output paths
-                            (placeholders: "[dir]", "[name]", "[hash]")
+                            (default "[dir]/[name]", can also use "[hash]")
   --footer:T=...            Text to be appended to each output file of type T
                             where T is one of: css | js
   --global-name=...         The name of the global for the IIFE format

From 1c6e654ee29282c5d4bd4628f1971e55809e99bc Mon Sep 17 00:00:00 2001
From: Evan Wallace
Date: Thu, 18 Mar 2021 20:31:16 -0700
Subject: [PATCH 5/5] use "[name]-[hash]" instead of "[name].[hash]"

---
 internal/bundler/bundler.go                        |   4 +-
 internal/bundler/snapshots/snapshots_css.txt       |   6 +-
 internal/bundler/snapshots/snapshots_default.txt   |   6 +-
 internal/bundler/snapshots/snapshots_loader.txt    |  18 +--
 internal/bundler/snapshots/snapshots_splitting.txt | 112 +++++++++---------
 scripts/js-api-tests.js                            |  22 ++--
 6 files changed, 84 insertions(+), 84 deletions(-)

diff --git a/internal/bundler/bundler.go b/internal/bundler/bundler.go
index 24cfc63f0fb..a889108699a 100644
--- a/internal/bundler/bundler.go
+++ b/internal/bundler/bundler.go
@@ -1526,13 +1526,13 @@ func applyOptionDefaults(options *config.Options) {
 	if len(options.ChunkPathTemplate) == 0 {
 		options.ChunkPathTemplate = []config.PathTemplate{
 			{Data: "./", Placeholder: config.NamePlaceholder},
-			{Data: ".", Placeholder: config.HashPlaceholder},
+			{Data: "-", Placeholder: config.HashPlaceholder},
 		}
 	}
 	if len(options.AssetPathTemplate) == 0 {
 		options.AssetPathTemplate = []config.PathTemplate{
 			{Data: "./", Placeholder: config.NamePlaceholder},
-			{Data: ".", Placeholder: config.HashPlaceholder},
+			{Data: "-", Placeholder: config.HashPlaceholder},
 		}
 	}
 }

diff --git a/internal/bundler/snapshots/snapshots_css.txt b/internal/bundler/snapshots/snapshots_css.txt
index 90760af8d0e..5e4e5f23fc7 100644
--- a/internal/bundler/snapshots/snapshots_css.txt
+++ b/internal/bundler/snapshots/snapshots_css.txt
@@ -137,17 +137,17 @@ path {
 
 ================================================================================
 TestFileImportURLInCSS
----------- /out/example.RPS4CMHF.data ----------
+---------- /out/example-RPS4CMHF.data ----------
 This is some data.
 
 ---------- /out/entry.css ----------
 /* one.css */
 a {
-  background: url(./example.RPS4CMHF.data);
+  background: url(./example-RPS4CMHF.data);
 }
 
 /* two.css */
 b {
-  background: url(./example.RPS4CMHF.data);
+  background: url(./example-RPS4CMHF.data);
 }
 
 /* entry.css */

diff --git a/internal/bundler/snapshots/snapshots_default.txt b/internal/bundler/snapshots/snapshots_default.txt
index 49f1eb2c92b..30723b0c040 100644
--- a/internal/bundler/snapshots/snapshots_default.txt
+++ b/internal/bundler/snapshots/snapshots_default.txt
@@ -1310,14 +1310,14 @@ console.log(a, b);
 
 ================================================================================
 TestLoaderFileWithQueryParameter
----------- /out/file.JAWLBT6L.txt ----------
+---------- /out/file-JAWLBT6L.txt ----------
 This is some text
 
 ---------- /out/entry.js ----------
 // file.txt?foo
-var file_default = "./file.JAWLBT6L.txt?foo";
+var file_default = "./file-JAWLBT6L.txt?foo";
 
 // file.txt?bar
-var file_default2 = "./file.JAWLBT6L.txt?bar";
+var file_default2 = "./file-JAWLBT6L.txt?bar";
 
 // entry.js
 console.log(file_default, file_default2);

diff --git a/internal/bundler/snapshots/snapshots_loader.txt b/internal/bundler/snapshots/snapshots_loader.txt
index 693ad681652..c2e58530465 100644
--- a/internal/bundler/snapshots/snapshots_loader.txt
+++ b/internal/bundler/snapshots/snapshots_loader.txt
@@ -46,12 +46,12 @@ console.log(x_url, y_default);
 
 ================================================================================
 TestLoaderFile
----------- /out/test.T3K5TRK4.svg ----------
+---------- /out/test-T3K5TRK4.svg ----------
 
 ---------- /out/entry.js ----------
 // test.svg
 var require_test = __commonJS((exports, module) => {
-  module.exports = "./test.T3K5TRK4.svg";
+  module.exports = "./test-T3K5TRK4.svg";
 });
 
 // entry.js
 console.log(require_test());
 
 ================================================================================
 TestLoaderFileCommonJSAndES6
----------- /y.SXFQX7JJ.txt ----------
+---------- /y-SXFQX7JJ.txt ----------
 y
 
----------- /x.CH3K3DWF.txt ----------
+---------- /x-CH3K3DWF.txt ----------
 x
 
 ---------- /out.js ----------
 // x.txt
 var require_x = __commonJS((exports, module) => {
-  module.exports = "./x.CH3K3DWF.txt";
+  module.exports = "./x-CH3K3DWF.txt";
 });
 
 // y.txt
-var y_default = "./y.SXFQX7JJ.txt";
+var y_default = "./y-SXFQX7JJ.txt";
 
 // entry.js
 var x_url = require_x();
 console.log(x_url, y_default);
 
 ================================================================================
 TestLoaderFileMultipleNoCollision
----------- /dist/test.VFFI7ZOM.txt ----------
+---------- /dist/test-VFFI7ZOM.txt ----------
 test
 
 ---------- /dist/out.js ----------
 // a/test.txt
 var require_test = __commonJS((exports, module) => {
-  module.exports = "./test.VFFI7ZOM.txt";
+  module.exports = "./test-VFFI7ZOM.txt";
 });
 
 // b/test.txt
 var require_test2 = __commonJS((exports, module) => {
-  module.exports = "./test.VFFI7ZOM.txt";
+  module.exports = "./test-VFFI7ZOM.txt";
 });
 
 // entry.js

diff --git a/internal/bundler/snapshots/snapshots_splitting.txt b/internal/bundler/snapshots/snapshots_splitting.txt
index 7c533ee54c9..1017733e1b5 100644
--- a/internal/bundler/snapshots/snapshots_splitting.txt
+++ b/internal/bundler/snapshots/snapshots_splitting.txt
@@ -9,7 +9,7 @@ TestSplittingAssignToLocal
 import {
   foo,
   setFoo
-} from "./chunk.B5TJI52K.js";
+} from "./chunk-QGLMJWOZ.js";
 
 // a.js
 setFoo(123);
 console.log(foo);
 
 ---------- /out/b.js ----------
 import {
   foo
"./chunk.B5TJI52K.js"; +} from "./chunk-QGLMJWOZ.js"; // b.js console.log(foo); ----------- /out/chunk.B5TJI52K.js ---------- +---------- /out/chunk-QGLMJWOZ.js ---------- // shared.js var foo; function setFoo(value) { @@ -41,7 +41,7 @@ TestSplittingCircularReferenceIssue251 import { p, q -} from "./chunk.JAD3IYVK.js"; +} from "./chunk-AFFJAGB7.js"; export { p, q @@ -51,13 +51,13 @@ export { import { p, q -} from "./chunk.JAD3IYVK.js"; +} from "./chunk-AFFJAGB7.js"; export { p, q }; ----------- /out/chunk.JAD3IYVK.js ---------- +---------- /out/chunk-AFFJAGB7.js ---------- // b.js var q = 6; @@ -74,15 +74,15 @@ TestSplittingCrossChunkAssignmentDependencies ---------- /out/a.js ---------- import { setValue -} from "./chunk.K2Z6FE6X.js"; +} from "./chunk-Z5LSGHIX.js"; // a.js setValue(123); ---------- /out/b.js ---------- -import "./chunk.K2Z6FE6X.js"; +import "./chunk-Z5LSGHIX.js"; ----------- /out/chunk.K2Z6FE6X.js ---------- +---------- /out/chunk-Z5LSGHIX.js ---------- // shared.js var observer; var value; @@ -105,7 +105,7 @@ TestSplittingCrossChunkAssignmentDependenciesRecursive ---------- /out/a.js ---------- import { setX -} from "./chunk.PJRFLMBG.js"; +} from "./chunk-5KRVLGZM.js"; // a.js setX(); @@ -113,8 +113,8 @@ setX(); ---------- /out/b.js ---------- import { setZ -} from "./chunk.SVT47M27.js"; -import "./chunk.PJRFLMBG.js"; +} from "./chunk-M2WBFBDC.js"; +import "./chunk-5KRVLGZM.js"; // b.js setZ(); @@ -123,20 +123,20 @@ setZ(); import { setY2, setZ2 -} from "./chunk.SVT47M27.js"; +} from "./chunk-M2WBFBDC.js"; import { setX2 -} from "./chunk.PJRFLMBG.js"; +} from "./chunk-5KRVLGZM.js"; // c.js setX2(); setY2(); setZ2(); ----------- /out/chunk.SVT47M27.js ---------- +---------- /out/chunk-M2WBFBDC.js ---------- import { setX -} from "./chunk.PJRFLMBG.js"; +} from "./chunk-5KRVLGZM.js"; // y.js var _y; @@ -164,7 +164,7 @@ export { setZ2 }; ----------- /out/chunk.PJRFLMBG.js ---------- +---------- /out/chunk-5KRVLGZM.js ---------- // x.js var _x; function setX(v) { @@ -182,21 +182,21 @@ export { ================================================================================ TestSplittingDuplicateChunkCollision ---------- /out/a.js ---------- -import"./chunk.SK4BZ7G2.js"; +import"./chunk-6UBBYZAL.js"; ---------- /out/b.js ---------- -import"./chunk.SK4BZ7G2.js"; +import"./chunk-6UBBYZAL.js"; ----------- /out/chunk.SK4BZ7G2.js ---------- +---------- /out/chunk-6UBBYZAL.js ---------- console.log(123); ---------- /out/c.js ---------- -import"./chunk.WLJLUM5K.js"; +import"./chunk-KGFAUE6M.js"; ---------- /out/d.js ---------- -import"./chunk.WLJLUM5K.js"; +import"./chunk-KGFAUE6M.js"; ----------- /out/chunk.WLJLUM5K.js ---------- +---------- /out/chunk-KGFAUE6M.js ---------- console.log(123); ================================================================================ @@ -204,7 +204,7 @@ TestSplittingDynamicAndNotDynamicCommonJSIntoES6 ---------- /out/entry.js ---------- import { require_foo -} from "./chunk.VGIGB4OH.js"; +} from "./chunk-D5KVJFGV.js"; // entry.js var import_foo = __toModule(require_foo()); @@ -213,10 +213,10 @@ import("./foo.js").then(({default: {bar: b}}) => console.log(import_foo.bar, b)) ---------- /out/foo.js ---------- import { require_foo -} from "./chunk.VGIGB4OH.js"; +} from "./chunk-D5KVJFGV.js"; export default require_foo(); ----------- /out/chunk.VGIGB4OH.js ---------- +---------- /out/chunk-D5KVJFGV.js ---------- // foo.js var require_foo = __commonJS((exports) => { exports.bar = 123; @@ -231,7 +231,7 @@ TestSplittingDynamicAndNotDynamicES6IntoES6 
 ---------- /out/entry.js ----------
 import {
   bar
-} from "./chunk.JQGEX5UA.js";
+} from "./chunk-3CWABKVA.js";
 
 // entry.js
 import("./foo.js").then(({bar: b}) => console.log(bar, b));
@@ -239,12 +239,12 @@
 ---------- /out/foo.js ----------
 import {
   bar
-} from "./chunk.JQGEX5UA.js";
+} from "./chunk-3CWABKVA.js";
 export {
   bar
 };
 
----------- /out/chunk.JQGEX5UA.js ----------
+---------- /out/chunk-3CWABKVA.js ----------
 // foo.js
 var bar = 123;
 
@@ -310,7 +310,7 @@ TestSplittingHybridCJSAndESMIssue617
 ---------- /out/a.js ----------
 import {
   require_a
-} from "./chunk.XCCWV4CM.js";
+} from "./chunk-OAH3NS3J.js";
 export default require_a();
 
 ---------- /out/b.js ----------
@@ -318,7 +318,7 @@
 import {
   __defProp,
   __markAsModule,
   require_a
-} from "./chunk.XCCWV4CM.js";
+} from "./chunk-OAH3NS3J.js";
 
 // b.js
 var import_a = __toModule(require_a());
@@ -327,7 +327,7 @@
 export {
   export_foo as foo
 };
 
----------- /out/chunk.XCCWV4CM.js ----------
+---------- /out/chunk-OAH3NS3J.js ----------
 // a.js
 var require_a = __commonJS((exports) => {
   __markAsModule(exports);
@@ -349,13 +349,13 @@ TestSplittingHybridESMAndCJSIssue617
 ---------- /out/a.js ----------
 import {
   require_a
-} from "./chunk.DUBTDYGE.js";
+} from "./chunk-FG25RNWF.js";
 export default require_a();
 
 ---------- /out/b.js ----------
 import {
   require_a
-} from "./chunk.DUBTDYGE.js";
+} from "./chunk-FG25RNWF.js";
 
 // b.js
 var bar = require_a();
 export {
   bar
 };
 
@@ -363,7 +363,7 @@
----------- /out/chunk.DUBTDYGE.js ----------
+---------- /out/chunk-FG25RNWF.js ----------
 // a.js
 var require_a = __commonJS((exports) => {
   __markAsModule(exports);
@@ -382,7 +382,7 @@ TestSplittingMinifyIdentifiersCrashIssue437
 ---------- /out/a.js ----------
 import {
   a as o
-} from "./chunk.666QKTW2.js";
+} from "./chunk-KXZOZAS3.js";
 
 // a.js
 console.log(o);
@@ -390,12 +390,12 @@
 ---------- /out/b.js ----------
 import {
   a as o
-} from "./chunk.666QKTW2.js";
+} from "./chunk-KXZOZAS3.js";
 
 // b.js
 console.log(o);
 
----------- /out/chunk.666QKTW2.js ----------
+---------- /out/chunk-KXZOZAS3.js ----------
 // shared.js
 function n(o) {
 }
@@ -409,7 +409,7 @@ TestSplittingMissingLazyExport
 ---------- /out/a.js ----------
-import "./chunk.O6GZDAIM.js";
+import "./chunk-7VOEXAIV.js";
 
 // empty.js
 var empty_exports = {};
@@ -423,7 +423,7 @@ function foo() {
 console.log(foo());
 
 ---------- /out/b.js ----------
-import "./chunk.O6GZDAIM.js";
+import "./chunk-7VOEXAIV.js";
 
 // common.js
 function bar() {
@@ -433,14 +433,14 @@
 // b.js
 console.log(bar());
 
----------- /out/chunk.O6GZDAIM.js ----------
+---------- /out/chunk-7VOEXAIV.js ----------
 
 ================================================================================
 TestSplittingNestedDirectories
 ---------- /Users/user/project/out/pageA/page.js ----------
 import {
   shared_default
-} from "../chunk.T3IRO6DB.js";
+} from "../chunk-BUZITMSX.js";
 
 // Users/user/project/src/pages/pageA/page.js
 console.log(shared_default);
@@ -448,12 +448,12 @@
 ---------- /Users/user/project/out/pageB/page.js ----------
 import {
   shared_default
-} from "../chunk.T3IRO6DB.js";
+} from "../chunk-BUZITMSX.js";
 
 // Users/user/project/src/pages/pageB/page.js
 console.log(-shared_default);
 
----------- /Users/user/project/out/chunk.T3IRO6DB.js ----------
+---------- /Users/user/project/out/chunk-BUZITMSX.js ----------
 // Users/user/project/src/pages/shared.js
 var shared_default = 123;
 
@@ -466,7 +466,7 @@ TestSplittingReExportIssue273
 ---------- /out/a.js ----------
 import {
   a
-} from "./chunk.MYHFHJ2W.js";
+} from "./chunk-WZUP7CLG.js";
 export {
   a
 };
@@ -474,12 +474,12 @@
 ---------- /out/b.js ----------
 import {
   a
-} from "./chunk.MYHFHJ2W.js";
+} from "./chunk-WZUP7CLG.js";
 export {
   a
 };
 
----------- /out/chunk.MYHFHJ2W.js ----------
+---------- /out/chunk-WZUP7CLG.js ----------
 // a.js
 var a = 1;
 
@@ -492,7 +492,7 @@ TestSplittingSharedCommonJSIntoES6
 ---------- /out/a.js ----------
 import {
   require_shared
-} from "./chunk.J4PXVRFR.js";
+} from "./chunk-L66EYPCY.js";
 
 // a.js
 var {foo} = require_shared();
 console.log(foo);
@@ -501,13 +501,13 @@
 ---------- /out/b.js ----------
 import {
   require_shared
-} from "./chunk.J4PXVRFR.js";
+} from "./chunk-L66EYPCY.js";
 
 // b.js
 var {foo} = require_shared();
 console.log(foo);
 
----------- /out/chunk.J4PXVRFR.js ----------
+---------- /out/chunk-L66EYPCY.js ----------
 // shared.js
 var require_shared = __commonJS((exports) => {
   exports.foo = 123;
@@ -522,7 +522,7 @@ TestSplittingSharedES6IntoES6
 ---------- /out/a.js ----------
 import {
   foo
-} from "./chunk.OQIDAIZA.js";
+} from "./chunk-6XDG7YIK.js";
 
 // a.js
 console.log(foo);
@@ -530,12 +530,12 @@
 ---------- /out/b.js ----------
 import {
   foo
-} from "./chunk.OQIDAIZA.js";
+} from "./chunk-6XDG7YIK.js";
 
 // b.js
 console.log(foo);
 
----------- /out/chunk.OQIDAIZA.js ----------
+---------- /out/chunk-6XDG7YIK.js ----------
 // shared.js
 var foo = 123;
 
@@ -546,7 +546,7 @@ TestSplittingSideEffectsWithoutDependencies
 ---------- /out/a.js ----------
-import "./chunk.3LEG74S7.js";
+import "./chunk-K5RKT6UP.js";
 
 // shared.js
 var a = 1;
 
 // a.js
 console.log(a);
@@ -555,7 +555,7 @@
 ---------- /out/b.js ----------
-import "./chunk.3LEG74S7.js";
+import "./chunk-K5RKT6UP.js";
 
 // shared.js
 var b = 2;
 
 // b.js
 console.log(b);
@@ -563,7 +563,7 @@
----------- /out/chunk.3LEG74S7.js ----------
+---------- /out/chunk-K5RKT6UP.js ----------
 // shared.js
 console.log("side effect");

diff --git a/scripts/js-api-tests.js b/scripts/js-api-tests.js
index 65c0ebd1ccc..a6a56f050e6 100644
--- a/scripts/js-api-tests.js
+++ b/scripts/js-api-tests.js
@@ -501,7 +501,7 @@ let buildTests = {
     })
     assert.strictEqual(value.outputFiles, void 0)
     const result = require(output)
-    assert.strictEqual(result.value, './data.L3XDQOAT.bin')
+    assert.strictEqual(result.value, './data-L3XDQOAT.bin')
     assert.strictEqual(result.__esModule, true)
   },
 
@@ -528,17 +528,17 @@
     assert.deepStrictEqual(value.outputFiles.length, 3)
     assert.deepStrictEqual(value.outputFiles[0].path, path.join(outdir, 'a', 'in1.js'))
     assert.deepStrictEqual(value.outputFiles[1].path, path.join(outdir, 'b', 'in2.js'))
-    assert.deepStrictEqual(value.outputFiles[2].path, path.join(outdir, 'chunk.DO6KKKV6.js'))
+    assert.deepStrictEqual(value.outputFiles[2].path, path.join(outdir, 'chunk-NDYHVWH6.js'))
     assert.deepStrictEqual(value.outputFiles[0].text, `import {
   foo
-} from "https://www.example.com/assets/chunk.DO6KKKV6.js";
+} from "https://www.example.com/assets/chunk-NDYHVWH6.js";
 export {
   foo as input1
 };
 `)
     assert.deepStrictEqual(value.outputFiles[1].text, `import {
   foo
-} from "https://www.example.com/assets/chunk.DO6KKKV6.js";
+} from "https://www.example.com/assets/chunk-NDYHVWH6.js";
 export {
   foo as input2
 };
@@ -570,7 +570,7 @@
     })
     assert.strictEqual(value.outputFiles, void 0)
     const result = require(output)
-    assert.strictEqual(result.value, 'https://www.example.com/assets/data.L3XDQOAT.bin')
+    assert.strictEqual(result.value, 'https://www.example.com/assets/data-L3XDQOAT.bin')
     assert.strictEqual(result.__esModule, true)
   },
 
@@ -590,7 +590,7 @@
     assert.strictEqual(value.outputFiles, void 0)
     assert.strictEqual(await readFileAsync(output, 'utf8'), `/* scripts/.js-api-tests/fileLoaderCSS/in.css */
 body {
-  background: url(https://www.example.com/assets/data.L3XDQOAT.bin);
+  background: url(https://www.example.com/assets/data-L3XDQOAT.bin);
 }
 `)
   },
 
@@ -775,7 +775,7 @@ body {
     const inEntry1 = makeInPath(entry1);
     const inEntry2 = makeInPath(entry2);
     const inImported = makeInPath(imported);
-    const chunk = 'chunk.22VG3QOS.js';
+    const chunk = 'chunk-TJO5LOVU.js';
     const outEntry1 = makeOutPath(path.basename(entry1));
     const outEntry2 = makeOutPath(path.basename(entry2));
     const outChunk = makeOutPath(chunk);
@@ -839,7 +839,7 @@ body {
     const inEntry1 = makeInPath(entry1);
     const inEntry2 = makeInPath(entry2);
     const inImported = makeInPath(imported);
-    const chunk = 'chunk.644ZBJVW.js';
+    const chunk = 'chunk-3OOYZXPJ.js';
     const outEntry1 = makeOutPath(path.basename(entry1));
     const outEntry2 = makeOutPath(path.basename(entry2));
     const outChunk = makeOutPath(chunk);
@@ -905,7 +905,7 @@ body {
     const inImport1 = makeInPath(import1);
     const inImport2 = makeInPath(import2);
     const inShared = makeInPath(shared);
-    const chunk = 'chunk.B5NUMPKN.js';
+    const chunk = 'chunk-HPFXYN2E.js';
     const outEntry = makeOutPath(path.relative(testDir, entry));
     const outImport1 = makeOutPath(path.relative(testDir, import1));
     const outImport2 = makeOutPath(path.relative(testDir, import2));
@@ -1250,7 +1250,7 @@ body {
     assert.strictEqual(value.outputFiles.length, 3)
 
     // These should all use forward slashes, even on Windows
-    const chunk = 'chunk.RDAS5GVQ.js'
+    const chunk = 'chunk-GQNMLRG3.js'
     assert.strictEqual(Buffer.from(value.outputFiles[0].contents).toString(), `import {
   common_default
 } from "./${chunk}";
@@ -1307,7 +1307,7 @@
     assert.strictEqual(value.outputFiles.length, 3)
 
     // These should all use forward slashes, even on Windows
-    const chunk = 'chunk.L62YHXKF.js'
+    const chunk = 'chunk-GNYLVIR6.js'
     assert.strictEqual(Buffer.from(value.outputFiles[0].contents).toString(), `import {
   common_default
 } from "../${chunk}";